| repo_name (stringlengths 6–112) | path (stringlengths 4–204) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 714–810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
kmike/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 4 | 2586 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD Style.
import numpy as np
import pylab as pl
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(int)
# Set regularization parameter
for i, C in enumerate(10. ** np.arange(1, 4)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%d" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = pl.subplot(3, 2, 2 * i + 1)
l2_plot = pl.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
pl.text(-8, 3, "C = %d" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
pl.show()
| bsd-3-clause |
rrohan/scikit-learn | sklearn/ensemble/weight_boosting.py | 71 | 40664 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base estimator of this
boosting ensemble is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float64)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
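# In other words, for each sample this returns the SAMME.R per-class
# contribution
#     h_k(x) = (K - 1) * (log p_k(x) - (1 / K) * sum_j log p_j(x)),
# i.e. the clipped log-probabilities centred by their per-sample mean and
# scaled by (K - 1), where K = n_classes.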
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
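Examples
--------
A minimal, illustrative usage sketch (the toy dataset helper and its
parameters below are arbitrary choices, not part of this estimator's API):

>>> from sklearn.datasets import make_classification
>>> from sklearn.ensemble import AdaBoostClassifier
>>> X, y = make_classification(n_samples=100, random_state=0)
>>> clf = AdaBoostClassifier(n_estimators=100, random_state=0)
>>> clf.fit(X, y)  # doctest: +SKIP
>>> clf.predict(X[:5])  # doctest: +SKIP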
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME.R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
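# (classes == y[:, np.newaxis]) is a boolean [n_samples, n_classes] matrix,
# so take() picks 1. for the true class and -1. / (K - 1) everywhere else,
# yielding the y coding matrix described above.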
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the ``classes_`` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
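# For binary problems, collapse the two symmetric per-class scores into a
# single signed score: negative values favour classes_[0], positive values
# favour classes_[1].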
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the ``classes_`` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same as that of the ``classes_`` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : generator of array, shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same as that of the ``classes_`` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of
outputs is the same as that of the ``classes_`` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
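Examples
--------
A minimal, illustrative usage sketch (the toy dataset helper and its
parameters below are arbitrary choices, not part of this estimator's API):

>>> from sklearn.datasets import make_regression
>>> from sklearn.ensemble import AdaBoostRegressor
>>> X, y = make_regression(n_samples=100, random_state=0)
>>> regr = AdaBoostRegressor(n_estimators=100, random_state=0)
>>> regr.fit(X, y)  # doctest: +SKIP
>>> regr.predict(X[:5])  # doctest: +SKIP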
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
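# Inverse-transform sampling: draw uniform numbers in [0, 1) and locate
# them in the normalized cumulative weight distribution, so each training
# sample is drawn with probability proportional to its weight.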
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# ensure bootstrap_idx is an ndarray (searchsorted can return a scalar)
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
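# Weighted median: sort each sample's predictions, accumulate the
# corresponding estimator weights in that order, and pick the first
# estimator whose cumulative weight reaches half of the total weight.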
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
| bsd-3-clause |
vityurkiv/Ox | test/tests/time_integrators/scalar/run.py | 5 | 4015 | #!/usr/bin/env python
import subprocess
import sys
import csv
import matplotlib.pyplot as plt
import numpy as np
# Use fonts that match LaTeX
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 17
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
# Small font size for the legend
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('x-small')
def get_last_row(csv_filename):
'''
Function which returns just the last row of a CSV file. We have to
read every line of the file; there was no stackoverflow example of
reading just the last line.
http://stackoverflow.com/questions/20296955/reading-last-row-from-csv-file-python-error
'''
with open(csv_filename, 'r') as f:
lastrow = None
for row in csv.reader(f):
if (row != []): # skip blank lines at end of file.
lastrow = row
return lastrow
def run_moose(dt, time_integrator):
'''
Function which actually runs MOOSE.
'''
implicit_flag = 'true'
explicit_methods = ['ExplicitEuler', 'ExplicitMidpoint', 'Heun', 'Ralston']
# Set implicit_flag based on TimeIntegrator name
if (time_integrator in explicit_methods):
implicit_flag = 'false'
command_line_args = ['../../../moose_test-opt', '-i', 'scalar.i',
'Executioner/dt={}'.format(dt),
'Executioner/TimeIntegrator/type={}'.format(time_integrator),
'GlobalParams/implicit={}'.format(implicit_flag)]
try:
child = subprocess.Popen(command_line_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# communicate() waits for the process to terminate, so there's no
# need to wait() for it. It also sets the returncode attribute on
# child.
(stdoutdata, stderrdata) = child.communicate()
if (child.returncode != 0):
print('Running MOOSE failed: program output is below:')
print(stdoutdata)
raise
except:
print('Error executing moose_test')
sys.exit(1)
# Parse the last line of the output file to get the error at the final time.
last_row = get_last_row('scalar_out.csv')
return float(last_row[1])
#
# Main program
#
fig = plt.figure()
ax1 = fig.add_subplot(111)
# Lists of timesteps and TimeIntegrators to plot.
time_integrators = ['ImplicitEuler', 'ImplicitMidpoint', 'LStableDirk2', 'BDF2', 'CrankNicolson',
'LStableDirk3', 'LStableDirk4', 'AStableDirk4',
'ExplicitEuler', 'ExplicitMidpoint', 'Heun', 'Ralston']
dts = [.125, .0625, .03125, .015625]
# Plot colors
colors = ['maroon', 'blue', 'green', 'black', 'burlywood', 'olivedrab', 'midnightblue',
'tomato', 'darkmagenta', 'chocolate', 'lightslategray', 'skyblue']
# Plot line markers
markers = ['v', 'o', 'x', '^', 'H', 'h', '+', 'D', '*', '4', 'd', '8']
# Plot line styles
linestyles = [':', '-', '-.', '--', ':', '-.', '--', ':', '--', '-', '-.', '-']
for i in xrange(len(time_integrators)):
time_integrator = time_integrators[i]
# Place to store the results for this TimeIntegrator
results = []
# Call MOOSE to compute the results
for dt in dts:
results.append(run_moose(dt, time_integrator))
# Make plot
xdata = np.log10(np.reciprocal(dts))
ydata = np.log10(results)
# Compute linear fit of last three points.
start_fit = len(xdata) - 3
end_fit = len(xdata)
fit = np.polyfit(xdata[start_fit:end_fit], ydata[start_fit:end_fit], 1)
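# The slope fit[0] of this log-log fit estimates the observed convergence
# rate of the time integrator; it is reported in the legend label below.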
# Make the plot -- unpack the user's additional plotting arguments
# from kwargs by prepending with **.
ax1.plot(xdata, ydata, label=time_integrator + ", $" + "{:.2f}".format(fit[0]) + "$",
color=colors[i], marker=markers[i], linestyle=linestyles[i])
# Set up the axis labels.
ax1.set_xlabel('$\log (\Delta t^{-1})$')
ax1.set_ylabel('$\log \|e(T)\|_{L^2}$')
# Add a legend
plt.legend(loc='lower left', prop=fontP)
# Save a PDF
plt.savefig('plot.pdf', format='pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
hitszxp/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 26 | 13430 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed archive is around 14 MB. Once
uncompressed the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays; the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition vectorize the
posts into feature vectors.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warn("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warn("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
open(archive_path, 'wb').write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
open(cache_path, 'wb').write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:').
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Parameters
----------
subset: 'train', 'test' or 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
data.description = 'the 20 newsgroups by date dataset'
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Parameters
----------
subset: 'train', 'test' or 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
| bsd-3-clause |
emon10005/scikit-image | skimage/io/tests/test_mpl_imshow.py | 12 | 2852 | from __future__ import division
import numpy as np
from skimage import io
from skimage._shared._warnings import expected_warnings
import matplotlib.pyplot as plt
def setup():
io.reset_plugins()
# test images. Note that they don't have their full range for their dtype,
# but we still expect the display range to equal the full dtype range.
im8 = np.array([[0, 64], [128, 240]], np.uint8)
im16 = im8.astype(np.uint16) * 256
im64 = im8.astype(np.uint64)
imf = im8 / 255
im_lo = imf / 1000
im_hi = imf + 10
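# im_lo exercises the "Low image dynamic range" warning path and im_hi the
# "out of standard range" path tested below.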
def n_subplots(ax_im):
"""Return the number of subplots in the figure containing an ``AxesImage``.
Parameters
----------
ax_im : matplotlib.pyplot.AxesImage object
The input ``AxesImage``.
Returns
-------
n : int
The number of subplots in the corresponding figure.
Notes
-----
This function is intended to check whether a colorbar was drawn, in
which case two subplots are expected. For standard imshows, one
subplot is expected.
"""
return len(ax_im.get_figure().get_axes())
def test_uint8():
plt.figure()
ax_im = io.imshow(im8)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 255)
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_uint16():
plt.figure()
ax_im = io.imshow(im16)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 65535)
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_float():
plt.figure()
ax_im = io.imshow(imf)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 1)
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_low_dynamic_range():
with expected_warnings(["Low image dynamic range"]):
ax_im = io.imshow(im_lo)
assert ax_im.get_clim() == (im_lo.min(), im_lo.max())
# check that a colorbar was created
assert ax_im.colorbar is not None
def test_outside_standard_range():
plt.figure()
with expected_warnings(["out of standard range"]):
ax_im = io.imshow(im_hi)
assert ax_im.get_clim() == (im_hi.min(), im_hi.max())
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
def test_nonstandard_type():
plt.figure()
with expected_warnings(["Non-standard image type",
"Low image dynamic range"]):
ax_im = io.imshow(im64)
assert ax_im.get_clim() == (im64.min(), im64.max())
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
def test_signed_image():
plt.figure()
im_signed = np.array([[-0.5, -0.2], [0.1, 0.4]])
ax_im = io.imshow(im_signed)
assert ax_im.get_clim() == (-0.5, 0.5)
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
if __name__ == '__main__':
np.testing.run_module_suite()
| bsd-3-clause |
huletlab/PyAbel | examples/example_all_O2.py | 1 | 4291 | # -*- coding: utf-8 -*-
# This example compares the available inverse Abel transform methods
# currently - direct, hansenlaw, and basex
# processing the O2- photoelectron velocity-map image
#
# Note it transforms only the Q0 (top-right) quadrant
# using the fundamental transform code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import abel
import collections
import matplotlib.pylab as plt
from time import time
# inverse Abel transform methods -----------------------------
# dictionary of method: function()
transforms = {
"basex": abel.basex.basex_transform,
"direct": abel.direct.direct_transform,
"hansenlaw": abel.hansenlaw.hansenlaw_transform,
"onion_bordas": abel.onion_bordas.onion_bordas_transform,
"onion_dasch": abel.dasch.onion_peeling_transform,
"three_point": abel.dasch.three_point_transform,
"two_point" : abel.dasch.two_point_transform,
}
# sort dictionary
transforms = collections.OrderedDict(sorted(transforms.items()))
ntrans = np.size(transforms.keys()) # number of transforms
# Image: O2- VMI 1024x1024 pixel ------------------
IM = np.loadtxt('data/O2-ANU1024.txt.bz2')
# recenter the image to mid-pixel (odd image width)
IModd = abel.tools.center.center_image(IM, center="slice", odd_size=True)
h, w = IModd.shape
print("centered image 'data/O2-ANU2048.txt' shape = {:d}x{:d}".format(h, w))
# split image into quadrants
Q = abel.tools.symmetry.get_image_quadrants(IModd, reorient=True)
Q0 = Q[0]
Q0fresh = Q0.copy() # keep clean copy
print ("quadrant shape {}".format(Q0.shape))
# Intensity mask used for intensity normalization
# quadrant image region of bright pixels
mask = np.zeros(Q0.shape, dtype=bool)
mask[500:512, 358:365] = True
# process Q0 quadrant using each method --------------------
iabelQ = [] # keep inverse Abel transformed image
sp = [] # speed distributions
meth = [] # methods
for q, method in enumerate(transforms.keys()):
Q0 = Q0fresh.copy() # top-right quadrant of O2- image
print ("\n------- {:s} inverse ...".format(method))
t0 = time()
# inverse Abel transform using 'method'
IAQ0 = transforms[method](Q0, direction="inverse", dr=0.1)
print (" {:.1f} sec".format(time()-t0))
# polar projection and speed profile
radial, speed = abel.tools.vmi.angular_integration(IAQ0, origin=(0, 0),
dr=0.1)
# normalize image intensity and speed distribution
IAQ0 /= IAQ0[mask].max()
speed /= speed[radial > 50].max()
# keep data for plots
iabelQ.append(IAQ0)
sp.append((radial, speed))
meth.append(method)
# reassemble image, each quadrant a different method
# plot inverse Abel transformed image slices, and respective speed distributions
ax0 = plt.subplot2grid((1, 2), (0, 0))
ax1 = plt.subplot2grid((1, 2), (0, 1))
def ann_plt(quad, subquad, txt):
# -ve because numpy coords from top
annot_angle = -(30+30*subquad+quad*90)*np.pi/180
annot_coord = (h/2+(h*0.8)*np.cos(annot_angle)/2,
w/2+(w*0.8)*np.sin(annot_angle)/2)
ax0.annotate(txt, annot_coord, color="yellow", horizontalalignment='left')
# for < 4 images pad using a blank quadrant
r, c = Q0.shape
Q = np.zeros((4, r, c))
indx = np.triu_indices(iabelQ[0].shape[0])
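# Upper-triangle indices let two transform methods share one quadrant:
# the lower part keeps method iq and the upper triangle is overwritten
# with the next method further down in the loop.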
iq = 0
for q in range(4):
Q[q] = iabelQ[iq].copy()
ann_plt(q, 0, meth[iq])
ax1.plot(*(sp[iq]), label=meth[iq], alpha=0.3)
iq += 1
if iq < len(transforms):
Q[q][indx] = np.triu(iabelQ[iq])[indx]
ann_plt(q, 1, meth[iq])
ax1.plot(*(sp[iq]), label=meth[iq], alpha=0.3)
iq += 1
# reassemble image from transformed (part-)quadrants
im = abel.tools.symmetry.put_image_quadrants((Q[0], Q[1], Q[2], Q[3]),
original_image_shape=IModd.shape)
ax0.axis('off')
ax0.set_title("inverse Abel transforms")
ax0.imshow(im, vmin=0, vmax=0.8)
ax1.set_title("speed distribution")
ax1.axis(ymin=-0.05, ymax=1.1, xmin=50, xmax=450)
ax1.legend(loc=0, labelspacing=0.1, fontsize=10)
plt.tight_layout()
# save a copy of the plot
plt.savefig('example_all_O2.png', dpi=100)
plt.show()
| mit |
heyrict/exam | SysAna/sysana.py | 1 | 3303 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0, '..')
from exam import *
from data_processing import split_wrd, _in_list, unsqueeze_numlist, colorit
import pandas as pd
CHAP = 'Which chapter to choose(empty to enter search mode)\n'\
'\t0\t未知\n'\
'\t1\t骨、骨连接\n'\
'\t2\t肌\n'\
'\t3\t内脏学总论\n'\
'\t4\t消化系统\n'\
'\t5\t呼吸系统\n'\
'\t6\t泌尿系统\n'\
'\t7\t生殖系统\n'\
'\t8\t腹膜\n'\
'\t9\t心血管系统\n'\
'\t10\t淋巴系统\n'\
'\t11\t感觉器总论\n'\
'\t12\t视器\n'\
'\t13\t前庭蜗器\n'\
'\t16\t中枢神经系统\n'\
'\t17\t周围神经系统\n'\
'\t18\t内分泌系统\n'\
'\t19\t组织胚胎学\n'
class BeginQuestFormSysAna(BeginQuestForm):
def selchap(self,qf):
# chapter
kind = InteractiveAnswer(CHAP+':',accept_empty=True,
serializer=lambda x: [int(i) for i in split_wrd(x,list(', ,、'))],
verify=unsqueeze_numlist('0-13,16-19')).get()
if kind:
qf = QuestForm([i for i in qf if _in_list(i.args['Chapter'],kind)])
else:
# include
kind = InteractiveAnswer('Which chapter to include?(empty to include all): ',accept_empty=True,
serializer=lambda x:split_wrd(x,list(', ,、'))).get()
if kind:
qf = QuestForm([i for i in qf if _in_list(','.join(i.q+i.sel),kind)])
# exclude
kind = InteractiveAnswer('Which chapter to exclude?(empty to skip): ',accept_empty=True,
serializer=lambda x:split_wrd(x,list(', ,、'))).get()
if kind:
qf = QuestForm([i for i in qf if not _in_list(','.join(i.q+i.sel),kind)])
# difficulties
kind = InteractiveAnswer('Which difficulty(ies) to choose? ',\
serializer=lambda x:sum([list(i) for i in split_wrd(x,list(', ,、'),ignore_space=True)],[]),\
verify='1234').get()
outqf = QuestForm([i for i in qf if _in_list(i.args['Difficulty'],kind)])
return outqf
def raise_sel(self,quest,**kwargs):
if quest.sel:
for s,t in zip(quest.sel,'ABCDE'):
print(t+'.',s)
def raise_q(self,quest,**kwargs):
print('Question %d/%d: '%(len(self.other)+len(self.correct)+len(self.wrong)+1,self.length),end='')
print('\n'.join(quest.q),'' if len(quest.ta[0])==1 else '[多选]')
return
def check_ans(self,ans,quest,**kwargs):
if ans == 'pass':
print(colorit('Roger!','magenta'))
return 'pass'
if set(list(split_wrd(ans.upper(),list(', ,、'),''))) == set(list(''.join(quest.ta))):
print(colorit('Correct!','green'))
return True
else:
print(colorit('WRONG!','red'))
return False
def main():
t=QuestFormExcelLoader(qcol='question',selcol=['option_'+i for i in 'abcde'],
tacol='question_answer', argcol={'Difficulty':'question_difclt','Chapter':'question_num'})
BeginQuestFormSysAna(t.load('Data.xlsx'),no_filter=t.is_cached).start()
if __name__ == '__main__':
main()
| apache-2.0 |
pratapvardhan/pandas | pandas/core/reshape/concat.py | 3 | 22200 | """
concat routines
"""
import numpy as np
from pandas import compat, DataFrame, Series, Index, MultiIndex
from pandas.core.index import (_get_objs_combined_axis,
_ensure_index, _get_consensus_names,
_all_indexes_same)
from pandas.core.arrays.categorical import (_factorize_from_iterable,
_factorize_from_iterables)
from pandas.core.internals import concatenate_block_managers
from pandas.core import common as com
from pandas.core.generic import NDFrame
import pandas.core.dtypes.concat as _concat
# ---------------------------------------------------------------------
# Concatenate DataFrame objects
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
sort=None, copy=True):
"""
Concatenate pandas objects along a particular axis with optional set logic
along the other axes.
Can also add a layer of hierarchical indexing on the concatenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis(es)
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic
ignore_index : boolean, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys
names : list, default None
Names for the levels in the resulting hierarchical index
verify_integrity : boolean, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation
sort : boolean, default None
Sort non-concatenation axis if it is not already aligned when `join`
is 'outer'. The current default of sorting is deprecated and will
change to not-sorting in a future version of pandas.
Explicitly pass ``sort=True`` to silence the warning and sort.
Explicitly pass ``sort=False`` to silence the warning and not sort.
This has no effect when ``join='inner'``, which already preserves
the order of the non-concatenation axis.
.. versionadded:: 0.23.0
copy : boolean, default True
If False, do not copy data unnecessarily
Returns
-------
concatenated : object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
Notes
-----
The keys, levels, and names arguments are all optional.
A walkthrough of how this method fits in with other tools for combining
pandas objects can be found `here
<http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.
See Also
--------
Series.append
DataFrame.append
DataFrame.join
DataFrame.merge
Examples
--------
Combine two ``Series``.
>>> s1 = pd.Series(['a', 'b'])
>>> s2 = pd.Series(['c', 'd'])
>>> pd.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> pd.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2',])
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Label the index keys you create with the ``names`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'],
... names=['Series name', 'Row ID'])
Series name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> pd.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``NaN`` values.
>>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> pd.concat([df1, df3])
animal letter number
0 NaN a 1
1 NaN b 2
0 cat c 3
1 dog d 4
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> pd.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects horizontally along the x axis by
passing in ``axis=1``.
>>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> pd.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
>>> df5 = pd.DataFrame([1], index=['a'])
>>> df5
0
a 1
>>> df6 = pd.DataFrame([2], index=['a'])
>>> df6
0
a 2
>>> pd.concat([df5, df6], verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: ['a']
"""
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
ignore_index=ignore_index, join=join,
keys=keys, levels=levels, names=names,
verify_integrity=verify_integrity,
copy=copy, sort=sort)
return op.get_result()
class _Concatenator(object):
"""
Orchestrates a concatenation operation for BlockManagers
"""
def __init__(self, objs, axis=0, join='outer', join_axes=None,
keys=None, levels=None, names=None,
ignore_index=False, verify_integrity=False, copy=True,
sort=False):
if isinstance(objs, (NDFrame, compat.string_types)):
raise TypeError('first argument must be an iterable of pandas '
'objects, you passed an object of type '
'"{name}"'.format(name=type(objs).__name__))
if join == 'outer':
self.intersect = False
elif join == 'inner':
self.intersect = True
else: # pragma: no cover
raise ValueError('Only can inner (intersect) or outer (union) '
'join the other axis')
if isinstance(objs, dict):
if keys is None:
keys = sorted(objs)
objs = [objs[k] for k in keys]
else:
objs = list(objs)
if len(objs) == 0:
raise ValueError('No objects to concatenate')
if keys is None:
objs = list(com._not_none(*objs))
else:
# #1649
clean_keys = []
clean_objs = []
for k, v in zip(keys, objs):
if v is None:
continue
clean_keys.append(k)
clean_objs.append(v)
objs = clean_objs
name = getattr(keys, 'name', None)
keys = Index(clean_keys, name=name)
if len(objs) == 0:
raise ValueError('All objects passed were None')
# consolidate data & figure out what our result ndim is going to be
ndims = set()
for obj in objs:
if not isinstance(obj, NDFrame):
msg = ('cannot concatenate object of type "{0}";'
' only pd.Series, pd.DataFrame, and pd.Panel'
' (deprecated) objs are valid'.format(type(obj)))
raise TypeError(msg)
# consolidate
obj._consolidate(inplace=True)
ndims.add(obj.ndim)
# get the sample
# want the highest ndim that we have, and must be non-empty
# unless all objs are empty
sample = None
if len(ndims) > 1:
max_ndim = max(ndims)
for obj in objs:
if obj.ndim == max_ndim and np.sum(obj.shape):
sample = obj
break
else:
            # filter out the empties if we don't have multi-index possibilities
            # note: keep empty Series, as they affect the result columns / name
non_empties = [obj for obj in objs
if sum(obj.shape) > 0 or isinstance(obj, Series)]
if (len(non_empties) and (keys is None and names is None and
levels is None and
join_axes is None and
not self.intersect)):
objs = non_empties
sample = objs[0]
if sample is None:
sample = objs[0]
self.objs = objs
# Standardize axis parameter to int
if isinstance(sample, Series):
axis = DataFrame()._get_axis_number(axis)
else:
axis = sample._get_axis_number(axis)
# Need to flip BlockManager axis in the DataFrame special case
self._is_frame = isinstance(sample, DataFrame)
if self._is_frame:
axis = 1 if axis == 0 else 0
self._is_series = isinstance(sample, Series)
if not 0 <= axis <= sample.ndim:
raise AssertionError("axis must be between 0 and {ndim}, input was"
" {axis}".format(ndim=sample.ndim, axis=axis))
# if we have mixed ndims, then convert to highest ndim
# creating column numbers as needed
if len(ndims) > 1:
current_column = 0
max_ndim = sample.ndim
self.objs, objs = [], self.objs
for obj in objs:
ndim = obj.ndim
if ndim == max_ndim:
pass
elif ndim != max_ndim - 1:
raise ValueError("cannot concatenate unaligned mixed "
"dimensional NDFrame objects")
else:
name = getattr(obj, 'name', None)
if ignore_index or name is None:
name = current_column
current_column += 1
# doing a row-wise concatenation so need everything
# to line up
if self._is_frame and axis == 1:
name = 0
obj = sample._constructor({name: obj})
self.objs.append(obj)
# note: this is the BlockManager axis (since DataFrame is transposed)
self.axis = axis
self.join_axes = join_axes
self.keys = keys
self.names = names or getattr(keys, 'names', None)
self.levels = levels
self.sort = sort
self.ignore_index = ignore_index
self.verify_integrity = verify_integrity
self.copy = copy
self.new_axes = self._get_new_axes()
def get_result(self):
# series only
if self._is_series:
# stack blocks
if self.axis == 0:
name = com._consensus_name_attr(self.objs)
mgr = self.objs[0]._data.concat([x._data for x in self.objs],
self.new_axes)
cons = _concat._get_series_result_type(mgr, self.objs)
return cons(mgr, name=name).__finalize__(self, method='concat')
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
cons = _concat._get_series_result_type(data)
index, columns = self.new_axes
df = cons(data, index=index)
df.columns = columns
return df.__finalize__(self, method='concat')
# combine block managers
else:
mgrs_indexers = []
for obj in self.objs:
mgr = obj._data
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
if ax == self.axis:
# Suppress reindexing on concat axis
continue
obj_labels = mgr.axes[ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.reindex(new_labels)[1]
mgrs_indexers.append((obj._data, indexers))
new_data = concatenate_block_managers(
mgrs_indexers, self.new_axes, concat_axis=self.axis,
copy=self.copy)
if not self.copy:
new_data._consolidate_inplace()
cons = _concat._get_frame_result_type(new_data, self.objs)
return (cons._from_axes(new_data, self.new_axes)
.__finalize__(self, method='concat'))
def _get_result_dim(self):
if self._is_series and self.axis == 1:
return 2
else:
return self.objs[0].ndim
def _get_new_axes(self):
ndim = self._get_result_dim()
new_axes = [None] * ndim
if self.join_axes is None:
for i in range(ndim):
if i == self.axis:
continue
new_axes[i] = self._get_comb_axis(i)
else:
if len(self.join_axes) != ndim - 1:
raise AssertionError("length of join_axes must not be equal "
"to {length}".format(length=ndim - 1))
# ufff...
indices = compat.lrange(ndim)
indices.remove(self.axis)
for i, ax in zip(indices, self.join_axes):
new_axes[i] = ax
new_axes[self.axis] = self._get_concat_axis()
return new_axes
def _get_comb_axis(self, i):
data_axis = self.objs[0]._get_block_manager_axis(i)
try:
return _get_objs_combined_axis(self.objs, axis=data_axis,
intersect=self.intersect,
sort=self.sort)
except IndexError:
types = [type(x).__name__ for x in self.objs]
raise TypeError("Cannot concatenate list of {types}"
.format(types=types))
def _get_concat_axis(self):
"""
Return index to be used along concatenation axis.
"""
if self._is_series:
if self.axis == 0:
indexes = [x.index for x in self.objs]
elif self.ignore_index:
idx = com._default_index(len(self.objs))
return idx
elif self.keys is None:
names = [None] * len(self.objs)
num = 0
has_names = False
for i, x in enumerate(self.objs):
if not isinstance(x, Series):
raise TypeError("Cannot concatenate type 'Series' "
"with object of type {type!r}"
.format(type=type(x).__name__))
if x.name is not None:
names[i] = x.name
has_names = True
else:
names[i] = num
num += 1
if has_names:
return Index(names)
else:
return com._default_index(len(self.objs))
else:
return _ensure_index(self.keys)
else:
indexes = [x._data.axes[self.axis] for x in self.objs]
if self.ignore_index:
idx = com._default_index(sum(len(i) for i in indexes))
return idx
if self.keys is None:
concat_axis = _concat_indexes(indexes)
else:
concat_axis = _make_concat_multiindex(indexes, self.keys,
self.levels, self.names)
self._maybe_check_integrity(concat_axis)
return concat_axis
def _maybe_check_integrity(self, concat_index):
if self.verify_integrity:
if not concat_index.is_unique:
overlap = concat_index[concat_index.duplicated()].unique()
raise ValueError('Indexes have overlapping values: '
'{overlap!s}'.format(overlap=overlap))
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
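# For illustration (hypothetical labels): _concat_indexes simply chains the axis
# labels, e.g. Index(['a', 'b']) followed by Index(['c']) yields
# Index(['a', 'b', 'c']). Duplicate labels are kept here; they are only flagged
# later by _maybe_check_integrity when verify_integrity=True.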
def _make_concat_multiindex(indexes, keys, levels=None, names=None):
if ((levels is None and isinstance(keys[0], tuple)) or
(levels is not None and len(levels) > 1)):
zipped = compat.lzip(*keys)
if names is None:
names = [None] * len(zipped)
if levels is None:
_, levels = _factorize_from_iterables(zipped)
else:
levels = [_ensure_index(x) for x in levels]
else:
zipped = [keys]
if names is None:
names = [None]
if levels is None:
levels = [_ensure_index(keys)]
else:
levels = [_ensure_index(x) for x in levels]
if not _all_indexes_same(indexes):
label_list = []
# things are potentially different sizes, so compute the exact labels
# for each level and pass those to MultiIndex.from_arrays
for hlevel, level in zip(zipped, levels):
to_concat = []
for key, index in zip(hlevel, indexes):
try:
i = level.get_loc(key)
except KeyError:
raise ValueError('Key {key!s} not in level {level!s}'
.format(key=key, level=level))
to_concat.append(np.repeat(i, len(index)))
label_list.append(np.concatenate(to_concat))
concat_index = _concat_indexes(indexes)
# these go at the end
if isinstance(concat_index, MultiIndex):
levels.extend(concat_index.levels)
label_list.extend(concat_index.labels)
else:
codes, categories = _factorize_from_iterable(concat_index)
levels.append(categories)
label_list.append(codes)
if len(names) == len(levels):
names = list(names)
else:
# make sure that all of the passed indices have the same nlevels
if not len({idx.nlevels for idx in indexes}) == 1:
raise AssertionError("Cannot concat indices that do"
" not have the same number of levels")
# also copies
names = names + _get_consensus_names(indexes)
return MultiIndex(levels=levels, labels=label_list, names=names,
verify_integrity=False)
new_index = indexes[0]
n = len(new_index)
kpieces = len(indexes)
# also copies
new_names = list(names)
new_levels = list(levels)
# construct labels
new_labels = []
# do something a bit more speedy
for hlevel, level in zip(zipped, levels):
hlevel = _ensure_index(hlevel)
mapped = level.get_indexer(hlevel)
mask = mapped == -1
if mask.any():
raise ValueError('Values not found in passed level: {hlevel!s}'
.format(hlevel=hlevel[mask]))
new_labels.append(np.repeat(mapped, n))
if isinstance(new_index, MultiIndex):
new_levels.extend(new_index.levels)
new_labels.extend([np.tile(lab, kpieces) for lab in new_index.labels])
else:
new_levels.append(new_index)
new_labels.append(np.tile(np.arange(n), kpieces))
if len(new_names) < len(new_levels):
new_names.extend(new_index.names)
return MultiIndex(levels=new_levels, labels=new_labels, names=new_names,
verify_integrity=False)
| bsd-3-clause |
Juanlu001/aquagpusph | examples/3D/spheric_testcase2_dambreak/cMake/plot_t.py | 7 | 9347 | #******************************************************************************
# *
# * ** * * * * *
# * * * * * * * * * *
# ***** * * * * ***** ** *** * * ** *** *** *
# * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * *
# * * ** * ** * * *** *** *** ** *** * * *
# * * * *
# ** * * *
# *
#******************************************************************************
# *
# This file is part of AQUAgpusph, a free CFD program based on SPH. *
# Copyright (C) 2012 Jose Luis Cercos Pita <[email protected]> *
# *
# AQUAgpusph is free software: you can redistribute it and/or modify *
# it under the terms of the GNU General Public License as published by *
# the Free Software Foundation, either version 3 of the License, or *
# (at your option) any later version. *
# *
# AQUAgpusph is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# GNU General Public License for more details. *
# *
# You should have received a copy of the GNU General Public License *
# along with AQUAgpusph. If not, see <http://www.gnu.org/licenses/>. *
# *
#******************************************************************************
import math
import sys
import os
from os import path
try:
from PyQt4 import QtGui, QtCore
except:
try:
from PySide import QtGui, QtCore
except:
raise ImportError("PyQt4 or PySide is required to use this tool")
try:
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
except:
raise ImportError("matplotlib is required to use this tool")
class FigureController(FigureCanvas):
"""Matplotlib figure widget controller"""
def __init__(self, parent=None):
"""Constructor"""
# Create the figure in the canvas
self.fig = Figure()
self.ax = self.fig.add_subplot(111)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
# generates first "empty" plot
t = [0.0]
e = [0.0]
self.fove = self.ax.fill_between(t,
0,
e,
facecolor='red',
linewidth=0.0)
self.fave = self.ax.fill_between(t,
0,
e,
facecolor='blue',
linestyle="-",
linewidth=0.0)
self.love, = self.ax.plot(t,
e,
color='#990000',
linestyle="-",
linewidth=2.0,
label='Average overhead')
self.lave, = self.ax.plot(t,
e,
color='#000099',
linestyle="-",
linewidth=2.0,
label='Average elapsed')
self.line, = self.ax.plot(t,
e,
color="black",
linestyle="-",
linewidth=1.0,
alpha=0.5,
label='Elapsed')
# Set some options
self.ax.grid()
self.ax.set_xlim(0, 0.1)
self.ax.set_ylim(-0.1, 0.1)
self.ax.set_autoscale_on(False)
self.ax.set_xlabel(r"$t \, [\mathrm{s}]$", fontsize=21)
self.ax.set_ylabel(r"$t_{CPU} \, [\mathrm{s}]$", fontsize=21)
self.ax.legend(handles=[self.lave, self.love, self.line],
loc='upper right')
# force the figure redraw
self.fig.canvas.draw()
# call the update method (to speed-up visualization)
self.timerEvent(None)
# start timer, trigger event every 10000 millisecs (=10sec)
self.timer = self.startTimer(1000)
def readFile(self, filepath):
""" Read and extract data from a file
:param filepath File ot read
"""
abspath = filepath
if not path.isabs(filepath):
abspath = path.join(path.dirname(path.abspath(__file__)), filepath)
# Read the file by lines
f = open(abspath, "r")
lines = f.readlines()
f.close()
data = []
for l in lines[:-1]: # Skip the last line, which may be unready
l = l.strip()
            while l.find('  ') != -1:
                l = l.replace('  ', ' ')
fields = l.split(' ')
try:
                data.append(list(map(float, fields)))
except:
continue
# Transpose the data
        return list(map(list, zip(*data)))
def timerEvent(self, evt):
"""Custom timerEvent code, called at timer event receive"""
# Read and plot the new data
data = self.readFile('Performance.dat')
t = data[0]
e = data[1]
e_ela = data[2]
e_ove = data[5]
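        # Column layout assumed for Performance.dat (inferred from the code
        # below, not from a format spec): column 0 holds the simulation time,
        # column 1 the instantaneous elapsed time, column 2 the average elapsed
        # time and column 5 the average overhead; other columns are unused here.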
# Clear nan values
for i in range(len(e_ela)):
if math.isnan(e_ela[i]):
e_ela[i] = 0.0
if math.isnan(e_ove[i]):
e_ove[i] = 0.0
e_ave = [e_ela[i] - e_ove[i] for i in range(len(e_ela))]
# clear the fills
        for coll in list(self.ax.collections):
            coll.remove()
self.fove = self.ax.fill_between(t,
0,
e_ela,
facecolor='red',
linestyle="-",
linewidth=2.0)
self.fave = self.ax.fill_between(t,
0,
e_ave,
facecolor='blue',
linestyle="-",
linewidth=2.0)
self.love.set_data(t, e_ela)
self.lave.set_data(t, e_ave)
self.line.set_data(t, e)
self.ax.set_xlim(0, t[-1])
self.ax.set_ylim(0, 1.5 * e_ela[-1])
# Redraw
self.fig.canvas.draw()
class ApplicationWindow(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.main_widget = QtGui.QWidget(self)
lv = QtGui.QVBoxLayout(self.main_widget)
plot_widget = FigureController(self.main_widget)
show_inst_ela = QtGui.QCheckBox('Show instantaneous elapsed time')
        show_inst_ela.setChecked(True)
show_inst_ela.setTristate(False)
lv.addWidget(plot_widget)
lv.addWidget(show_inst_ela)
show_inst_ela.stateChanged.connect(self.showInstEla)
self.show_inst_ela = show_inst_ela
self.plot_widget = plot_widget
self.main_widget.setFocus()
self.setCentralWidget(self.main_widget)
def showInstEla(self, state):
if not state:
self.plot_widget.line.set_alpha(0.0)
self.plot_widget.ax.legend(handles=[self.plot_widget.lave,
self.plot_widget.love],
loc='upper right')
return
self.plot_widget.ax.legend(handles=[self.plot_widget.lave,
self.plot_widget.love,
self.plot_widget.line],
loc='upper right')
self.plot_widget.line.set_alpha(0.5)
return
def fileQuit(self):
self.close()
def closeEvent(self, ce):
self.fileQuit()
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
aw = ApplicationWindow()
aw.setWindowTitle("Performance")
aw.show()
sys.exit(app.exec_()) | gpl-3.0 |
marcocaccin/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
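# Note on the pairing (illustrative values): pairwise_distances_argmin returns,
# for each KMeans center, the index of the nearest MiniBatchKMeans center, so
# e.g. order = [2, 0, 1] would mean KMeans cluster 0 is matched with
# MiniBatchKMeans cluster 2, cluster 1 with 0, and cluster 2 with 1.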
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
NKI-CCB/imfusion | src/imfusion/expression/stats.py | 2 | 2730 | # -*- coding: utf-8 -*-
"""Implements a statistical models used for DE tests."""
# pylint: disable=wildcard-import,redefined-builtin,unused-wildcard-import
from __future__ import absolute_import, division, print_function
from builtins import *
# pylint: enable=wildcard-import,redefined-builtin,unused-wildcard-import
import numpy as np
try:
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import importr
pandas2ri.activate()
# Note robject_translations is no longer needed in Rpy2 2.6+,
# but is kept for compatibility with older Rpy2 versions.
r_mass = importr('MASS')
r_stats = importr(
'stats', robject_translations={'format_perc': '_format_perc'})
except ImportError:
r_mass = None
r_stats = None
class NegativeBinomial(object):
""" Models a negative binomial distribution.
NegativeBinomial class that wraps functionality from the R MASS
and stats packages for fitting and evaluating Negative Binomials.
Requires rpy2 to be installed.
"""
def __init__(self, mu=None, size=None):
self._check_rpy()
self._mu = mu
self._size = size
@classmethod
def fit(cls, data):
"""First the distribution using the given observations."""
cls._check_rpy()
fit = r_mass.fitdistr(data, densfun='negative binomial')
fit = dict(zip(fit.names, [np.array(item) for item in fit]))
mu, size = fit['estimate'][1], fit['estimate'][0]
return cls(mu=mu, size=size)
@staticmethod
def _check_rpy():
"""Checks if Mass and Stats are available from rpy2."""
if r_mass is None or r_stats is None:
raise ValueError('Rpy2 must be installed to use the '
'NegativeBinomial distribution.')
def pdf(self, x, log=False):
"""Returns the pdf of the distribution."""
return np.array(
r_stats.dnbinom(
x=x, mu=self._mu, size=self._size, log=log))
def cdf(self, q, log_p=False):
"""Returns the cdf of the distribution."""
return self._pnbinom(
q, mu=self._mu, size=self._size, lower_tail=True, log_p=log_p)
def sf(self, q, log_p=False):
"""Returns the sf of the distribution."""
return self._pnbinom(
q, mu=self._mu, size=self._size, lower_tail=False, log_p=log_p)
@staticmethod
def _pnbinom(q, mu, size, **kwargs):
p_val = r_stats.pnbinom(q, mu=mu, size=size, **kwargs)
return p_val[0] if len(p_val) == 1 else np.array(p_val)
def __repr__(self):
        return 'NegativeBinomial(mu={}, size={})'.format(self._mu, self._size)
def __str__(self):
return self.__repr__()
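# Hedged usage sketch (hypothetical data; requires a working R/rpy2 setup with
# the MASS package installed):
#
# >>> counts = np.random.negative_binomial(5, 0.3, size=1000)
# >>> nb = NegativeBinomial.fit(counts)
# >>> tail_prob = nb.sf(40)  # P(X > 40) under the fitted model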
| mit |
kmather73/ggplot | ggplot/tests/test_geom_lines.py | 12 | 4895 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six.moves import xrange
from nose.tools import assert_equal, assert_true, assert_raises
from . import get_assert_same_ggplot, cleanup
assert_same_ggplot = get_assert_same_ggplot(__file__)
from ggplot import *
from ggplot.exampledata import diamonds
import numpy as np
import pandas as pd
def _build_line_df():
np.random.seed(7776)
df = pd.DataFrame({'wt': mtcars['wt'][:10],
'mpg': mtcars['mpg'][:10],
'a': np.random.normal(15, size=10),
'b': np.random.normal(0, size=10)
})
return df
@cleanup
def test_geom_abline():
df = _build_line_df()
gg = ggplot(df, aes(x='wt', y='mpg'))
gg = gg + geom_point() + geom_abline(intercept=22, slope=.8, size=10)
assert_same_ggplot(gg, 'geom_abline')
@cleanup
def test_geom_abline_multiple():
df = _build_line_df()
gg = ggplot(df, aes(x='wt', y='mpg'))
gg = gg + geom_point() + geom_abline(intercept=(20, 12),
slope=(.8, 2),
color=('red', 'blue'),
alpha=(.3, .9),
size=(10, 20))
assert_same_ggplot(gg, 'geom_abline_multiple')
@cleanup
def test_geom_abline_mapped():
df = _build_line_df()
gg = ggplot(df, aes(x='wt', y='mpg', intercept='a', slope='b'))
gg = gg + geom_point() + geom_abline(size=2)
assert_same_ggplot(gg, 'geom_abline_mapped')
# TODO: Uncomment when the handling is proper
@cleanup
def test_geom_abline_functions():
df = _build_line_df()
def sfunc(x, y):
return (y.iloc[-1] - y.iloc[0]) / (x.iloc[-1] - x.iloc[0])
def ifunc(x, y):
return np.mean(y)
gg = ggplot(df, aes(x='wt', y='mpg'))
# Note, could have done intercept=np.mean
gg = gg + geom_point() + geom_abline(aes(x='wt', y='mpg'),
slope=sfunc,
intercept=ifunc)
assert_same_ggplot(gg, 'geom_abline_functions')
@cleanup
def test_geom_vline():
df = _build_line_df()
gg = ggplot(df, aes(x='wt', y='mpg'))
gg = gg + geom_point() + geom_vline(xintercept=3, size=10)
assert_same_ggplot(gg, 'geom_vline')
@cleanup
def test_geom_vline_multiple():
df = _build_line_df()
gg = ggplot(df, aes(x='wt', y='mpg'))
gg = gg + geom_point() + geom_vline(xintercept=[2.5, 3.5], size=10)
assert_same_ggplot(gg, 'geom_vline_multiple')
@cleanup
def test_geom_vline_mapped():
df = _build_line_df()
gg = ggplot(df, aes(x='wt', y='mpg', xintercept='wt'))
gg = (gg + geom_point(size=200, color='green', fill='blue', alpha=.7) +
geom_vline(size=2))
assert_same_ggplot(gg, 'geom_vline_mapped')
@cleanup
def test_geom_vline_function():
df = _build_line_df()
gg = ggplot(df, aes(x='wt', y='mpg'))
def ifunc(x):
return np.mean(x)
gg = gg + geom_point() + geom_vline(aes(x='wt'), xintercept=ifunc)
assert_same_ggplot(gg, 'geom_vline_function')
@cleanup
def test_geom_hline():
df = _build_line_df()
gg = ggplot(df, aes(x='wt', y='mpg'))
gg = gg + geom_point() + geom_hline(yintercept=20, size=10)
assert_same_ggplot(gg, 'geom_hline')
@cleanup
def test_geom_hline_multiple():
df = _build_line_df()
gg = ggplot(df, aes(x='wt', y='mpg'))
gg = gg + geom_point() + geom_hline(yintercept=[16., 24.], size=10)
assert_same_ggplot(gg, 'geom_hline_multiple')
@cleanup
def test_geom_hline_mapped():
df = _build_line_df()
gg = ggplot(df, aes(x='wt', y='mpg', yintercept='mpg'))
gg = (gg + geom_point(size=150, color='blue', alpha=.5) +
geom_hline(size=2))
assert_same_ggplot(gg, 'geom_hline_mapped')
@cleanup
def test_geom_hline_function():
df = _build_line_df()
gg = ggplot(df, aes(x='wt', y='mpg'))
def ifunc(y):
return np.mean(y)
gg = gg + geom_point() + geom_hline(aes(y='mpg'), yintercept=ifunc)
assert_same_ggplot(gg, 'geom_hline_function')
@cleanup
def test_geom_festival_of_lines():
# All 3 lines should intersect at the point of the same color.
# Horizontal and vertical will overlap for points on the same line
df = _build_line_df()
df['color'] = range(len(df['wt']))
def xfunc(x):
return x
def yfunc(y):
return y
def ifunc(x, y):
return y - 5 * x
def sfunc(x, y):
return 5
gg = ggplot(df, aes(x='wt', y='mpg', color='factor(color)'))
gg = (gg + geom_point(size=150, alpha=.9) +
geom_abline(size=2, intercept=ifunc, slope=sfunc) +
geom_vline(size=2, xintercept=xfunc) +
geom_hline(size=2, yintercept=yfunc))
assert_same_ggplot(gg, 'geom_festival_of_lines')
| bsd-2-clause |
PalNilsson/pilot2 | pilot/user/atlas/setup.py | 1 | 17850 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Paul Nilsson, [email protected], 2017-2020
import os
import re
import glob
from time import sleep
from pilot.common.errorcodes import ErrorCodes
from pilot.common.exception import NoSoftwareDir
from pilot.info import infosys
from pilot.util.container import execute
from pilot.util.filehandling import read_file, write_file, copy
from .metadata import get_file_info_from_xml
import logging
logger = logging.getLogger(__name__)
errors = ErrorCodes()
def get_file_system_root_path():
"""
Return the root path of the local file system.
The function returns "/cvmfs" or "/(some path)/cvmfs" in case the expected file system root path is not
where it usually is (e.g. on an HPC). A site can set the base path by exporting ATLAS_SW_BASE.
:return: path (string)
"""
return os.environ.get('ATLAS_SW_BASE', '/cvmfs')
def should_pilot_prepare_setup(noexecstrcnv, jobpars, imagename=None):
"""
Determine whether the pilot should add the setup to the payload command or not.
The pilot will not add asetup if jobPars already contain the information (i.e. it was set by the payload creator).
If noExecStrCnv is set, then jobPars is expected to contain asetup.sh + options
If a stand-alone container / user defined container is used, pilot should not prepare asetup.
:param noexecstrcnv: boolean.
:param jobpars: job parameters (string).
:param imagename: container image (string).
:return: boolean.
"""
if imagename:
return False
if noexecstrcnv:
if "asetup.sh" in jobpars:
logger.info("asetup will be taken from jobPars")
preparesetup = False
else:
logger.info("noExecStrCnv is set but asetup command was not found in jobPars (pilot will prepare asetup)")
preparesetup = True
else:
logger.info("pilot will prepare the setup")
preparesetup = True
return preparesetup
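# Hedged examples (hypothetical jobPars strings), illustrating the decision above:
#
# >>> should_pilot_prepare_setup(True, 'source $AtlasSetup/scripts/asetup.sh 21.0.15; Sim_tf.py ...')
# False
# >>> should_pilot_prepare_setup(True, 'Sim_tf.py ...')
# True
# >>> should_pilot_prepare_setup(False, 'Sim_tf.py ...')
# True
# >>> should_pilot_prepare_setup(False, 'Sim_tf.py ...', imagename='my_container.sif')
# False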
def get_alrb_export(add_if=False):
"""
Return the export command for the ALRB path if it exists.
If the path does not exist, return empty string.
:param add_if: Boolean. True means that an if statement will be placed around the export.
:return: export command
"""
path = "%s/atlas.cern.ch/repo" % get_file_system_root_path()
cmd = "export ATLAS_LOCAL_ROOT_BASE=%s/ATLASLocalRootBase;" % path if os.path.exists(path) else ""
# if [ -z "$ATLAS_LOCAL_ROOT_BASE" ]; then export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase; fi;
if cmd and add_if:
cmd = 'if [ -z \"$ATLAS_LOCAL_ROOT_BASE\" ]; then ' + cmd + ' fi;'
return cmd
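# Hedged illustration: on a node where /cvmfs/atlas.cern.ch/repo exists (and
# ATLAS_SW_BASE is not set), get_alrb_export(add_if=True) would return
# 'if [ -z "$ATLAS_LOCAL_ROOT_BASE" ]; then export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase; fi;'
# and an empty string if the path is missing.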
def get_asetup(asetup=True, alrb=False, add_if=False):
"""
Define the setup for asetup, i.e. including full path to asetup and setting of ATLAS_LOCAL_ROOT_BASE
Only include the actual asetup script if asetup=True. This is not needed if the jobPars contain the payload command
but the pilot still needs to add the exports and the atlasLocalSetup.
:param asetup: Boolean. True value means that the pilot should include the asetup command.
:param alrb: Boolean. True value means that the function should return special setup used with ALRB and containers.
:param add_if: Boolean. True means that an if statement will be placed around the export.
:raises: NoSoftwareDir if appdir does not exist.
:return: source <path>/asetup.sh (string).
"""
cmd = ""
alrb_cmd = get_alrb_export(add_if=add_if)
if alrb_cmd != "":
cmd = alrb_cmd
if not alrb:
cmd += "source ${ATLAS_LOCAL_ROOT_BASE}/user/atlasLocalSetup.sh --quiet;"
if asetup:
cmd += "source $AtlasSetup/scripts/asetup.sh"
else:
try: # use try in case infosys has not been initiated
appdir = infosys.queuedata.appdir
except Exception:
appdir = ""
if appdir == "":
appdir = os.environ.get('VO_ATLAS_SW_DIR', '')
if appdir != "":
# make sure that the appdir exists
if not os.path.exists(appdir):
msg = 'appdir does not exist: %s' % appdir
logger.warning(msg)
raise NoSoftwareDir(msg)
if asetup:
cmd = "source %s/scripts/asetup.sh" % appdir
# do not return an empty string
#if not cmd:
# cmd = "what?"
return cmd
def get_asetup_options(release, homepackage):
"""
Determine the proper asetup options.
:param release: ATLAS release string.
:param homepackage: ATLAS homePackage string.
:return: asetup options (string).
"""
asetupopt = []
release = re.sub('^Atlas-', '', release)
# is it a user analysis homePackage?
if 'AnalysisTransforms' in homepackage:
_homepackage = re.sub('^AnalysisTransforms-*', '', homepackage)
if _homepackage == '' or re.search(r'^\d+\.\d+\.\d+$', release) is None: # Python 3 (added r)
if release != "":
asetupopt.append(release)
if _homepackage != '':
asetupopt += _homepackage.split('_')
else:
asetupopt += homepackage.split('/')
if release not in homepackage and release not in asetupopt:
asetupopt.append(release)
# Add the notest,here for all setups (not necessary for late releases but harmless to add)
asetupopt.append('notest')
# asetupopt.append('here')
# Add the fast option if possible (for the moment, check for locally defined env variable)
if "ATLAS_FAST_ASETUP" in os.environ:
asetupopt.append('fast')
return ','.join(asetupopt)
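# Hedged example (hypothetical release/homePackage pair, ATLAS_FAST_ASETUP unset):
#
# >>> get_asetup_options('Atlas-21.0.15', 'AtlasOffline/21.0.15')
# 'AtlasOffline,21.0.15,notest'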
def is_standard_atlas_job(release):
"""
Is it a standard ATLAS job?
A job is a standard ATLAS job if the release string begins with 'Atlas-'.
:param release: Release value (string).
:return: Boolean. Returns True if standard ATLAS job.
"""
return release.startswith('Atlas-')
def set_inds(dataset):
"""
Set the INDS environmental variable used by runAthena.
:param dataset: dataset for input files (realDatasetsIn) (string).
:return:
"""
inds = ""
_dataset = dataset.split(',')
for ds in _dataset:
if "DBRelease" not in ds and ".lib." not in ds:
inds = ds
break
if inds != "":
logger.info("setting INDS environmental variable to: %s" % (inds))
os.environ['INDS'] = inds
else:
logger.warning("INDS unknown")
def get_analysis_trf(transform, workdir):
"""
Prepare to download the user analysis transform with curl.
The function will verify the download location from a known list of hosts.
:param transform: full trf path (url) (string).
:param workdir: work directory (string).
:return: exit code (int), diagnostics (string), transform_name (string)
"""
ec = 0
diagnostics = ""
# test if $HARVESTER_WORKDIR is set
harvester_workdir = os.environ.get('HARVESTER_WORKDIR')
if harvester_workdir is not None:
search_pattern = "%s/jobO.*.tar.gz" % harvester_workdir
logger.debug("search_pattern - %s" % search_pattern)
jobopt_files = glob.glob(search_pattern)
for jobopt_file in jobopt_files:
logger.debug("jobopt_file = %s workdir = %s" % (jobopt_file, workdir))
try:
copy(jobopt_file, workdir)
except Exception as e:
logger.error("could not copy file %s to %s : %s" % (jobopt_file, workdir, e))
if '/' in transform:
transform_name = transform.split('/')[-1]
else:
logger.warning('did not detect any / in %s (using full transform name)' % transform)
transform_name = transform
# is the command already available? (e.g. if already downloaded by a preprocess/main process step)
if os.path.exists(os.path.join(workdir, transform_name)):
logger.info('script %s is already available - no need to download again' % transform_name)
return ec, diagnostics, transform_name
original_base_url = ""
# verify the base URL
for base_url in get_valid_base_urls():
if transform.startswith(base_url):
original_base_url = base_url
break
if original_base_url == "":
diagnostics = "invalid base URL: %s" % transform
return errors.TRFDOWNLOADFAILURE, diagnostics, ""
# try to download from the required location, if not - switch to backup
status = False
for base_url in get_valid_base_urls(order=original_base_url):
trf = re.sub(original_base_url, base_url, transform)
logger.debug("attempting to download script: %s" % trf)
status, diagnostics = download_transform(trf, transform_name, workdir)
if status:
break
if not status:
return errors.TRFDOWNLOADFAILURE, diagnostics, ""
logger.info("successfully downloaded script")
path = os.path.join(workdir, transform_name)
logger.debug("changing permission of %s to 0o755" % path)
try:
os.chmod(path, 0o755) # Python 2/3
except Exception as e:
diagnostics = "failed to chmod %s: %s" % (transform_name, e)
return errors.CHMODTRF, diagnostics, ""
return ec, diagnostics, transform_name
def download_transform(url, transform_name, workdir):
"""
Download the transform from the given url
:param url: download URL with path to transform (string).
:param transform_name: trf name (string).
:param workdir: work directory (string).
:return:
"""
status = False
diagnostics = ""
path = os.path.join(workdir, transform_name)
cmd = 'curl -sS \"%s\" > %s' % (url, path)
trial = 1
max_trials = 3
# test if $HARVESTER_WORKDIR is set
harvester_workdir = os.environ.get('HARVESTER_WORKDIR')
if harvester_workdir is not None:
# skip curl by setting max_trials = 0
max_trials = 0
source_path = os.path.join(harvester_workdir, transform_name)
try:
copy(source_path, path)
status = True
except Exception as error:
status = False
diagnostics = "Failed to copy file %s to %s : %s" % (source_path, path, error)
logger.error(diagnostics)
# try to download the trf a maximum of 3 times
while trial <= max_trials:
logger.info("executing command [trial %d/%d]: %s" % (trial, max_trials, cmd))
exit_code, stdout, stderr = execute(cmd, mute=True)
if not stdout:
stdout = "(None)"
if exit_code != 0:
# Analyze exit code / output
diagnostics = "curl command failed: %d, %s, %s" % (exit_code, stdout, stderr)
logger.warning(diagnostics)
if trial == max_trials:
logger.fatal('could not download transform: %s' % stdout)
status = False
break
else:
logger.info("will try again after 60 s")
sleep(60)
else:
logger.info("curl command returned: %s" % stdout)
status = True
break
trial += 1
return status, diagnostics
def get_valid_base_urls(order=None):
"""
Return a list of valid base URLs from where the user analysis transform may be downloaded from.
If order is defined, return given item first.
E.g. order=http://atlpan.web.cern.ch/atlpan -> ['http://atlpan.web.cern.ch/atlpan', ...]
NOTE: the URL list may be out of date.
:param order: order (string).
:return: valid base URLs (list).
"""
valid_base_urls = []
_valid_base_urls = ["http://www.usatlas.bnl.gov",
"https://www.usatlas.bnl.gov",
"http://pandaserver.cern.ch",
"http://atlpan.web.cern.ch/atlpan",
"https://atlpan.web.cern.ch/atlpan",
"http://classis01.roma1.infn.it",
"http://atlas-install.roma1.infn.it"]
if order:
valid_base_urls.append(order)
for url in _valid_base_urls:
if url != order:
valid_base_urls.append(url)
else:
valid_base_urls = _valid_base_urls
return valid_base_urls
def get_payload_environment_variables(cmd, job_id, task_id, attempt_nr, processing_type, site_name, analysis_job):
"""
Return an array with enviroment variables needed by the payload.
:param cmd: payload execution command (string).
:param job_id: PanDA job id (string).
:param task_id: PanDA task id (string).
:param attempt_nr: PanDA job attempt number (int).
:param processing_type: processing type (string).
:param site_name: site name (string).
:param analysis_job: True for user analysis jobs, False otherwise (boolean).
:return: list of environment variables needed by the payload.
"""
variables = []
variables.append('export PANDA_RESOURCE=\'%s\';' % site_name)
variables.append('export FRONTIER_ID=\"[%s_%s]\";' % (task_id, job_id))
variables.append('export CMSSW_VERSION=$FRONTIER_ID;')
variables.append('export PandaID=%s;' % os.environ.get('PANDAID', 'unknown'))
variables.append('export PanDA_TaskID=\'%s\';' % os.environ.get('PanDA_TaskID', 'unknown'))
variables.append('export PanDA_AttemptNr=\'%d\';' % attempt_nr)
variables.append('export INDS=\'%s\';' % os.environ.get('INDS', 'unknown'))
# Unset ATHENA_PROC_NUMBER if set for event service Merge jobs
if "Merge_tf" in cmd and 'ATHENA_PROC_NUMBER' in os.environ:
variables.append('unset ATHENA_PROC_NUMBER;')
variables.append('unset ATHENA_CORE_NUMBER;')
if analysis_job:
variables.append('export ROOT_TTREECACHE_SIZE=1;')
try:
core_count = int(os.environ.get('ATHENA_PROC_NUMBER'))
except Exception:
_core_count = 'export ROOTCORE_NCPUS=1;'
else:
_core_count = 'export ROOTCORE_NCPUS=%d;' % core_count
variables.append(_core_count)
if processing_type == "":
logger.warning("RUCIO_APPID needs job.processingType but it is not set!")
else:
variables.append('export RUCIO_APPID=\'%s\';' % processing_type)
variables.append('export RUCIO_ACCOUNT=\'%s\';' % os.environ.get('RUCIO_ACCOUNT', 'pilot'))
return variables
def get_writetoinput_filenames(writetofile):
"""
Extract the writeToFile file name(s).
writeToFile='tmpin_mc16_13TeV.345935.PhPy8EG_A14_ttbarMET100_200_hdamp258p75_nonallhad.merge.AOD.e6620_e5984_s3126_r10724_r10726_tid15760866_00:AOD.15760866._000002.pool.root.1'
-> return 'tmpin_mc16_13TeV.345935.PhPy8EG_A14_ttbarMET100_200_hdamp258p75_nonallhad.merge.AOD.e6620_e5984_s3126_r10724_r10726_tid15760866_00'
:param writetofile: string containing file name information.
:return: list of file names
"""
filenames = []
entries = writetofile.split('^')
for entry in entries:
if ':' in entry:
name = entry.split(":")[0]
name = name.replace('.pool.root.', '.txt.') # not necessary?
filenames.append(name)
return filenames
def replace_lfns_with_turls(cmd, workdir, filename, infiles, writetofile=""):
"""
Replace all LFNs with full TURLs in the payload execution command.
This function is used with direct access in production jobs. Athena requires a full TURL instead of LFN.
:param cmd: payload execution command (string).
:param workdir: location of metadata file (string).
:param filename: metadata file name (string).
:param infiles: list of input files.
:param writetofile:
:return: updated cmd (string).
"""
turl_dictionary = {} # { LFN: TURL, ..}
path = os.path.join(workdir, filename)
if os.path.exists(path):
file_info_dictionary = get_file_info_from_xml(workdir, filename=filename)
for inputfile in infiles:
if inputfile in cmd:
turl = file_info_dictionary[inputfile][0]
turl_dictionary[inputfile] = turl
# if turl.startswith('root://') and turl not in cmd:
if turl not in cmd:
cmd = cmd.replace(inputfile, turl)
logger.info("replaced '%s' with '%s' in the run command" % (inputfile, turl))
# replace the LFNs with TURLs in the writetofile input file list (if it exists)
if writetofile and turl_dictionary:
filenames = get_writetoinput_filenames(writetofile)
logger.info("filenames=%s" % filenames)
for fname in filenames:
new_lines = []
path = os.path.join(workdir, fname)
if os.path.exists(path):
f = read_file(path)
for line in f.split('\n'):
fname = os.path.basename(line)
if fname in turl_dictionary:
turl = turl_dictionary[fname]
new_lines.append(turl)
else:
if line:
new_lines.append(line)
lines = '\n'.join(new_lines)
if lines:
write_file(path, lines)
logger.info("lines=%s" % lines)
else:
logger.warning("file does not exist: %s" % path)
else:
logger.warning("could not find file: %s (cannot locate TURLs for direct access)" % filename)
return cmd
| apache-2.0 |
wwf5067/statsmodels | statsmodels/sandbox/tsa/movstat.py | 34 | 14871 | '''using scipy signal and numpy correlate to calculate some time series
statistics
original developer notes
see also scikits.timeseries (movstat is partially inspired by it)
added 2009-08-29
timeseries moving stats are in c, autocorrelation similar to here
(I thought I saw moving stats somewhere in python, maybe not)
TODO
moving statistics
- filters don't handle boundary conditions nicely (correctly ?)
e.g. minimum order filter uses 0 for out of bounds value
-> append and prepend with last resp. first value
- enhance for nd arrays, with axis = 0
Note: Equivalence for 1D signals
>>> np.all(signal.correlate(x,[1,1,1],'valid')==np.correlate(x,[1,1,1]))
True
>>> np.all(ndimage.filters.correlate(x,[1,1,1], origin = -1)[:-3+1]==np.correlate(x,[1,1,1]))
True
# multidimensional, but, it looks like it uses common filter across time series, no VAR
ndimage.filters.correlate(np.vstack([x,x]),np.array([[1,1,1],[0,0,0]]), origin = 1)
ndimage.filters.correlate(x,[1,1,1],origin = 1))
ndimage.filters.correlate(np.vstack([x,x]),np.array([[0.5,0.5,0.5],[0.5,0.5,0.5]]), \
origin = 1)
>>> np.all(ndimage.filters.correlate(np.vstack([x,x]),np.array([[1,1,1],[0,0,0]]), origin = 1)[0]==\
ndimage.filters.correlate(x,[1,1,1],origin = 1))
True
>>> np.all(ndimage.filters.correlate(np.vstack([x,x]),np.array([[0.5,0.5,0.5],[0.5,0.5,0.5]]), \
origin = 1)[0]==ndimage.filters.correlate(x,[1,1,1],origin = 1))
update
2009-09-06: cosmetic changes, rearrangements
'''
from __future__ import print_function
import numpy as np
from scipy import signal
from numpy.testing import assert_array_equal, assert_array_almost_equal
import statsmodels.api as sm
def expandarr(x,k):
#make it work for 2D or nD with axis
kadd = k
if np.ndim(x) == 2:
kadd = (kadd, np.shape(x)[1])
return np.r_[np.ones(kadd)*x[0],x,np.ones(kadd)*x[-1]]
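# For illustration: expandarr(np.array([1., 2., 3.]), 2) pads both ends with the
# edge values and returns array([1., 1., 1., 2., 3., 3., 3.]).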
def movorder(x, order = 'med', windsize=3, lag='lagged'):
'''moving order statistics
Parameters
----------
x : array
time series data
order : float or 'med', 'min', 'max'
which order statistic to calculate
windsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
filtered array
'''
#if windsize is even should it raise ValueError
if lag == 'lagged':
lead = windsize//2
elif lag == 'centered':
lead = 0
elif lag == 'leading':
lead = -windsize//2 +1
else:
raise ValueError
    if isinstance(order, (int, float)) and np.isfinite(order):  # numeric order
ord = order # note: ord is a builtin function
elif order == 'med':
ord = (windsize - 1)/2
elif order == 'min':
ord = 0
elif order == 'max':
ord = windsize - 1
else:
raise ValueError
#return signal.order_filter(x,np.ones(windsize),ord)[:-lead]
xext = expandarr(x, windsize)
#np.r_[np.ones(windsize)*x[0],x,np.ones(windsize)*x[-1]]
return signal.order_filter(xext,np.ones(windsize),ord)[windsize-lead:-(windsize+lead)]
def check_movorder():
'''graphical test for movorder'''
import matplotlib.pylab as plt
x = np.arange(1,10)
xo = movorder(x, order='max')
assert_array_equal(xo, x)
x = np.arange(10,1,-1)
xo = movorder(x, order='min')
assert_array_equal(xo, x)
assert_array_equal(movorder(x, order='min', lag='centered')[:-1], x[1:])
tt = np.linspace(0,2*np.pi,15)
x = np.sin(tt) + 1
xo = movorder(x, order='max')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max lagged')
xo = movorder(x, order='max', lag='centered')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max centered')
xo = movorder(x, order='max', lag='leading')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max leading')
# identity filter
##>>> signal.order_filter(x,np.ones(1),0)
##array([ 1., 2., 3., 4., 5., 6., 7., 8., 9.])
# median filter
##signal.medfilt(np.sin(x), kernel_size=3)
##>>> plt.figure()
##<matplotlib.figure.Figure object at 0x069BBB50>
##>>> x=np.linspace(0,3,100);plt.plot(x,np.sin(x),x,signal.medfilt(np.sin(x), kernel_size=3))
# remove old version
##def movmeanvar(x, windowsize=3, valid='same'):
## '''
## this should also work along axis or at least for columns
## '''
## n = x.shape[0]
## x = expandarr(x, windowsize - 1)
## takeslice = slice(windowsize-1, n + windowsize-1)
## avgkern = (np.ones(windowsize)/float(windowsize))
## m = np.correlate(x, avgkern, 'same')#[takeslice]
## print(m.shape)
## print(x.shape)
## xm = x - m
## v = np.correlate(x*x, avgkern, 'same') - m**2
## v1 = np.correlate(xm*xm, avgkern, valid) #not correct for var of window
###>>> np.correlate(xm*xm,np.array([1,1,1])/3.0,'valid')-np.correlate(xm*xm,np.array([1,1,1])/3.0,'valid')**2
## return m[takeslice], v[takeslice], v1
def movmean(x, windowsize=3, lag='lagged'):
'''moving window mean
Parameters
----------
x : array
time series data
windsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : array
moving mean, with same shape as x
Notes
-----
for leading and lagging the data array x is extended by the closest value of the array
'''
return movmoment(x, 1, windowsize=windowsize, lag=lag)
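# Hedged example: movmean(np.arange(5.), windowsize=3, lag='lagged') gives
# array([0., 1/3, 1., 2., 3.]); the first two entries use the edge-padded
# values supplied by expandarr.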
def movvar(x, windowsize=3, lag='lagged'):
'''moving window variance
Parameters
----------
x : array
time series data
windsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : array
moving variance, with same shape as x
'''
m1 = movmoment(x, 1, windowsize=windowsize, lag=lag)
m2 = movmoment(x, 2, windowsize=windowsize, lag=lag)
return m2 - m1*m1
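# Hedged example: for x = np.arange(10.) and windowsize=3 the interior values of
# movvar(x, 3, lag='lagged') are all 2/3, matching np.var of each full
# length-3 window; the first two entries (0 and 2/9) are smaller because of the
# edge padding.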
def movmoment(x, k, windowsize=3, lag='lagged'):
'''non-central moment
Parameters
----------
x : array
time series data
windsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : array
k-th moving non-central moment, with same shape as x
Notes
-----
If data x is 2d, then moving moment is calculated for each
column.
'''
windsize = windowsize
#if windsize is even should it raise ValueError
if lag == 'lagged':
#lead = -0 + windsize #windsize//2
lead = -0# + (windsize-1) + windsize//2
sl = slice((windsize-1) or None, -2*(windsize-1) or None)
elif lag == 'centered':
lead = -windsize//2 #0#-1 #+ #(windsize-1)
sl = slice((windsize-1)+windsize//2 or None, -(windsize-1)-windsize//2 or None)
elif lag == 'leading':
#lead = -windsize +1#+1 #+ (windsize-1)#//2 +1
lead = -windsize +2 #-windsize//2 +1
sl = slice(2*(windsize-1)+1+lead or None, -(2*(windsize-1)+lead)+1 or None)
else:
raise ValueError
avgkern = (np.ones(windowsize)/float(windowsize))
xext = expandarr(x, windsize-1)
#Note: expandarr increases the array size by 2*(windsize-1)
#sl = slice(2*(windsize-1)+1+lead or None, -(2*(windsize-1)+lead)+1 or None)
print(sl)
if xext.ndim == 1:
return np.correlate(xext**k, avgkern, 'full')[sl]
#return np.correlate(xext**k, avgkern, 'same')[windsize-lead:-(windsize+lead)]
else:
print(xext.shape)
print(avgkern[:,None].shape)
# try first with 2d along columns, possibly ndim with axis
return signal.correlate(xext**k, avgkern[:,None], 'full')[sl,:]
#x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,[1],'full')
#x=0.5**np.arange(3);np.correlate(x,x,'same')
##>>> x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,xo,'full')
##
##>>> xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> xo
##xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> x=np.ones(10);xo=x-x.mean();a=np.correlate(xo,xo,'full')
##>>> xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> d
##array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 9.,
## 8., 7., 6., 5., 4., 3., 2., 1.])
##def ccovf():
## pass
## #x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,xo,'full')
__all__ = ['movorder', 'movmean', 'movvar', 'movmoment']
if __name__ == '__main__':
    print('\nchecking moving mean and variance')
nobs = 10
x = np.arange(nobs)
ws = 3
ave = np.array([ 0., 1/3., 1., 2., 3., 4., 5., 6., 7., 8.,
26/3., 9])
va = np.array([[ 0. , 0. ],
[ 0.22222222, 0.88888889],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.22222222, 0.88888889],
[ 0. , 0. ]])
ave2d = np.c_[ave, 2*ave]
print(movmean(x, windowsize=ws, lag='lagged'))
print(movvar(x, windowsize=ws, lag='lagged'))
print([np.var(x[i-ws:i]) for i in range(ws, nobs)])
m1 = movmoment(x, 1, windowsize=3, lag='lagged')
m2 = movmoment(x, 2, windowsize=3, lag='lagged')
print(m1)
print(m2)
print(m2 - m1*m1)
# this implicitly also tests moment
assert_array_almost_equal(va[ws-1:,0],
movvar(x, windowsize=3, lag='leading'))
assert_array_almost_equal(va[ws//2:-ws//2+1,0],
movvar(x, windowsize=3, lag='centered'))
assert_array_almost_equal(va[:-ws+1,0],
movvar(x, windowsize=ws, lag='lagged'))
print('\nchecking moving moment for 2d (columns only)')
x2d = np.c_[x, 2*x]
print(movmoment(x2d, 1, windowsize=3, lag='centered'))
print(movmean(x2d, windowsize=ws, lag='lagged'))
print(movvar(x2d, windowsize=ws, lag='lagged'))
assert_array_almost_equal(va[ws-1:,:],
movvar(x2d, windowsize=3, lag='leading'))
assert_array_almost_equal(va[ws//2:-ws//2+1,:],
movvar(x2d, windowsize=3, lag='centered'))
assert_array_almost_equal(va[:-ws+1,:],
movvar(x2d, windowsize=ws, lag='lagged'))
assert_array_almost_equal(ave2d[ws-1:],
movmoment(x2d, 1, windowsize=3, lag='leading'))
assert_array_almost_equal(ave2d[ws//2:-ws//2+1],
movmoment(x2d, 1, windowsize=3, lag='centered'))
assert_array_almost_equal(ave2d[:-ws+1],
movmean(x2d, windowsize=ws, lag='lagged'))
from scipy import ndimage
print(ndimage.filters.correlate1d(x2d, np.array([1,1,1])/3., axis=0))
#regression test check
xg = np.array([ 0. , 0.1, 0.3, 0.6, 1. , 1.5, 2.1, 2.8, 3.6,
4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5,
13.5, 14.5, 15.5, 16.5, 17.5, 18.5, 19.5, 20.5, 21.5,
22.5, 23.5, 24.5, 25.5, 26.5, 27.5, 28.5, 29.5, 30.5,
31.5, 32.5, 33.5, 34.5, 35.5, 36.5, 37.5, 38.5, 39.5,
40.5, 41.5, 42.5, 43.5, 44.5, 45.5, 46.5, 47.5, 48.5,
49.5, 50.5, 51.5, 52.5, 53.5, 54.5, 55.5, 56.5, 57.5,
58.5, 59.5, 60.5, 61.5, 62.5, 63.5, 64.5, 65.5, 66.5,
67.5, 68.5, 69.5, 70.5, 71.5, 72.5, 73.5, 74.5, 75.5,
76.5, 77.5, 78.5, 79.5, 80.5, 81.5, 82.5, 83.5, 84.5,
85.5, 86.5, 87.5, 88.5, 89.5, 90.5, 91.5, 92.5, 93.5,
94.5])
assert_array_almost_equal(xg, movmean(np.arange(100), 10,'lagged'))
xd = np.array([ 0.3, 0.6, 1. , 1.5, 2.1, 2.8, 3.6, 4.5, 5.5,
6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5, 13.5, 14.5,
15.5, 16.5, 17.5, 18.5, 19.5, 20.5, 21.5, 22.5, 23.5,
24.5, 25.5, 26.5, 27.5, 28.5, 29.5, 30.5, 31.5, 32.5,
33.5, 34.5, 35.5, 36.5, 37.5, 38.5, 39.5, 40.5, 41.5,
42.5, 43.5, 44.5, 45.5, 46.5, 47.5, 48.5, 49.5, 50.5,
51.5, 52.5, 53.5, 54.5, 55.5, 56.5, 57.5, 58.5, 59.5,
60.5, 61.5, 62.5, 63.5, 64.5, 65.5, 66.5, 67.5, 68.5,
69.5, 70.5, 71.5, 72.5, 73.5, 74.5, 75.5, 76.5, 77.5,
78.5, 79.5, 80.5, 81.5, 82.5, 83.5, 84.5, 85.5, 86.5,
87.5, 88.5, 89.5, 90.5, 91.5, 92.5, 93.5, 94.5, 95.4,
96.2, 96.9, 97.5, 98. , 98.4, 98.7, 98.9, 99. ])
assert_array_almost_equal(xd, movmean(np.arange(100), 10,'leading'))
xc = np.array([ 1.36363636, 1.90909091, 2.54545455, 3.27272727,
4.09090909, 5. , 6. , 7. ,
8. , 9. , 10. , 11. ,
12. , 13. , 14. , 15. ,
16. , 17. , 18. , 19. ,
20. , 21. , 22. , 23. ,
24. , 25. , 26. , 27. ,
28. , 29. , 30. , 31. ,
32. , 33. , 34. , 35. ,
36. , 37. , 38. , 39. ,
40. , 41. , 42. , 43. ,
44. , 45. , 46. , 47. ,
48. , 49. , 50. , 51. ,
52. , 53. , 54. , 55. ,
56. , 57. , 58. , 59. ,
60. , 61. , 62. , 63. ,
64. , 65. , 66. , 67. ,
68. , 69. , 70. , 71. ,
72. , 73. , 74. , 75. ,
76. , 77. , 78. , 79. ,
80. , 81. , 82. , 83. ,
84. , 85. , 86. , 87. ,
88. , 89. , 90. , 91. ,
92. , 93. , 94. , 94.90909091,
95.72727273, 96.45454545, 97.09090909, 97.63636364])
assert_array_almost_equal(xc, movmean(np.arange(100), 11,'centered'))
| bsd-3-clause |
akrherz/iem | scripts/climodat/check_database.py | 1 | 1942 | """Rectify climodat database entries."""
from io import StringIO
import sys
import pandas as pd
from pandas.io.sql import read_sql
from pyiem.network import Table as NetworkTable
from pyiem.util import get_dbconn, logger
LOG = logger()
def main(argv):
"""Go Main"""
state = argv[1]
nt = NetworkTable(f"{state}CLIMATE", only_online=False)
pgconn = get_dbconn("coop")
df = read_sql(
f"SELECT station, year, day from alldata_{state} "
"ORDER by station, day",
pgconn,
index_col=None,
)
for station, gdf in df.groupby("station"):
if station not in nt.sts:
LOG.info(
"station: %s is unknown to %sCLIMATE, skip", station, state
)
continue
# Make sure that our data archive starts on the first of a month
minday = gdf["day"].min().replace(day=1)
missing = pd.date_range(minday, gdf["day"].max()).difference(
gdf["day"]
)
if missing.empty:
continue
LOG.info(
"station: %s, missing: %s [%s - %s] has:%s days",
station,
len(missing),
missing.min().date(),
missing.max().date(),
len(gdf.index),
)
sio = StringIO()
for day in missing:
sio.write(
("%s,%s,%s,%s,%s\n")
% (
station,
day,
"%02i%02i" % (day.month, day.day),
day.year,
day.month,
)
)
sio.seek(0)
cursor = pgconn.cursor()
cursor.copy_from(
sio,
f"alldata_{state.lower()}",
columns=("station", "day", "sday", "year", "month"),
sep=",",
)
del sio
cursor.close()
pgconn.commit()
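# Illustrative note (added, assuming standard pandas semantics): the gap test
# above relies on DatetimeIndex.difference, e.g.
# pd.date_range("2020-01-01", "2020-01-05").difference(
#     pd.DatetimeIndex(["2020-01-01", "2020-01-03", "2020-01-05"]))
# returns the two missing days 2020-01-02 and 2020-01-04, which are then
# re-inserted via COPY from an in-memory StringIO buffer.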
if __name__ == "__main__":
main(sys.argv)
| mit |
bl4ckdu5t/registron | setup.py | 6 | 6544 | #! /usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
import stat
from setuptools import setup, find_packages
from distutils.command.build_py import build_py
from distutils.command.sdist import sdist
from PyInstaller import get_version
import PyInstaller.utils.git
DESC = ('Converts (packages) Python programs into stand-alone executables, '
'under Windows, Linux, Mac OS X, AIX and Solaris.')
LONG_DESC = """
PyInstaller is a program that converts (packages) Python
programs into stand-alone executables, under Windows, Linux, Mac OS X,
AIX and Solaris. Its main advantages over similar tools are that
PyInstaller works with any version of Python since 2.3, it builds smaller
executables thanks to transparent compression, it is fully multi-platform,
and uses the OS support to load the dynamic libraries, thus ensuring full
compatibility.
The main goal of PyInstaller is to be compatible with 3rd-party packages
out-of-the-box. This means that, with PyInstaller, all the required tricks
to make external packages work are already integrated within PyInstaller
itself so that there is no user intervention required. You'll never be
required to look for tricks in wikis and apply custom modification to your
files or your setup scripts. As an example, libraries like PyQt, Django or
matplotlib are fully supported, without having to handle plugins or
external data files manually.
"""
CLASSIFIERS = """
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Console
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Other Audience
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: GNU General Public License v2 (GPLv2)
Classifier: Natural Language :: English
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Operating System :: Microsoft :: Windows
Classifier: Operating System :: POSIX
Classifier: Operating System :: POSIX :: AIX
Classifier: Operating System :: POSIX :: Linux
Classifier: Operating System :: POSIX :: SunOS/Solaris
Classifier: Programming Language :: C
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.4
Classifier: Programming Language :: Python :: 2.5
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 2 :: Only
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Topic :: Software Development
Classifier: Topic :: Software Development :: Build Tools
Classifier: Topic :: System :: Installation/Setup
Classifier: Topic :: System :: Software Distribution
Classifier: Topic :: Utilities
""".splitlines()
# Make the distribution files to always report the git-revision used
# when building the distribution packages. This is done by replacing
# PyInstaller/utils/git.py within the dist/build by a fake-module
# which always returns the current git-revision. The original
# source-file is unchanged.
#
# This has to be done in 'build_py' for bdist-commands and in 'sdist'
# for sdist-commands.
def _write_git_version_file(filename):
"""
Fake PyInstaller.utils.git.py to always return the current revision.
"""
git_version = PyInstaller.utils.git.get_repo_revision()
st = os.stat(filename)
    # remove the file first in case it's hard-linked to the
# original file
os.remove(filename)
git_mod = open(filename, 'w')
template = "def get_repo_revision(): return %r"
try:
git_mod.write(template % git_version)
finally:
git_mod.close()
os.chmod(filename, stat.S_IMODE(st.st_mode))
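# For example (illustrative, with a made-up revision string): if the current
# repository revision were 'a1b2c3d', the rewritten PyInstaller/utils/git.py in
# the build tree would contain just:
#     def get_repo_revision(): return 'a1b2c3d'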
class my_build_py(build_py):
def build_module(self, module, module_file, package):
res = build_py.build_module(self, module, module_file, package)
if module == 'git' and package == 'PyInstaller.utils':
filename = self.get_module_outfile(
self.build_lib, package.split('.'), module)
_write_git_version_file(filename)
return res
class my_sdist(sdist):
def make_release_tree(self, base_dir, files):
res = sdist.make_release_tree(self, base_dir, files)
build_py = self.get_finalized_command('build_py')
filename = build_py.get_module_outfile(
base_dir, ['PyInstaller', 'utils'], 'git')
_write_git_version_file(filename)
return res
setup(
install_requires=['distribute'],
name='PyInstaller',
version=get_version(),
description=DESC,
long_description=LONG_DESC,
keywords='packaging, standalone executable, pyinstaller, macholib, freeze, py2exe, py2app, bbfreeze',
author='Giovanni Bajo, Hartmut Goebel, Martin Zibricky',
author_email='[email protected]',
maintainer='Giovanni Bajo, Hartmut Goebel, Martin Zibricky',
maintainer_email='[email protected]',
license=('GPL license with a special exception which allows to use '
'PyInstaller to build and distribute non-free programs '
'(including commercial ones)'),
url='http://www.pyinstaller.org',
download_url='https://sourceforge.net/projects/pyinstaller/files',
classifiers=CLASSIFIERS,
zip_safe=False,
packages=find_packages(),
package_data={
# This includes precompiled bootloaders.
'PyInstaller': ['bootloader/*/*'],
# This file is necessary for rthooks (runtime hooks).
'PyInstaller.loader': ['rthooks.dat'],
},
include_package_data=True,
cmdclass = {
'sdist': my_sdist,
'build_py': my_build_py,
},
entry_points="""
[console_scripts]
pyinstaller=PyInstaller.main:run
pyi-archive_viewer=PyInstaller.cliutils.archive_viewer:run
pyi-bindepend=PyInstaller.cliutils.bindepend:run
pyi-build=PyInstaller.cliutils.build:run
pyi-grab_version=PyInstaller.cliutils.grab_version:run
pyi-make_comserver=PyInstaller.cliutils.make_comserver:run
pyi-makespec=PyInstaller.cliutils.makespec:run
pyi-set_version=PyInstaller.cliutils.set_version:run
"""
)
| mit |
switch-model/switch-hawaii-studies | database/build_database/tracking_pv.py | 1 | 19745 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# sample SAM/NSRDB code available here:
# https://sam.nrel.gov/samples
# https://developer.nrel.gov/docs/solar/pvwatts-v5/
# https://nsrdb.nrel.gov/api-instructions
# https://sam.nrel.gov/sdk
# note: this script can be run piecemeal from iPython/Jupyter, or all-at-once from the command line
# NOTE: this uses pandas DataFrames for most calculations, and breaks the work into batches
# to avoid exceeding available memory (e.g., do one load zone worth of projects at a time,
# then calculate one grid cell at a time and add it to the relevant projects). However, this
# makes the code complex and the logic unclear, so it would probably be better to use a database,
# where everything can be calculated at once in one query. (e.g. get a list of grid cells for all
# projects from the database, then calculate cap factor for each grid cell and store it incrementally
# in a cell cap factor table, then run one query which joins project -> project cell -> cell to
# get hourly cap factors for all projects. This could use temporary tables for the cells, which
# are then discarded.
# NOTE: this stores all the hourly capacity factors in the postgresql database. That makes it
# difficult to share with others. An alternative would be to store a separate text file for each
# technology for each day and sync those via github. Disadvantages of that: querying is more complex
# and we have to specify a reference time zone before saving the data (day boundaries are time zone
# specific). Alternative: store in postgresql and publish a dump of the database.
from __future__ import print_function, division
import os, re, sys, struct, ctypes, datetime
import numpy as np
import pandas as pd
import dateutil.tz # should be available, since pandas uses it
import sqlalchemy
from util import execute, executemany, switch_db, switch_host
import shared_tables
# number of digits that latitude and longitude should be rounded to before matching
# to the nsrdb files
lat_lon_digits = 2
# location of main database directory relative to this script file
database_rel_dir = '..'
# location of nsrdb hourly data files relative to the main database directory
# all subdirectories of this one will be scanned for data files
nsrdb_dir = 'NSRDB Hourly Irradiance Data'
# pattern to match lat and lon in nsrdb file name (all matching files will
# be read for that site, e.g., for multiple years); this should specify
# named groups for at least 'lat' and 'lon'.
# note: we don't try too hard to match an exact pattern of digits and symbols
# (just use .* for each group). If the expressions can't be parsed, we let them
# generate errors later.
nsrdb_file_regex = re.compile(r'^(?P<stn>.*)_(?P<lat>.*)_(?P<lon>.*)_(?P<year>.*)[.]csv$')
# location of System Advisor Model SDK relative to this script file
sam_sdk_rel_dir = 'System Advisor Model'
# load zone for which data is being prepared
# TODO: add load zone to cluster input file
load_zone = 'Oahu'
# tuple of technology name and array_type for pvwatts
# note: Appendix F of 2016-04-01 PSIP uses 2 for tracking,
# but 3 (backtracking) seems like a better choice
central_solar_techs = pd.DataFrame(dict(
technology=['CentralFixedPV', 'CentralTrackingPV'],
array_type=[0, 3],
acres_per_mw=[7.6, 8.7], # for projects < 20 MW from p. v of http://www.nrel.gov/docs/fy13osti/56290.pdf
))
# index the central_solar_techs and derive some useful values
central_solar_techs.set_index('technology', inplace=True)
# 1 / [(m2/acre) * (acre/mw)]
central_solar_techs['mw_per_m2'] = (1.0 / (4046.86 * central_solar_techs['acres_per_mw']))
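# Illustrative check of the unit conversion above (added comment): with
# 4046.86 m2 per acre, CentralFixedPV at 7.6 acres/MW gives
# 1 / (4046.86 * 7.6) ~= 3.25e-05 MW per m2 of land (about 32.5 W/m2), and
# CentralTrackingPV at 8.7 acres/MW gives ~= 2.84e-05 MW per m2.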
# find the database directory and System Advisor Model
try:
curdir = os.path.dirname(__file__)
except NameError:
# no __file__ variable; we're copying and pasting in an interactive session
curdir = os.getcwd()
pd.set_option('display.width', 200)
database_dir = os.path.normpath(os.path.join(curdir, database_rel_dir))
if not os.path.exists(database_dir):
raise RuntimeError("Unable to find database directory at " + database_dir)
sam_sdk_dir = os.path.normpath(os.path.join(curdir, sam_sdk_rel_dir))
if not os.path.exists(sam_sdk_dir):
raise RuntimeError("Unable to find System Advisor Model (SAM) SDK directory at " + sam_sdk_dir)
# Load the System Advisor Model (SAM) SDK API
# Note: SAM SDK can be downloaded from https://sam.nrel.gov/sdk
# nsrdb/sam code is based on examples in sscapi.py itself
# Also see https://nsrdb.nrel.gov/api-instructions and sam-sdk/ssc_guide.pdf
# preload ssc library, so sscapi won't fail if it's not in the library search path
if sys.platform == 'win32' or sys.platform == 'cygwin':
if 8 * struct.calcsize("P") == 64:
path = ['win64', 'ssc.dll']
else:
path = ['win32', 'ssc.dll']
elif sys.platform == 'darwin':
path = ['osx64', 'ssc.dylib']
elif sys.platform == 'linux2':
path = ['linux64', 'ssc.so']
else:
raise RuntimeError('Unsupported operating system: {}'.format(sys.platform))
ssc_dll = ctypes.CDLL(os.path.join(sam_sdk_dir, *path))
# add search path to sscapi.py
sys.path.append(os.path.join(sam_sdk_dir, 'languages', 'python'))
import sscapi
ssc = sscapi.PySSC()
pvwatts5 = ssc.module_create("pvwattsv5")
ssc.module_exec_set_print(0)
# setup database
db_engine = sqlalchemy.create_engine('postgresql://' + switch_host + '/' + switch_db)
def main():
tracking_pv()
distributed_pv()
shared_tables.calculate_interconnect_costs()
def tracking_pv():
# make a list of all available NSRDB data files
nsrdb_file_dict, years = get_nsrdb_file_dict()
cluster_cell = pd.DataFrame.from_csv(
db_path('GIS/Utility-Scale Solar Sites/solar_cluster_nsrdb_grid_renamed.csv'),
index_col='gridclstid'
)
cluster_cell = cluster_cell[cluster_cell['solar_covg']>0]
cell = cluster_cell.groupby('nsrdb_id')
cluster = cluster_cell.groupby('cluster_id')
cluster_total_solar_area = cluster['solar_area'].sum()
cluster_ids = cluster.groups.keys() # list of all cluster ids, for convenience
cluster_id_digits = len(str(max(cluster_ids))) # max number of digits for a cluster id
# site_ids for each cluster_id (these distinguish PV sites from wind sites that may have the same number)
site_ids = ['PV_' + str(cluster_id).zfill(cluster_id_digits) for cluster_id in cluster_ids]
# calculate weighted average lat and lon for each cluster
# (note: the axis=0 and axis=1 keep pandas from generating lots of nans due to
# trying to match column name in addition to row index)
cluster_coords = pd.concat([
cluster_cell['cluster_id'],
cluster_cell[['solar_lat', 'solar_lon']].multiply(cluster_cell['solar_area'], axis=0)
], axis=1).groupby('cluster_id').sum().div(cluster_total_solar_area, axis=0)
cluster_coords.columns=['latitude', 'longitude']
# get list of technologies to be defined
technologies = central_solar_techs.index.values
# calculate capacity factors for all projects
# This dict will hold vectors of capacity factors for each cluster for each year and technology.
# This arrangement is simpler than using a DataFrame because we don't yet know the
# indexes (timesteps) of the data for each year.
cluster_cap_factors = dict()
for tech in technologies:
# go through all the needed nsrdb cells and add them to the capacity factor for the
# relevant cluster and year
for cell_id, grp in cell:
# grp has one row for each cluster that uses data from this cell
lat = round_coord(grp['nsrdb_lat'].iloc[0])
lon = round_coord(grp['nsrdb_lon'].iloc[0])
for year in years:
cap_factors = get_cap_factors(nsrdb_file_dict[lat, lon, year], central_solar_techs.loc[tech, 'array_type'])
# note: iterrows() would convert everything to a single (float) series, but itertuples doesn't
for clst in grp.itertuples():
contrib = cap_factors * clst.solar_area / cluster_total_solar_area[clst.cluster_id]
key = (tech, clst.cluster_id, year)
if key in cluster_cap_factors:
cluster_cap_factors[key] += contrib
else:
cluster_cap_factors[key] = contrib
# get timesteps for each year (based on lat and lon of first cell in the list)
timesteps = dict()
lat = round_coord(cluster_cell['nsrdb_lat'].iloc[0])
lon = round_coord(cluster_cell['nsrdb_lon'].iloc[0])
for year in years:
timesteps[year] = get_timesteps(nsrdb_file_dict[(lat, lon, year)])
# make an index of all timesteps
timestep_index = pd.concat([pd.DataFrame(index=x) for x in timesteps.values()]).index.sort_values()
# make a single dataframe to hold all the data
cap_factor_df = pd.DataFrame(
index=timestep_index,
columns=pd.MultiIndex.from_product([technologies, site_ids]),
dtype=float
)
# assign values to the dataframe
for ((tech, cluster_id, year), cap_factors) in cluster_cap_factors.iteritems():
cap_factor_df.update(pd.DataFrame(
cap_factors,
index=timesteps[year],
columns=[(tech, 'PV_' + str(cluster_id).zfill(cluster_id_digits))]
))
cap_factor_df.columns.names = ['technology', 'site']
cap_factor_df.index.names=['date_time']
# add load_zone and orientation to the index
cap_factor_df['load_zone'] = load_zone
cap_factor_df['orientation'] = 'na'
cap_factor_df.set_index(['load_zone', 'orientation'], append=True, inplace=True)
# convert to database orientation, with natural order for indexes,
# but also keep as a DataFrame
cap_factor_df = pd.DataFrame(
{'cap_factor': cap_factor_df.stack(cap_factor_df.columns.names)}
)
# sort table, then switch to using z, t, s, o as index (to match with project table)
cap_factor_df = cap_factor_df.reorder_levels(
['load_zone', 'technology', 'site', 'orientation', 'date_time']
).sort_index().reset_index('date_time')
# make a dataframe showing potential projects (same structure as "project" table)
# note: for now we don't really handle multiple load zones and we don't worry about orientation
# (may eventually have projects available with different azimuth and slope)
# This concatenates a list of DataFrames, one for each technology
project_df = pd.concat([
pd.DataFrame(dict(
load_zone=load_zone,
technology=tech,
site=site_ids,
orientation='na',
max_capacity=cluster_total_solar_area*central_solar_techs.loc[tech, 'mw_per_m2'],
latitude=cluster_coords['latitude'],
longitude=cluster_coords['longitude'],
))
for tech in technologies
], axis=0).set_index(['load_zone', 'technology', 'site', 'orientation'])
# store data in postgresql tables
shared_tables.create_table("project")
execute("DELETE FROM project WHERE technology IN %s;", [tuple(technologies)])
project_df.to_sql('project', db_engine, if_exists='append')
# retrieve the project IDs (created automatically in the database)
project_ids = pd.read_sql(
"SELECT project_id, load_zone, technology, site, orientation "
+ "FROM project WHERE technology IN %(techs)s;",
db_engine, index_col=['load_zone', 'technology', 'site', 'orientation'],
params={'techs': tuple(technologies)}
)
cap_factor_df['project_id'] = project_ids['project_id']
# convert date_time values into strings for insertion into postgresql.
# Inserting a timezone-aware DatetimeIndex into postgresql fails; see
# http://stackoverflow.com/questions/35435424/pandas-to-sql-gives-valueerror-with-timezone-aware-column/35552061
# note: the string conversion is pretty slow
cap_factor_df['date_time'] = pd.DatetimeIndex(cap_factor_df['date_time']).strftime("%Y-%m-%d %H:%M:%S%z")
cap_factor_df.set_index(['project_id', 'date_time'], inplace=True)
# Do we need error checking here? If any projects aren't in cap_factor_df, they'll
# create single rows with NaNs (and any prior existing cap_factors for them will
# get dropped below).
# If any rows in cap_factor_df aren't matched to a project, they'll go in with
# a null project_id.
shared_tables.create_table("cap_factor") # only created if it doesn't exist
shared_tables.drop_indexes("cap_factor") # drop and recreate is faster than incremental sorting
execute("DELETE FROM cap_factor WHERE project_id IN %s;", [tuple(project_ids['project_id'])])
cap_factor_df.to_sql('cap_factor', db_engine, if_exists='append', chunksize=10000)
shared_tables.create_indexes("cap_factor")
def get_cap_factors(file, array_type):
dat = ssc.data_create()
# set system parameters
# These match Table 7 of Appendix F of 2016-04-01 PSIP Book 1 unless otherwise noted
dc_ac_ratio = 1.5
    ssc.data_set_number(dat, 'system_capacity', 1.0 * dc_ac_ratio) # dc, kW (sized to give a 1 kW AC system)
ssc.data_set_number(dat, 'dc_ac_ratio', dc_ac_ratio)
ssc.data_set_number(dat, 'tilt', 0)
ssc.data_set_number(dat, 'azimuth', 180)
ssc.data_set_number(dat, 'inv_eff', 96)
ssc.data_set_number(dat, 'losses', 14.0757)
# array_type: 0=fixed rack, 1=fixed roof, 2=single-axis, 3=single-axis backtracked
ssc.data_set_number(dat, 'array_type', array_type)
# gcr: ground cover ratio (may be used for backtrack and shading calculations)
ssc.data_set_number(dat, 'gcr', 0.4)
ssc.data_set_number(dat, 'adjust:constant', 0)
# module_type: 0=standard, 1=premium, 2=thin film
# I set it to a reasonable value (probably default)
ssc.data_set_number(dat, 'module_type', 0)
# specify the file holding the solar data
ssc.data_set_string(dat, 'solar_resource_file', file)
# run PVWatts5
if ssc.module_exec(pvwatts5, dat) == 0:
err = 'PVWatts V5 simulation error:\n'
idx = 1
msg = ssc.module_log(pvwatts5, 0)
while (msg is not None):
err += '\t: {}\n'.format(msg)
msg = ssc.module_log(pvwatts5, idx)
idx += 1
raise RuntimeError(err.strip())
else:
# get power production in kW; for a 1 kW AC system this is also the capacity factor
cap_factors = np.asarray(ssc.data_get_array(dat, 'gen'), dtype=float)
ssc.data_free(dat)
return cap_factors
def get_timesteps(file):
"""Retrieve timesteps from nsrdb file as pandas datetime series. Based on code in sscapi.run_test2()."""
dat = ssc.data_create()
ssc.data_set_string(dat, 'file_name', file)
ssc.module_exec_simple_no_thread('wfreader', dat)
# create a tzinfo structure for this file
# note: nsrdb uses a fixed offset from UTC, i.e., no daylight saving time
tz_offset = ssc.data_get_number(dat, 'tz')
tzinfo = dateutil.tz.tzoffset(None, 3600 * tz_offset)
df = pd.DataFrame(dict(
year=ssc.data_get_array(dat, 'year'),
month=ssc.data_get_array(dat, 'month'),
day=ssc.data_get_array(dat, 'day'),
hour=ssc.data_get_array(dat, 'hour'),
minute=ssc.data_get_array(dat, 'minute'),
)).astype(int)
ssc.data_free(dat)
# create pandas DatetimeIndex for the timesteps in the file
# note: we ignore minutes because time indexes always refer to the start of the hour
# in our database
# note: if you use tz-aware datetime objects with pd.DatetimeIndex(), it converts them
# to UTC and makes them tz-naive. If you use pd.to_datetime() to make a column of datetime
# values, you have to specify UTC=True and then it does the same thing.
# So we start with naive datetimes and then specify the tzinfo when creating the
# DatetimeIndex. (We could also use idx.localize(tzinfo) after creating a naive DatetimeIndex.)
timesteps = pd.DatetimeIndex(
[datetime.datetime(year=t.year, month=t.month, day=t.day, hour=t.hour) for t in df.itertuples()],
tz=tzinfo
)
return timesteps
# # This class is based on http://stackoverflow.com/questions/17976063/how-to-create-tzinfo-when-i-have-utc-offset
# # It does the same thing as dateutil.tz.tzoffset, so we use that instead.
# class TimeZoneInfo(datetime.tzinfo):
# """tzinfo derived concrete class"""
# _dst = datetime.timedelta(0)
# _name = None
# def __init__(self, offset_hours):
# self._offset = datetime.timedelta(hours=offset_hours)
# def utcoffset(self, dt):
# return self._offset
# def dst(self, dt):
# return self.__class__._dst
# def tzname(self, dt):
# return self.__class__._name
def db_path(path):
"""Convert the path specified relative to the database directory into a real path.
    For convenience, this also converts '/' file separators to whatever is appropriate for
the current operating system."""
return os.path.join(database_dir, *path.split('/'))
def round_coord(coord):
# convert lat or lon from whatever form it's currently in to a standard form (2-digit rounded float)
# this gives more stable matching in dictionaries, indexes, etc.
return round(float(coord), 2)
def get_nsrdb_file_dict():
# get a list of all the files that have data for each lat/lon pair
# (parsed from the file names)
file_dict = dict()
years = set()
for dir_name, dirs, files in os.walk(db_path(nsrdb_dir)):
for f in files:
file_path = os.path.join(dir_name, f)
m = nsrdb_file_regex.match(f)
if m is None:
# print "Skipping unrecognized file {}".format(file_path)
pass
else:
lat = round_coord(m.group('lat'))
lon = round_coord(m.group('lon'))
year = int(m.group('year'))
file_dict[lat, lon, year] = file_path
years.add(year)
return file_dict, years
def distributed_pv():
# for now, just reuse old data
# store data in postgresql tables
shared_tables.create_table("project")
shared_tables.create_table("cap_factor")
# remove old records (best before removing indexes)
execute("""
DELETE FROM cap_factor WHERE project_id IN (SELECT project_id FROM project WHERE technology = 'DistPV');
""")
execute("""
DELETE FROM project WHERE technology = 'DistPV';
""")
# remove indexes
shared_tables.drop_indexes("cap_factor") # drop and recreate is faster than incremental sorting
execute("""
INSERT INTO project (load_zone, technology, site, orientation, max_capacity)
SELECT load_zone, technology, 'DistPV' AS site, orientation, max_capacity
FROM max_capacity_pre_2016_06_21
WHERE technology = 'DistPV';
""")
execute("""
INSERT INTO cap_factor (project_id, date_time, cap_factor)
SELECT project_id, date_time, cap_factor
FROM cap_factor_pre_2016_06_21 cf JOIN project USING (load_zone, technology, orientation)
WHERE cf.technology = 'DistPV';
""")
# restore indexes
shared_tables.create_indexes("cap_factor")
if __name__ == '__main__':
main()
| apache-2.0 |
manashmndl/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even when permuted::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
    If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
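# Worked example (added for illustration, not part of scikit-learn): for
# labels_true = [0, 0, 1, 1] and labels_pred = [0, 0, 0, 0] the contingency
# matrix is [[2], [2]], so MI = 0, H(true) = log(2) and H(pred) = 0; the code
# above therefore returns homogeneity = 0.0, completeness = 1.0 (by the
# entropy_K == 0 convention) and v_measure = 0.0.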
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
    and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
    If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
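# Worked example (added for illustration, not part of scikit-learn):
# mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1]) has contingency [[2, 0], [0, 2]];
# the two nonzero cells each contribute 0.5 * log(0.5 / 0.25), so
# MI = log(2) ~= 0.6931 nats, the entropy of either labeling, as expected for
# identical clusterings.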
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami: float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(ie perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
    # Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
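# Worked example (added for illustration, not part of scikit-learn):
# entropy([0, 0, 1, 1]) counts pi = [2, 2] with pi_sum = 4, giving
# -(0.5 * (log(2) - log(4)) + 0.5 * (log(2) - log(4))) = log(2) ~= 0.6931 nats.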
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/nltk/parse/transitionparser.py | 3 | 31227 | # Natural Language Toolkit: Arc-Standard and Arc-eager Transition Based Parsers
#
# Author: Long Duong <[email protected]>
#
# Copyright (C) 2001-2015 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import pickle
from os import remove
from copy import deepcopy
from operator import itemgetter
try:
from numpy import array
from scipy import sparse
from sklearn.datasets import load_svmlight_file
from sklearn import svm
except ImportError:
pass
from nltk.parse import ParserI, DependencyGraph, DependencyEvaluator
class Configuration(object):
"""
    Class for holding a configuration, which is the partial analysis of the input sentence.
    The transition-based parser aims at finding a set of operators that transfer the initial
    configuration to the terminal configuration.
The configuration includes:
- Stack: for storing partially proceeded words
- Buffer: for storing remaining input words
- Set of arcs: for storing partially built dependency tree
    This class also provides a method to represent a configuration as a list of features.
"""
def __init__(self, dep_graph):
"""
:param dep_graph: the representation of an input in the form of dependency graph.
:type dep_graph: DependencyGraph where the dependencies are not specified.
"""
# dep_graph.nodes contain list of token for a sentence
self.stack = [0] # The root element
self.buffer = list(range(1, len(dep_graph.nodes))) # The rest is in the buffer
self.arcs = [] # empty set of arc
self._tokens = dep_graph.nodes
self._max_address = len(self.buffer)
def __str__(self):
return 'Stack : ' + \
str(self.stack) + ' Buffer : ' + str(self.buffer) + ' Arcs : ' + str(self.arcs)
def _check_informative(self, feat, flag=False):
"""
Check whether a feature is informative
The flag control whether "_" is informative or not
"""
if feat is None:
return False
if feat == '':
return False
if flag is False:
if feat == '_':
return False
return True
def extract_features(self):
"""
        Extract the set of features for the current configuration. Implements standard features as described in
        Table 3.2 (page 31) of the Dependency Parsing book by Sandra Kubler, Ryan McDonald and Joakim Nivre.
Please note that these features are very basic.
:return: list(str)
"""
result = []
# Todo : can come up with more complicated features set for better
# performance.
if len(self.stack) > 0:
# Stack 0
stack_idx0 = self.stack[len(self.stack) - 1]
token = self._tokens[stack_idx0]
if self._check_informative(token['word'], True):
result.append('STK_0_FORM_' + token['word'])
if 'lemma' in token and self._check_informative(token['lemma']):
result.append('STK_0_LEMMA_' + token['lemma'])
if self._check_informative(token['tag']):
result.append('STK_0_POS_' + token['tag'])
if 'feats' in token and self._check_informative(token['feats']):
feats = token['feats'].split("|")
for feat in feats:
result.append('STK_0_FEATS_' + feat)
# Stack 1
if len(self.stack) > 1:
stack_idx1 = self.stack[len(self.stack) - 2]
token = self._tokens[stack_idx1]
if self._check_informative(token['tag']):
result.append('STK_1_POS_' + token['tag'])
# Left most, right most dependency of stack[0]
left_most = 1000000
right_most = -1
dep_left_most = ''
dep_right_most = ''
for (wi, r, wj) in self.arcs:
if wi == stack_idx0:
if (wj > wi) and (wj > right_most):
right_most = wj
dep_right_most = r
if (wj < wi) and (wj < left_most):
left_most = wj
dep_left_most = r
if self._check_informative(dep_left_most):
result.append('STK_0_LDEP_' + dep_left_most)
if self._check_informative(dep_right_most):
result.append('STK_0_RDEP_' + dep_right_most)
# Check Buffered 0
if len(self.buffer) > 0:
# Buffer 0
buffer_idx0 = self.buffer[0]
token = self._tokens[buffer_idx0]
if self._check_informative(token['word'], True):
result.append('BUF_0_FORM_' + token['word'])
if 'lemma' in token and self._check_informative(token['lemma']):
result.append('BUF_0_LEMMA_' + token['lemma'])
if self._check_informative(token['tag']):
result.append('BUF_0_POS_' + token['tag'])
if 'feats' in token and self._check_informative(token['feats']):
feats = token['feats'].split("|")
for feat in feats:
result.append('BUF_0_FEATS_' + feat)
# Buffer 1
if len(self.buffer) > 1:
buffer_idx1 = self.buffer[1]
token = self._tokens[buffer_idx1]
if self._check_informative(token['word'], True):
result.append('BUF_1_FORM_' + token['word'])
if self._check_informative(token['tag']):
result.append('BUF_1_POS_' + token['tag'])
if len(self.buffer) > 2:
buffer_idx2 = self.buffer[2]
token = self._tokens[buffer_idx2]
if self._check_informative(token['tag']):
result.append('BUF_2_POS_' + token['tag'])
if len(self.buffer) > 3:
buffer_idx3 = self.buffer[3]
token = self._tokens[buffer_idx3]
if self._check_informative(token['tag']):
result.append('BUF_3_POS_' + token['tag'])
# Left most, right most dependency of stack[0]
left_most = 1000000
right_most = -1
dep_left_most = ''
dep_right_most = ''
for (wi, r, wj) in self.arcs:
if wi == buffer_idx0:
if (wj > wi) and (wj > right_most):
right_most = wj
dep_right_most = r
if (wj < wi) and (wj < left_most):
left_most = wj
dep_left_most = r
if self._check_informative(dep_left_most):
result.append('BUF_0_LDEP_' + dep_left_most)
if self._check_informative(dep_right_most):
result.append('BUF_0_RDEP_' + dep_right_most)
return result
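# Illustrative sketch (not part of the original module): the stack and buffer
# hold token addresses into dep_graph.nodes, so a human-readable view of a
# configuration can be recovered like this.
def _describe_configuration(conf):
    stack_words = [conf._tokens[i]['word'] for i in conf.stack]
    buffer_words = [conf._tokens[i]['word'] for i in conf.buffer]
    return {'stack': stack_words, 'buffer': buffer_words, 'arcs': list(conf.arcs)}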
class Transition(object):
"""
This class defines the set of transitions which are applied to a configuration to get another configuration.
Note that the available transitions differ between parsing algorithms.
"""
# Define set of transitions
LEFT_ARC = 'LEFTARC'
RIGHT_ARC = 'RIGHTARC'
SHIFT = 'SHIFT'
REDUCE = 'REDUCE'
def __init__(self, alg_option):
"""
:param alg_option: the algorithm option of this parser. Currently supports the `arc-standard` and `arc-eager` algorithms
:type alg_option: str
"""
self._algo = alg_option
if alg_option not in [
TransitionParser.ARC_STANDARD,
TransitionParser.ARC_EAGER]:
raise ValueError(" Currently we only support %s and %s " %
(TransitionParser.ARC_STANDARD, TransitionParser.ARC_EAGER))
def left_arc(self, conf, relation):
"""
Note that the left-arc algorithm is essentially the same for arc-standard and arc-eager, except for the precondition
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
return -1
if conf.buffer[0] == 0:
# here is the Root element
return -1
idx_wi = conf.stack[len(conf.stack) - 1]
flag = True
if self._algo == TransitionParser.ARC_EAGER:
for (idx_parent, r, idx_child) in conf.arcs:
if idx_child == idx_wi:
flag = False
if flag:
conf.stack.pop()
idx_wj = conf.buffer[0]
conf.arcs.append((idx_wj, relation, idx_wi))
else:
return -1
def right_arc(self, conf, relation):
"""
Note that the algorithm for right-arc is DIFFERENT for arc-standard and arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
return -1
if self._algo == TransitionParser.ARC_STANDARD:
idx_wi = conf.stack.pop()
idx_wj = conf.buffer[0]
conf.buffer[0] = idx_wi
conf.arcs.append((idx_wi, relation, idx_wj))
else: # arc-eager
idx_wi = conf.stack[len(conf.stack) - 1]
idx_wj = conf.buffer.pop(0)
conf.stack.append(idx_wj)
conf.arcs.append((idx_wi, relation, idx_wj))
def reduce(self, conf):
"""
Note that the algorithm for reduce is only available for arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if self._algo != TransitionParser.ARC_EAGER:
return -1
if len(conf.stack) <= 0:
return -1
idx_wi = conf.stack[len(conf.stack) - 1]
flag = False
for (idx_parent, r, idx_child) in conf.arcs:
if idx_child == idx_wi:
flag = True
if flag:
conf.stack.pop() # reduce it
else:
return -1
def shift(self, conf):
"""
Note that the algorithm for shift is the SAME for arc-standard and arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if len(conf.buffer) <= 0:
return -1
idx_wi = conf.buffer.pop(0)
conf.stack.append(idx_wi)
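# Illustrative sketch (not part of the original module): the transition methods
# above return -1 when their precondition fails, so a caller can attempt an
# operation and fall back to SHIFT, which is applicable whenever the buffer is
# non-empty.
def _apply_or_shift(operation, conf, relation):
    if operation.left_arc(conf, relation) == -1:
        operation.shift(conf)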
class TransitionParser(ParserI):
"""
Class for a transition-based parser. Implements two algorithms: "arc-standard" and "arc-eager"
"""
ARC_STANDARD = 'arc-standard'
ARC_EAGER = 'arc-eager'
def __init__(self, algorithm):
"""
:param algorithm: the algorithm option of this parser. Currently supports the `arc-standard` and `arc-eager` algorithms
:type algorithm: str
"""
if not(algorithm in [self.ARC_STANDARD, self.ARC_EAGER]):
raise ValueError(" Currently we only support %s and %s " %
(self.ARC_STANDARD, self.ARC_EAGER))
self._algorithm = algorithm
self._dictionary = {}
self._transition = {}
self._match_transition = {}
def _get_dep_relation(self, idx_parent, idx_child, depgraph):
p_node = depgraph.nodes[idx_parent]
c_node = depgraph.nodes[idx_child]
if c_node['word'] is None:
return None # Root word
if c_node['head'] == p_node['address']:
return c_node['rel']
else:
return None
def _convert_to_binary_features(self, features):
"""
:param features: list of feature strings to convert to binary features
:type features: list(str)
:return : string of binary features in libsvm format, i.e. space-separated 'featureID:value' pairs
"""
unsorted_result = []
for feature in features:
self._dictionary.setdefault(feature, len(self._dictionary))
unsorted_result.append(self._dictionary[feature])
# Default value of each feature is 1.0
return ' '.join(str(featureID) + ':1.0' for featureID in sorted(unsorted_result))
def _is_projective(self, depgraph):
arc_list = []
for key in depgraph.nodes:
node = depgraph.nodes[key]
if 'head' in node:
childIdx = node['address']
parentIdx = node['head']
if parentIdx is not None:
arc_list.append((parentIdx, childIdx))
for (parentIdx, childIdx) in arc_list:
# Ensure that childIdx < parentIdx
if childIdx > parentIdx:
temp = childIdx
childIdx = parentIdx
parentIdx = temp
for k in range(childIdx + 1, parentIdx):
for m in range(len(depgraph.nodes)):
if (m < childIdx) or (m > parentIdx):
if (k, m) in arc_list:
return False
if (m, k) in arc_list:
return False
return True
def _write_to_file(self, key, binary_features, input_file):
"""
Write the binary features to the input file and update the transition dictionary.
"""
self._transition.setdefault(key, len(self._transition) + 1)
self._match_transition[self._transition[key]] = key
input_str = str(self._transition[key]) + ' ' + binary_features + '\n'
input_file.write(input_str.encode('utf-8'))
def _create_training_examples_arc_std(self, depgraphs, input_file):
"""
Create the training examples in libsvm format and write them to the input_file.
Reference : Page 32, Chapter 3. Dependency Parsing by Sandra Kubler, Ryan McDonald and Joakim Nivre (2009)
"""
operation = Transition(self.ARC_STANDARD)
count_proj = 0
training_seq = []
for depgraph in depgraphs:
if not self._is_projective(depgraph):
continue
count_proj += 1
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
b0 = conf.buffer[0]
features = conf.extract_features()
binary_features = self._convert_to_binary_features(features)
if len(conf.stack) > 0:
s0 = conf.stack[len(conf.stack) - 1]
# Left-arc operation
rel = self._get_dep_relation(b0, s0, depgraph)
if rel is not None:
key = Transition.LEFT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.left_arc(conf, rel)
training_seq.append(key)
continue
# Right-arc operation
rel = self._get_dep_relation(s0, b0, depgraph)
if rel is not None:
precondition = True
# Get the max-index of buffer
maxID = conf._max_address
for w in range(maxID + 1):
if w != b0:
relw = self._get_dep_relation(b0, w, depgraph)
if relw is not None:
if (b0, relw, w) not in conf.arcs:
precondition = False
if precondition:
key = Transition.RIGHT_ARC + ':' + rel
self._write_to_file(
key,
binary_features,
input_file)
operation.right_arc(conf, rel)
training_seq.append(key)
continue
# Shift operation as the default
key = Transition.SHIFT
self._write_to_file(key, binary_features, input_file)
operation.shift(conf)
training_seq.append(key)
print(" Number of training examples : " + str(len(depgraphs)))
print(" Number of valid (projective) examples : " + str(count_proj))
return training_seq
def _create_training_examples_arc_eager(self, depgraphs, input_file):
"""
Create the training examples in libsvm format and write them to the input_file.
Reference : 'A Dynamic Oracle for Arc-Eager Dependency Parsing' by Yoav Goldberg and Joakim Nivre
"""
operation = Transition(self.ARC_EAGER)
countProj = 0
training_seq = []
for depgraph in depgraphs:
if not self._is_projective(depgraph):
continue
countProj += 1
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
b0 = conf.buffer[0]
features = conf.extract_features()
binary_features = self._convert_to_binary_features(features)
if len(conf.stack) > 0:
s0 = conf.stack[len(conf.stack) - 1]
# Left-arc operation
rel = self._get_dep_relation(b0, s0, depgraph)
if rel is not None:
key = Transition.LEFT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.left_arc(conf, rel)
training_seq.append(key)
continue
# Right-arc operation
rel = self._get_dep_relation(s0, b0, depgraph)
if rel is not None:
key = Transition.RIGHT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.right_arc(conf, rel)
training_seq.append(key)
continue
# reduce operation
flag = False
for k in range(s0):
if self._get_dep_relation(k, b0, depgraph) is not None:
flag = True
if self._get_dep_relation(b0, k, depgraph) is not None:
flag = True
if flag:
key = Transition.REDUCE
self._write_to_file(key, binary_features, input_file)
operation.reduce(conf)
training_seq.append(key)
continue
# Shift operation as the default
key = Transition.SHIFT
self._write_to_file(key, binary_features, input_file)
operation.shift(conf)
training_seq.append(key)
print(" Number of training examples : " + str(len(depgraphs)))
print(" Number of valid (projective) examples : " + str(countProj))
return training_seq
def train(self, depgraphs, modelfile):
"""
:param depgraphs : list of DependencyGraph as the training data
:type depgraphs : DependencyGraph
:param modelfile : file name to save the trained model
:type modelfile : str
"""
try:
input_file = tempfile.NamedTemporaryFile(
prefix='transition_parse.train',
dir=tempfile.gettempdir(),
delete=False)
if self._algorithm == self.ARC_STANDARD:
self._create_training_examples_arc_std(depgraphs, input_file)
else:
self._create_training_examples_arc_eager(depgraphs, input_file)
input_file.close()
# Using the temporary file to train the libsvm classifier
x_train, y_train = load_svmlight_file(input_file.name)
# The parameter is set according to the paper:
# Algorithms for Deterministic Incremental Dependency Parsing by Joakim Nivre
# Todo : because of probability = True => very slow due to
# cross-validation. Need to improve the speed here
model = svm.SVC(
kernel='poly',
degree=2,
coef0=0,
gamma=0.2,
C=0.5,
verbose=True,
probability=True)
model.fit(x_train, y_train)
# Save the model to file name (as pickle)
pickle.dump(model, open(modelfile, 'wb'))
finally:
remove(input_file.name)
def parse(self, depgraphs, modelFile):
"""
:param depgraphs: the list of test sentences, each represented as a dependency graph where the 'head' information is a dummy value
:type depgraphs: list(DependencyGraph)
:param modelfile: the model file
:type modelfile: str
:return: list (DependencyGraph) with the 'head' and 'rel' information
"""
result = []
# First load the model
model = pickle.load(open(modelFile, 'rb'))
operation = Transition(self._algorithm)
for depgraph in depgraphs:
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
features = conf.extract_features()
col = []
row = []
data = []
for feature in features:
if feature in self._dictionary:
col.append(self._dictionary[feature])
row.append(0)
data.append(1.0)
np_col = array(sorted(col)) # NB : index must be sorted
np_row = array(row)
np_data = array(data)
x_test = sparse.csr_matrix((np_data, (np_row, np_col)), shape=(1, len(self._dictionary)))
# It would be best to use the decision function as follows, BUT it's not supported yet for sparse SVM
# Using the decision function to build the votes array
#dec_func = model.decision_function(x_test)[0]
#votes = {}
#k = 0
# for i in range(len(model.classes_)):
# for j in range(i+1, len(model.classes_)):
# #if dec_func[k] > 0:
# votes.setdefault(i,0)
# votes[i] +=1
# else:
# votes.setdefault(j,0)
# votes[j] +=1
# k +=1
# Sort votes according to the values
#sorted_votes = sorted(votes.items(), key=itemgetter(1), reverse=True)
# We will use predict_proba instead of decision_function
prob_dict = {}
pred_prob = model.predict_proba(x_test)[0]
for i in range(len(pred_prob)):
prob_dict[i] = pred_prob[i]
sorted_Prob = sorted(
prob_dict.items(),
key=itemgetter(1),
reverse=True)
# Note that SHIFT is always a valid operation
for (y_pred_idx, confidence) in sorted_Prob:
#y_pred = model.predict(x_test)[0]
# From the prediction match to the operation
y_pred = model.classes_[y_pred_idx]
if y_pred in self._match_transition:
strTransition = self._match_transition[y_pred]
baseTransition = strTransition.split(":")[0]
if baseTransition == Transition.LEFT_ARC:
if operation.left_arc(conf, strTransition.split(":")[1]) != -1:
break
elif baseTransition == Transition.RIGHT_ARC:
if operation.right_arc(conf, strTransition.split(":")[1]) != -1:
break
elif baseTransition == Transition.REDUCE:
if operation.reduce(conf) != -1:
break
elif baseTransition == Transition.SHIFT:
if operation.shift(conf) != -1:
break
else:
raise ValueError("The predicted transition is not recognized, expected errors")
# Finished with the operations; now build the dependency graph from conf.arcs
new_depgraph = deepcopy(depgraph)
for key in new_depgraph.nodes:
node = new_depgraph.nodes[key]
node['rel'] = ''
# By default, all the tokens depend on the Root
node['head'] = 0
for (head, rel, child) in conf.arcs:
c_node = new_depgraph.nodes[child]
c_node['head'] = head
c_node['rel'] = rel
result.append(new_depgraph)
return result
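# Illustrative sketch (not part of the original module): an end-to-end run on
# treebank data; the corpus split and model file name are assumptions, and
# training the SVM with probability=True can be slow.
def _treebank_example():
    from nltk.corpus import dependency_treebank
    graphs = dependency_treebank.parsed_sents()
    train_graphs, test_graphs = graphs[:100], graphs[100:110]
    parser = TransitionParser('arc-eager')
    parser.train(train_graphs, 'temp.arceager.model')
    parsed = parser.parse(test_graphs, 'temp.arceager.model')
    return DependencyEvaluator(parsed, test_graphs).eval()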
def demo():
"""
>>> from nltk.parse import DependencyGraph, DependencyEvaluator
>>> from nltk.parse.transitionparser import TransitionParser, Configuration, Transition
>>> gold_sent = DependencyGraph(\"""
... Economic JJ 2 ATT
... news NN 3 SBJ
... has VBD 0 ROOT
... little JJ 5 ATT
... effect NN 3 OBJ
... on IN 5 ATT
... financial JJ 8 ATT
... markets NNS 6 PC
... . . 3 PU
... \""")
>>> conf = Configuration(gold_sent)
###################### Check the Initial Feature ########################
>>> print(', '.join(conf.extract_features()))
STK_0_POS_TOP, BUF_0_FORM_Economic, BUF_0_LEMMA_Economic, BUF_0_POS_JJ, BUF_1_FORM_news, BUF_1_POS_NN, BUF_2_POS_VBD, BUF_3_POS_JJ
###################### Check The Transition #######################
Check the Initialized Configuration
>>> print(conf)
Stack : [0] Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9] Arcs : []
A. Do some transition checks for ARC-STANDARD
>>> operation = Transition('arc-standard')
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
>>> operation.shift(conf)
>>> operation.left_arc(conf,"SBJ")
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
Middle Configuration and Features Check
>>> print(conf)
Stack : [0, 3, 5, 6] Buffer : [8, 9] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)]
>>> print(', '.join(conf.extract_features()))
STK_0_FORM_on, STK_0_LEMMA_on, STK_0_POS_IN, STK_1_POS_NN, BUF_0_FORM_markets, BUF_0_LEMMA_markets, BUF_0_POS_NNS, BUF_1_FORM_., BUF_1_POS_., BUF_0_LDEP_ATT
>>> operation.right_arc(conf, "PC")
>>> operation.right_arc(conf, "ATT")
>>> operation.right_arc(conf, "OBJ")
>>> operation.shift(conf)
>>> operation.right_arc(conf, "PU")
>>> operation.right_arc(conf, "ROOT")
>>> operation.shift(conf)
Terminated Configuration Check
>>> print(conf)
Stack : [0] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)]
B. Do some transition checks for ARC-EAGER
>>> conf = Configuration(gold_sent)
>>> operation = Transition('arc-eager')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'SBJ')
>>> operation.right_arc(conf,'ROOT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.right_arc(conf,'OBJ')
>>> operation.right_arc(conf,'ATT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.right_arc(conf,'PC')
>>> operation.reduce(conf)
>>> operation.reduce(conf)
>>> operation.reduce(conf)
>>> operation.right_arc(conf,'PU')
>>> print(conf)
Stack : [0, 3, 9] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)]
###################### Check The Training Function #######################
A. Check the ARC-STANDARD training
>>> import tempfile
>>> import os
>>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False)
>>> parser_std = TransitionParser('arc-standard')
>>> print(', '.join(parser_std._create_training_examples_arc_std([gold_sent], input_file)))
Number of training examples : 1
Number of valid (projective) examples : 1
SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, SHIFT, SHIFT, LEFTARC:ATT, SHIFT, SHIFT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, RIGHTARC:ATT, RIGHTARC:OBJ, SHIFT, RIGHTARC:PU, RIGHTARC:ROOT, SHIFT
>>> parser_std.train([gold_sent],'temp.arcstd.model')
Number of training examples : 1
Number of valid (projective) examples : 1
...
>>> remove(input_file.name)
B. Check the ARC-EAGER training
>>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False)
>>> parser_eager = TransitionParser('arc-eager')
>>> print(', '.join(parser_eager._create_training_examples_arc_eager([gold_sent], input_file)))
Number of training examples : 1
Number of valid (projective) examples : 1
SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, RIGHTARC:ROOT, SHIFT, LEFTARC:ATT, RIGHTARC:OBJ, RIGHTARC:ATT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, REDUCE, REDUCE, REDUCE, RIGHTARC:PU
>>> parser_eager.train([gold_sent],'temp.arceager.model')
Number of training examples : 1
Number of valid (projective) examples : 1
...
>>> remove(input_file.name)
###################### Check The Parsing Function ########################
A. Check the ARC-STANDARD parser
>>> result = parser_std.parse([gold_sent], 'temp.arcstd.model')
>>> de = DependencyEvaluator(result, [gold_sent])
>>> de.eval() >= (0, 0)
True
B. Check the ARC-EAGER parser
>>> result = parser_eager.parse([gold_sent], 'temp.arceager.model')
>>> de = DependencyEvaluator(result, [gold_sent])
>>> de.eval() >= (0, 0)
True
Note that result is very poor because of only one training example.
"""
| mit |
marius311/cosmoslik | cosmoslik/chains.py | 1 | 36828 | """
Python module for dealing with MCMC chains.
Contains utilities to:
* Load chains in a variety of formats
* Compute statistics (mean, std-dev, conf intervals, ...)
* Plot 1- and 2-d distributions of one or multiple parameters.
* Post-process chains
"""
import os, sys, re
from numpy import *
from numpy.linalg import inv
from itertools import takewhile, chain
from collections import defaultdict
import pickle
from functools import partial
from multiprocessing.pool import Pool
from numbers import Number
from functools import reduce
from subprocess import check_output, CalledProcessError, STDOUT
__all__ = ['Chain','Chains',
'like1d','like2d','likegrid','likegrid1d','likepoints',
'get_covariance', 'load_chain', 'load_cosmomc_chain']
class Chain(dict):
"""
An MCMC chain. This is just a Python dictionary mapping parameter names
to arrays of values, along with the special keys 'lnl' and 'weight'
"""
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
for k,v in list(self.items()): self[k]=atleast_1d(v)
if self and 'weight' not in self: self['weight']=ones(len(list(self.values())[0]))
def copy(self):
"""Deep copy the chain so post-processing, etc... works right"""
return Chain({k:v.copy() for k,v in self.items()})
def params(self,non_numeric=False, fixed=False):
"""
Returns the parameters in this chain (i.e. the keys except 'lnl' and 'weight')
Args:
non_numeric: whether to include non-numeric parameters (default: False)
fixed: whether to include parameters which don't vary (default: False)
"""
return (set([k for k,v in list(self.items()) if
(not k.startswith('_')
and (non_numeric or (v.ndim==1 and issubclass(v.dtype.type,Number)))
and (fixed or not all(v==v[0]))
)])
-set(["lnl","weight"]))
def sample(self,s,keys=None):
"""Return a sample or a range of samples depending on if s is an integer or a slice object."""
return Chain((k,self[k][s]) for k in (keys if keys else list(self.keys())))
def iterrows(self):
"""Iterate over the samples in this chain."""
for i in range(self.length()):
yield {k:v[i] for k,v in list(self.items())}
def matrix(self,params=None):
"""Return this chain as an nsamp * nparams matrix."""
if params is None: params=self.params()
if is_iter(params) and not isinstance(params,str):
return vstack([self[p] for p in (params if params else self.params())]).T
else:
return self[params]
def cov(self,params=None):
"""Returns the covariance of the parameters (or some subset of them) in this chain."""
return get_covariance(self.matrix(params), self["weight"])
def corr(self,params=None):
"""Returns the correlation matrix of the parameters (or some subset of them) in this chain."""
return get_correlation(self.matrix(params), self["weight"])
def mean(self,params=None):
"""Returns the mean of the parameters (or some subset of them) in this chain."""
return average(self.matrix(params),axis=0,weights=self["weight"])
def std(self,params=None):
"""Returns the std of the parameters (or some subset of them) in this chain."""
return sqrt(average((self.matrix(params)-self.mean(params))**2,axis=0,weights=self["weight"]))
def skew(self,params=None):
"""Return skewness of one or more parameters. """
return average((self.matrix(params)-self.mean(params))**3,axis=0,weights=self["weight"])/self.std(params)**3
def confbound(self,param,level=0.95,bound=None):
"""
Compute an upper or lower confidence bound.
Args:
param (string): name of the parameter
level (float): confidence level
bound ('upper', 'lower', or None): whether to compute an upper or
lower confidence bound. If set to None, will guess which based
on the skewness of the distribution (an upper bound for
positively skewed distributions)
"""
if bound==None:
bound = 'upper' if self.skew(param)>0 else 'lower'
if bound=='lower':
level = 1-level
H, x = histogram(self[param],weights=self['weight'],bins=1000)
xc = (x[1:]+x[:-1])/2
b = interp(level,cumsum(H)/float(H.sum()),xc)
return (self[param].min(),b) if bound=='upper' else (b,self[param].max())
def acceptance(self):
"""Returns the acceptance ratio."""
return 1./mean(self["weight"])
def length(self,unique=True):
"""Returns the number of unique samples. Set unique=False to get total samples."""
return (len if unique else sum)(self['weight'])
def burnin(self,n):
"""Remove the first n non-unique samples from the beginning of the chain."""
return self.sample(slice(sum(1 for _ in takewhile(lambda x: x<n, cumsum(self['weight']))),None))
def postprocd(self,func,nthreads=1,pool=None):
"""
Post-process some values into this chain.
Args:
func : a function which accepts all the keys in the chain
and returns a dictionary of new keys to add. `func` must accept *all*
keys in the chain, if there are ones you don't need, capture them
with **_ in its call signature, e.g. to add in a parameter 'b'
which is 'a' squared, use postprocd(lambda a,**_: {'b':a**2})
nthreads : the number of threads to use
pool : any worker pool which has a pool.map function.
default: multiprocessing.Pool(nthreads)
Returns:
A new chain with the new values post-processed in.
Does not alter the original chain. If for some rows in the
chain `func` did not return all the keys, these will be filled
in with `nan`.
Note:
This repeatedly calls `func` on rows in the chain, so it's very inefficient
if you already have a vectorized version of your post-processing function.
`postprocd` is mostly useful for slow non-vectorized post-processing functions,
allowing convenient use of the `nthreads` option to this function.
For the default implementation of `pool`, `func` must be picklable,
meaning it must be a module-level function.
"""
if pool is not None: _pool = pool
elif nthreads!=1: _pool = Pool(nthreads)
else: _pool = None
mp=map if _pool is None else _pool.map
try:
dat = list(mp(partial(_postprocd_helper,func),self.iterrows()))
finally:
if pool is None and _pool is not None: _pool.terminate()
c=self.copy()
allkeys = set(chain(*[list(d.keys()) for d in dat]))
c.update({k:array([d.get(k,nan) for d in dat]) for k in allkeys})
return c
def reweighted(self,func,nthreads=1,pool=None):
"""
Reweight this chain.
Args:
func : a function which accepts all keys in the chain, and returns
a new weight for the step. `func` must accept *all*
keys, if there are ones you don't need, capture them
with **_ in its call signature, e.g. to add unit gaussian prior
on parameter 'a' use reweighted(lambda a,**_: exp(-a**2/2))
nthreads : the number of threads to use
pool : any worker pool which has a pool.map function.
default: multiprocessing.Pool(nthreads)
Returns:
A new chain, without altering the original chain
Note:
This repeatedly calls `func` on rows in the chain, so it's very inefficient
compared to a vectorized version of your reweighting function. `reweighted` is mostly
useful for slow post-processing functions, allowing you to conveniently
use the `nthreads` option to this function.
For the default implementation of `pool`, `func` must be picklable,
meaning it must be a module-level function.
"""
return self.postprocd(partial(_reweighted_helper,func),nthreads=nthreads,pool=pool)
def add_gauss_prior(self, params, mean, covstd, nthreads=1, pool=None):
"""
Post-process a gaussian prior into the chain.
Args:
params - a parameter name, or a list of parameters
mean - the mean (should be a list if params was a list)
covstd - if params was a list, this should be a 2-d array holding the covariance
if params was a single parameter, this should be the standard deviation
Returns:
A new chain, without altering the original chain
"""
c=self.copy()
dx = self.matrix(params) - mean
if is_iter(params) and not isinstance(params,str):
dlnl=sum(dot(dx,inv(covstd))*dx,axis=1)/2
else:
dlnl=dx**2/2/covstd**2
c['weight'] *= exp(-dlnl)
c['lnl'] += dlnl
return c
def best_fit(self):
"""Get the best fit sample."""
return {k:v[0] for k,v in list(self.sample(self['lnl'].argmin()).items())}
def thin(self,delta):
"""Take every delta non-unique samples."""
c=ceil(cumsum(append(0,self["weight"]))/float(delta))
ids=where(c[1:]>c[:-1])[0]
weight=diff(c[[0]+list(ids+1)])
t=self.sample(ids)
t['weight']=weight
return t
def thinto(self,num):
"""Thin so we end up with `num` total samples"""
return self.thin(self.length(unique=False)//num)
def savecov(self,file,params=None):
"""Write the covariance to a file where the first line is specifies the parameter names."""
if not params: params = self.params()
with open(file,'wb') as f:
f.write(("# "+" ".join(params)+"\n").encode())
savetxt(f,self.cov(params))
def savechain(self,file,params=None):
"""Write the chain to a file where the first line is specifies the parameter names."""
keys = ['lnl','weight']+list(params if params else self.params())
with open(file,'w') as f:
f.write("# "+" ".join(keys)+"\n")
savetxt(f,self.matrix(keys))
def plot(self,param=None,ncols=5,fig=None,size=4):
"""Plot the value of a parameter as a function of sample number."""
from matplotlib.pyplot import figure
if fig is None: fig=figure()
params = [param] if param is not None else self.params()
nrows = len(self.params())//ncols+1
fig.set_size_inches(ncols*size,nrows*size/1.6)
for i,param in enumerate(params,1):
ax=fig.add_subplot(nrows,ncols,i)
ax.plot(cumsum(self['weight']),self[param])
ax.set_title(param)
fig.tight_layout()
def like1d(self,p,**kwargs):
"""
Plots 1D likelihood contours for a parameter.
See :func:`~cosmoslik.chains.like1d`
"""
like1d(self[p],weights=self["weight"],**kwargs)
def like2d(self,p1,p2,**kwargs):
"""
Plots 2D likelihood contours for a pair of parameters.
See :func:`~cosmoslik.chains.like2d`
"""
like2d(self[p1], self[p2], weights=self["weight"], **kwargs)
def likegrid(self,**kwargs):
"""
Make a grid (aka "triangle plot") of 1- and 2-d likelihood contours.
See :func:`~cosmoslik.chains.likegrid`
"""
if 'color' in kwargs: kwargs['colors']=[kwargs.pop('color')]
likegrid(self,**kwargs)
def likepoints(self,*args,**kwargs):
"""
Plots samples from the chain as colored points.
See :func:`~cosmoslik.chains.likepoints`
"""
return likepoints(self,*args,**kwargs)
def likegrid1d(self,**kwargs):
"""
Make a grid of 1-d likelihood contours.
See :func:`~cosmoslik.chains.likegrid1d`
"""
likegrid1d(self,**kwargs)
def join(self):
"""Does nothing since already one chain."""
return self
def __repr__(self):
return self.__str__()
def __str__(self):
"""Print a summary of the chain. """
return chain_stats(self)
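# Illustrative sketch (not part of the original module): a Chain is just a
# dictionary of equal-length arrays, so a toy chain can be built by hand and
# summarized with the methods above.
def _toy_chain_example():
    from numpy.random import randn
    nsamp = 1000
    c = Chain({'a': randn(nsamp), 'b': 2*randn(nsamp), 'lnl': zeros(nsamp)})
    return c.mean('a'), c.std('b'), c.cov(['a', 'b'])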
def chain_stats(chain):
lines = []
lines.append(object.__repr__(chain))
params = chain.params(fixed=True)
maxlen = max(12,max(len(p) for p in params))+4
if isinstance(chain,Chains):
lines.append(('{:>%i}: {:}'%maxlen).format('# of chains',len(chain)))
chain = chain.join()
lines.append(('{:>%i}: {:}'%maxlen).format('# of steps',chain.length()))
lines.append(('{:>%i}: {:.2f}'%maxlen).format('total weight',chain['weight'].sum()))
lines.append(('{:>%i}: {:.3g}'%maxlen).format('acceptance',chain.acceptance()))
for p in sorted(params):
lines.append(('{:>%i}: {:>10.4g} ± {:.4g}'%maxlen).format(p,chain.mean(p),chain.std(p)))
return '\n'.join(lines)
class Chains(list):
"""A list of chains, e.g. from a run of several parallel chains"""
def burnin(self,n):
"""Remove the first n samples from each chain."""
return Chains(c.burnin(n) for c in self)
def join(self):
"""Combine the chains into one."""
return Chain((k,concatenate([c[k] for c in self])) for k in list(self[0].keys()))
def plot(self,param=None,fig=None,**kwargs):
"""Plot the value of a parameter as a function of sample number for each chain."""
from matplotlib.pyplot import figure
if fig is None: fig=figure()
for c in self:
if c: c.plot(param,fig=fig,**kwargs)
def params(self,**kwargs):
return self[0].params(**kwargs)
def __repr__(self):
return self.__str__()
def __str__(self):
"""Print a summary of the chain. """
return chain_stats(self)
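# Illustrative sketch (not part of the original module): typical handling of an
# MCMC run stored as several parallel chains (the file name here is
# hypothetical).
def _chains_workflow_example():
    chains = load_chain('chain.dat')   # may return a Chains object
    c = chains.burnin(500).join()      # drop burn-in from each chain, then combine
    return c.mean(), c.cov()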
def _postprocd_helper(func,kwargs):
return func(**kwargs)
def _reweighted_helper(func,weight,**kwargs):
return {'weight': weight * func(weight=weight,**kwargs)}
def likepoints(chain,p1,p2,pcolor,
npoints=1000,cmap=None,nsig=3,clim=None,marker='.',markersize=10,
ax=None,zorder=-1,cbar=True,cax=None,**kwargs):
"""
Plot p1 vs. p2 as points colored by the value of pcolor.
Args:
p1,p2,pcolor : parameter names
npoints : first thins the chain so this number of points are plotted
cmap : a colormap (default: jet)
nsig : map the range of the color map to +/- nsig
ax : axes to use for plotting (default: current axes)
cbar : whether to draw a colorbar
cax : axes to use for colorbar (default: steal from ax)
marker, markersize, zorder, **kwargs : passed to the plot() command
"""
from matplotlib.pyplot import get_cmap, cm, gca, sca, colorbar
from matplotlib import colors, colorbar
if cmap is None: cmap=get_cmap('jet')
if ax is None: ax=gca()
mu,sig = chain.mean(pcolor), chain.std(pcolor)
for s in chain.thin(int(sum(chain['weight'])/float(npoints))).iterrows():
if clim is None: c=cmap((s[pcolor]-mu)/(2*nsig*sig) + 0.5)
else: c = cmap((s[pcolor]-clim[0])/(clim[1]-clim[0]))
ax.plot(s[p1],s[p2],color=c,markeredgecolor=c,marker=marker,markersize=markersize,zorder=-1,**kwargs)
if cax is None: cax = colorbar.make_axes(ax)[0]
if clim is None: cb = colorbar.ColorbarBase(ax=cax, norm=colors.Normalize(vmin=mu-nsig*sig, vmax=mu+nsig*sig), cmap=cmap)
else: cb = colorbar.ColorbarBase(ax=cax, norm=colors.Normalize(vmin=clim[0], vmax=clim[1]), cmap=cmap)
sca(ax)
return ax,cax
def like2d(datx,daty,weights=None,
nbins=15, which=[.68,.95], range=None,
filled=True, color=None, cmap=None, smooth=None,
ax=None,
**kwargs):
from matplotlib.pyplot import gca, get_cmap
from matplotlib.mlab import movavg
from matplotlib.colors import LinearSegmentedColormap
from scipy.ndimage import gaussian_filter
if ax is None: ax = gca()
if weights is None: weights=ones(len(datx))
if color is None: color = kwargs.pop('c') if 'c' in kwargs else 'b'
H,xe,ye = histogram2d(datx,daty,nbins,weights=weights, range=range)
xem, yem = movavg(xe,2), movavg(ye,2)
kwargs = dict(levels=confint2d(H, sorted(which)[::-1]+[0]),**kwargs)
if smooth: H = gaussian_filter(H,smooth)
args = (xem,yem,transpose(H))
if cmap is None:
cmap = {'b':'Blues',
'g':'Greens',
'r':'Reds',
'orange':'Oranges',
'grey':'Greys'}.get(color)
if cmap is None: cmap = LinearSegmentedColormap.from_list(None,['w',color])
else: cmap = get_cmap(cmap)
if filled: ax.contourf(*args,cmap=cmap,**kwargs)
ax.contour(*args,colors=color,**kwargs)
def like1d(dat,weights=None,
nbins=30,ranges=None,maxed=False,
ax=None,smooth=False,
kde=True,
zero_endpoints=False,
filled=False,
**kw):
from matplotlib.pyplot import gca
if ax is None: ax = gca()
if kde:
try:
from getdist import MCSamples
except ImportError as e:
raise Exception("Plotting with kde, kde1d, or kde2d set to True requires package 'getdist'. Install this package or set to False.") from e
if ranges:
i = bitwise_and(dat>(ranges[0] or -Inf), dat<(ranges[1] or Inf))
dat = dat[i]
weights = weights[i]
d = MCSamples(samples=dat, weights=weights, names=['x'], ranges={'x':ranges or (None,None)}, settings={'smooth_scale_1D':(smooth or -1)}).get1DDensity(0)
d.normalize('max' if maxed else 'integral')
xem, H = d.x, d.P * (maxed or 1)
else:
from matplotlib.mlab import movavg
H, xe = histogram(dat,bins=nbins,weights=weights,normed=True,range=ranges)
xem=movavg(xe,2)
if smooth:
from scipy.interpolate import PchipInterpolator
itp = PchipInterpolator(xem,H)
xem = linspace(xem.min(),xem.max(),100)
H = itp(xem)
if maxed: H = H/max(H) * (maxed or 1)
if zero_endpoints:
xem = hstack([[xem[0]],xem,[xem[-1]]])
H = hstack([[0],H,[0]])
if filled:
ax.fill_between(xem,H,alpha=(0.5 if filled is True else filled),**kw)
kw.pop('label',None)
ax.plot(xem,H,**kw)
def get_correlation(data,weights=None):
cv = get_covariance(data,weights)
n,n = cv.shape
for i in range(n):
std=sqrt(cv[i,i])
cv[i,:]/=std
cv[:,i]/=std
return cv
def get_covariance(data,weights=None):
if (weights is None): return cov(data.T)
else:
mean = sum(data.T*weights,axis=1)/sum(weights)
zdata = data-mean
return dot(zdata.T*weights,zdata)/(sum(weights)-1)
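# Illustrative sketch (not part of the original module): with unit weights the
# weighted estimator above reduces to the usual (ddof=1) sample covariance.
def _covariance_check():
    from numpy.random import randn
    data = randn(500, 3)
    return allclose(get_covariance(data, ones(500)), cov(data.T))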
def likegrid(chains, params=None,
lims=None, ticks=None, nticks=4,
nsig=4,
spacing=0.05,
xtick_rotation=30,
colors=None, filled=True,
nbins1d=30, smooth1d=False, kde1d=True,
nbins2d=20,
labels=None,
fig=None,
size=2,
legend_loc=None,
param_name_mapping=None,
param_label_size=None):
"""
Make a grid (aka "triangle plot") of 1- and 2-d likelihood contours.
Parameters
----------
chains :
one or a list of `Chain` objects
default_chain, optional :
the chain used to get default parameters names, axes limits, and ticks
either an index into chains or a `Chain` object (default: chains[0])
params, optional :
list of parameter names which to show
(default: all parameters from default_chain)
lims, optional :
a dictionary mapping parameter names to (min,max) axes limits
(default: +/- 4 sigma from default_chain)
ticks, optional :
a dictionary mapping parameter names to list of [ticks]
(default: automatically picks `nticks`)
nticks, optional :
roughly how many ticks per axis (default: 4)
xtick_rotation, optional :
numbers of degrees to rotate the xticks by (default: 30)
spacing, optional :
space in between plots as a fraction of figure width (default: 0.05)
fig, optional :
figure of figure number in which to plot (default: figure(0))
size, optional :
size in inches of one plot (default: 2)
colors, optional :
colors to cycle through for plotting
filled, optional :
whether to fill in the contours (default: True)
labels, optional :
list of names for a legend
legend_loc, optional :
(x,y) location of the legend (coordinates scaled to [0,1])
nbins1d, optional :
number (or len(chains) length list) of bins for 1d plots (default: 30)
nbins2d, optional :
number (or len(chains) length list) of bins for 2d plots (default: 20)
"""
from matplotlib.pyplot import figure, Line2D, xticks
from matplotlib.ticker import MaxNLocator
fig = figure(0) if fig is None else (figure(fig) if isinstance(fig,int) else fig)
if type(chains)!=list: chains=[chains]
if params==None: params = sorted(reduce(lambda x,y: set(x)&set(y), [c.params() for c in chains]))
if param_name_mapping is None: param_name_mapping = {}
if size is not None: fig.set_size_inches(*([size*len(params)]*2))
if colors is None: colors=['b','orange','k','m','cyan']
if not isinstance(nbins2d,list): nbins2d = [nbins2d]*len(chains)
if not isinstance(nbins1d,list): nbins1d = [nbins1d]*len(chains)
fig.subplots_adjust(hspace=spacing,wspace=spacing)
if lims is None: lims = {}
lims = {p:(lims[p] if p in lims
else (min(max(min(c[p]),c.mean(p)-nsig*c.std(p)) for c in chains if p in c.params()),
max(min(max(c[p]),c.mean(p)+nsig*c.std(p)) for c in chains if p in c.params())))
for p in params}
if ticks is None: ticks = {}
if isinstance(nticks,int): nticks={p:nticks for p in params}
n=len(params)
for (i,p1) in enumerate(params):
for (j,p2) in enumerate(params):
if (i<=j):
ax=fig.add_subplot(n,n,j*n+i+1)
ax.xaxis.set_major_locator(MaxNLocator(nticks.get(p1,5)))
ax.yaxis.set_major_locator(MaxNLocator(nticks.get(p2,5)))
ax.set_xlim(*lims[p1])
if (i==j):
for (ch,col,nbins) in zip(chains,colors,nbins1d):
if p1 in ch: ch.like1d(p1,nbins=nbins,color=col,ax=ax,smooth=smooth1d,kde=kde1d)
ax.set_yticklabels([])
elif (i<j):
for (ch,col,nbins) in zip(chains,colors,nbins2d):
if p1 in ch and p2 in ch: ch.like2d(p1,p2,filled=filled,nbins=nbins,color=col,ax=ax)
if p2 in ticks: ax.set_yticks(ticks[p2])
ax.set_ylim(*lims[p2])
if i==0:
ax.set_ylabel(param_name_mapping.get(p2,p2),size=param_label_size)
else:
ax.set_yticklabels([])
if j==n-1:
ax.set_xlabel(param_name_mapping.get(p1,p1),size=param_label_size)
xticks(rotation=xtick_rotation)
else:
ax.set_xticklabels([])
if labels is not None:
fig.legend([Line2D([0],[0],c=c,lw=2) for c in colors],labels,
fancybox=True,shadow=False,
loc='upper right', bbox_to_anchor=(legend_loc or (0.8,0.8)))
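# Illustrative sketch (not part of the original module): a typical triangle
# plot comparing two chains; the chain objects, parameter names, and labels
# here are hypothetical.
def _likegrid_example(chain1, chain2):
    likegrid([chain1, chain2], params=['omegabh2', 'ns'],
             labels=['run 1', 'run 2'], filled=True)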
from collections import Iterable
import operator as op
def likegrid1d(chains,
params='all',
lims=None,
ticks=None,
nticks=4,
nsig=3,
colors=None,
nbins1d=30,
smooth1d=False,
kde1d=True,
labels=None,
fig=None,
size=2,
aspect=1,
legend_loc=None,
linewidth=1,
param_name_mapping=None,
param_label_size=None,
tick_label_size=None,
titley=1,
ncol=4,
axes=None):
"""
Make a grid of 1-d likelihood contours.
Arguments:
----------
chains :
one or a list of `Chain` objects
default_chain, optional :
the chain used to get default parameters names, axes limits, and ticks
either an index into chains or a `Chain` object (default: chains[0])
params, optional :
list of parameter names which to show
can also be 'all' or 'common' which does the union/intersection of
the params in all the chains
lims, optional :
a dictionary mapping parameter names to (min,max) axes limits
(default: +/- 4 sigma from default_chain)
ticks, optional :
a dictionary giving a list of ticks for each parameter
nticks, optional :
roughly how many x ticks to show. can be dictionary to
specify each parameter separately. (default: 4)
fig, optional :
figure of figure number in which to plot (default: new figure)
ncol, optional :
the number of colunms (default: 4)
axes, optional :
an array of axes into which to plot. if this is provided, fig and ncol
are ignored. must have len(axes) >= len(params).
size, optional :
size in inches of one plot (default: 2)
aspect, optional :
aspect ratio (default: 1)
colors, optional :
colors to cycle through for plotting
filled, optional :
whether to fill in the contours (default: True)
labels, optional :
list of names for a legend
legend_loc, optional :
(x,y) location of the legend (coordinates scaled to [0,1])
nbins1d, optional :
number of bins for 1d plots (default: 30)
nbins2d, optional :
number of bins for 2d plots (default: 20)
"""
from matplotlib.pyplot import gcf, Line2D
from matplotlib.ticker import AutoMinorLocator, ScalarFormatter, MaxNLocator
if type(chains)!=list: chains=[chains]
if params in ['all','common']:
params = sorted(reduce(lambda x,y: (op.__or__ if params=='all' else op.__and__)(set(x),set(y)), [c.params() for c in chains]))
elif not isinstance(params,Iterable):
raise ValueError("params should be iterable or 'all' or 'common'")
if param_name_mapping is None: param_name_mapping = {}
nrow = int(floor(len(params)/ncol))+1
if fig is None:
fig = gcf()
fig.subplots_adjust(hspace=0.4,wspace=0.1)#,bottom=0, top=1, left=0, right=1)
if size is not None:
fig.set_size_inches(size*ncol,size*nrow/aspect)
if colors is None:
colors = ['b','orange','k','m','cyan']
if lims is None: lims = {}
lims = {p:(lims[p] if p in lims
else (min(max(min(c[p]),c.mean(p)-nsig*c.std(p)) for c in chains if p in c.params()),
max(min(max(c[p]),c.mean(p)+nsig*c.std(p)) for c in chains if p in c.params())))
for p in params}
for (i,p1) in enumerate(params,1):
ax=fig.add_subplot(nrow,ncol,i)
if ticks is not None and p1 in ticks:
ax.set_xticks(ticks[p1])
for (ch,col) in zip(chains,colors):
if p1 in ch: ch.like1d(p1,nbins=nbins1d,color=col,ax=ax,linewidth=linewidth,smooth=smooth1d,kde=kde1d)
ax.set_yticks([])
ax.set_xlim(lims[p1])
ax.set_ylim(0,1)
ax.set_title(param_name_mapping.get(p1,p1),size=param_label_size,y=titley)
ax.tick_params(labelsize=tick_label_size)
if ticks and p1 in ticks:
ax.set_xticks(ticks[p1])
else:
ax.xaxis.set_major_locator(MaxNLocator(nbins=nticks.get(p1,4) if isinstance(nticks,dict) else nticks))
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
if labels is not None:
lg = fig.legend([Line2D([0],[0],c=c,linewidth=2) for c in colors],labels,fancybox=True,shadow=False,
loc='upper center', bbox_to_anchor=(legend_loc or (0.5,1.1)))
return lg
def confint2d(hist,which):
"""
Return confidence levels in a histogram.
Parameters
----------
hist:
A 2D histogram
which:
A list of confidence levels, e.g. [.68, .95]
"""
H=sort(hist.ravel())[::-1]
sumH=sum(H)
cdf=array([sum(H[H>x])/sumH for x in H])
return interp(which,cdf,H)
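# Illustrative sketch (not part of the original module): for a histogram of
# Gaussian samples, confint2d returns the histogram heights enclosing the
# requested probability mass, suitable for use as contour levels.
def _confint2d_example():
    from numpy.random import randn
    H, _, _ = histogram2d(randn(100000), randn(100000), bins=50)
    return confint2d(H, [0.68, 0.95])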
def load_cosmomc_chain(path,paramnames=None):
"""
Load a chain from a file or files in a variety of different formats.
If ``path`` is a CosmoSlik ini, the read the ``output_file`` key from the
ini and load that chain.
If ``path`` is a file, return a :class:`~cosmoslik.chains.Chain` object. The names of the parameters
are expected either as a whitespace-separated comment on the first line of
the file (CosmoSlik style) or in a separate file
called <path>.paramnames (CosmoMC style).
If ``path`` is a directory, assumes it contains one file for each parameter (WMAP style)
and gets the parameter name from the file name.
If ``path`` is a prefix such that there exists <path>_1, <path>_2, etc...
then returns a :class:`~cosmoslik.chains.Chains` object which is a list of chains.
"""
if path.endswith('.ini'):
p = load_ini(path)
return load_chain(p['output_file'])
else:
def load_one_chain(path):
nonlocal paramnames
if os.path.isdir(path):
if paramnames is not None: raise Exception("Can't specify custom parameter names if loading chain from a directory.")
chain = {}
for k in os.listdir(path):
try: chain[k]=loadtxt(os.path.join(path,k),usecols=[-1])
except: pass
return Chain(chain)
else:
# try automatically finding the corresponding *.paramnames file
if paramnames is None:
pnfiles = [os.path.join(os.path.dirname(path),f) for f in os.listdir(os.path.dirname(path)) if f.endswith('.paramnames') and os.path.basename(path).startswith(f[:-len('.paramnames')])]
if len(pnfiles)>1:
raise Exception('Found multiple paramnames files for this chain; %s'%pnfiles)
elif len(pnfiles)==1:
paramnames = pnfiles[0]
# if we have a *.paramnames file at this point, load it
if isinstance(paramnames,str):
with open(paramnames) as f:
paramnames = ['weight','lnl']+[line.split()[0] for line in f]
with open(path) as f:
# if still no *.paramnames, look for names inside a comment on the first line
if paramnames is None:
line = f.readline()
if not line.startswith("#"):
raise Exception("Couldn't find any paramnames. Specify paramnames=... by hand.")
paramnames = re.sub("#","",line).split()
try:
data = loadtxt(f).T
except:
return None
else:
return Chain(list(zip(paramnames,data)))
path = os.path.abspath(path)
dir = os.path.dirname(path)
files = [os.path.join(dir,f) for f in os.listdir('.' if dir=='' else dir) if re.match(os.path.basename(path)+'_[0-9]+',f) or f==os.path.basename(path)]
if len(files)==1: return load_one_chain(files[0])
elif len(files)>1: return Chains([c for c in (load_one_chain(f) for f in files) if c])
else: raise IOError("File not found: "+path)
def is_iter(x):
try:
iter(x)
return True
except:
return False
def combine_covs(*covs):
"""
Combines a bunch of covariances into a single array. If a parameter appears
in multiple covariances, the covariance appearing last in the list is used.
Each cov can be:
* tuple of ([names...], 2-d array)
* filename (file's first line is "#paramname1 paramname2 ..." and next lines are array)
* `Chain` object (will call its cov() function)
* {k:std(v)...}
Returns:
Tuple of ([names...], 2-d array)
"""
def to_array(cov):
if isinstance(cov,tuple):
return cov
elif isinstance(cov,str):
with open(cov) as f:
return re.sub("#","",f.readline()).split(), loadtxt(f)
elif isinstance(cov,Chain):
return cov.params(), cov.cov()
elif isinstance(cov,dict):
return [k for k in cov], diag([v**2 for v in list(cov.values())])
else:
raise ValueError("Unrecognized covariance data type.")
covs = [to_array(cov) for cov in covs]
allnames = list(chain(*[n for n,_ in covs]))
allcov = zeros((len(allnames),len(allnames)))
for (names,cov) in covs:
idxs = [allnames.index(n) for n in names]
for i in idxs: allcov[i,:] = allcov[:,i] = 0
allcov[ix_(idxs,idxs)] = cov
return allnames, allcov
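# Illustrative sketch (not part of the original module): combining a 1-d
# standard deviation for 'a' with a 2-d block for 'b' and 'c' gives a block
# diagonal covariance over ['a', 'b', 'c'].
def _combine_covs_example():
    return combine_covs({'a': 1.0}, (['b', 'c'], array([[1.0, 0.5], [0.5, 2.0]])))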
def load_chain(filename, repack=False):
"""
Load a chain produced by a compatible CosmoSlik sampler like
metropolis_hastings or emcee.
Parameters:
-----------
repack :
If the chain file is not currently open (i.e. the chain is not currently
running), and if the chain is stored in chunks as output from an MPI run,
then overwrite the file with a more efficiently stored version which
will be faster to load the next time.
"""
with open(filename, 'rb') as f:
c = pickle.load(f)
if isinstance(c,(Chain,Chains)):
return c
else:
names = [n.decode() if isinstance(n,bytes) else n for n in c]
dat = []
while True:
try:
dat.append(pickle.load(f,encoding="latin1"))
except:
break
ii=set(i for i,_ in dat)
if dat[0][1].dtype.kind=='V':
c = Chains([Chain({n:concatenate([d['f%i'%k] for j,d in dat if i==j]) for k,n in enumerate(names)}) for i in ii])
else:
c = Chains([Chain(dict(list(zip(names,vstack([d for j,d in dat if i==j]).T)))) for i in ii])
if repack:
try:
open_files = check_output(["lsof","--",filename], stderr=STDOUT).splitlines()[1:]
except CalledProcessError as e:
if e.returncode == 1 and e.output==b'':
open_files = []
else:
return c
if not any([l.split()[3].decode()[-1] in "uw" for l in open_files]):
with open(filename,'wb') as f:
pickle.dump(c,f)
print("Repacked: "+filename)
return c
| gpl-3.0 |
btabibian/scikit-learn | examples/calibration/plot_calibration.py | 66 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others being under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see https://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
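# Illustrative sketch (not part of the original example): the n_bins defined
# above could be used to draw a reliability diagram with
# sklearn.calibration.calibration_curve, e.g.
#
# from sklearn.calibration import calibration_curve
# frac_pos, mean_pred = calibration_curve(y_test, prob_pos_isotonic, n_bins=n_bins)
# plt.plot(mean_pred, frac_pos, 's-', label='isotonic')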
| bsd-3-clause |
depet/scikit-learn | sklearn/neighbors/classification.py | 2 | 14043 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import atleast2d_or_csr
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
**kwargs :
additional keyword arguments are passed to the distance function as
additional arguments.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', **kwargs):
if kwargs:
if 'warn_on_equidistant' in kwargs:
kwargs.pop('warn_on_equidistant')
warnings.warn("The warn_on_equidistant parameter is "
"deprecated and will be removed in 0.16.",
DeprecationWarning,
stacklevel=2)
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = atleast2d_or_csr(X)
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
y_pred[:, k] = classes_k.take(mode.flatten().astype(np.int))
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array, shape = (n_samples, n_features)
A 2-D array representing the test points.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = atleast2d_or_csr(X)
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# NumPy fancy-index assignment does not accumulate votes for repeated
# class indices, so add the contribution of one neighbor at a time
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
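# Illustrative sketch (not part of the original module): ``weights`` also
# accepts a callable that maps an array of neighbor distances to an array
# of weights of the same shape, e.g. a Gaussian kernel:
#
# def gaussian_weights(distances, bandwidth=0.5): # bandwidth is arbitrary
# return np.exp(-(distances / bandwidth) ** 2)
#
# clf = KNeighborsClassifier(n_neighbors=3, weights=gaussian_weights)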
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label assigned to outlier samples (samples with no neighbors within
the given radius).
If set to None, a ValueError is raised when an outlier is detected.
**kwargs :
additional keyword arguments are passed to the distance function as
additional arguments.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, **kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = atleast2d_or_csr(X)
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel().astype(np.int)
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
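# Illustrative sketch (not part of the original module): with
# ``outlier_label`` set, query points that have no neighbors within
# ``radius`` receive that label instead of triggering a ValueError.
#
# clf = RadiusNeighborsClassifier(radius=1.0, outlier_label=-1)
# clf.fit([[0], [1], [2], [3]], [0, 0, 1, 1])
# clf.predict([[0.5], [10.0]]) # [10.0] has no neighbors -> labeled -1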
| bsd-3-clause |
hmenke/espresso | samples/visualization_ljliquid.py | 1 | 6372 | #
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
""" Visualization sample for a Lennard Jones liquid with live plotting via matplotlib.
"""
from __future__ import print_function
import numpy as np
from matplotlib import pyplot
from threading import Thread
import espressomd
from espressomd import thermostat
from espressomd import integrate
from espressomd import visualization
required_features = ["LENNARD_JONES"]
espressomd.assert_features(required_features)
print("""
=======================================================
= visualization_ljliquid.py =
=======================================================
Program Information:""")
print(espressomd.features())
dev = "cpu"
# System parameters
#############################################################
# Box length and density (roughly 870 particles for these values)
box_l = 10.7437
density = 0.7
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l] * 3)
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
np.random.seed(seed=system.seed)
system.time_step = 0.001
system.cell_system.skin = 0.4
#es._espressoHandle.Tcl_Eval('thermostat langevin 1.0 1.0')
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.9
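# The LJ force is capped during warmup because the random initial
# configuration can contain strongly overlapping particles; the cap is
# raised step by step in the warmup loop and removed before the
# production run.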
# integration
int_steps = 10
int_n_times = 50000
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
system.force_cap = lj_cap
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())
# Particle setup
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
system.analysis.dist_to(0)
print("Simulate {} particles in a cubic simulation box {} at density {}."
.format(n_part, box_l, density).strip())
print("Interactions:\n")
act_min_dist = system.analysis.min_dist()
print("Start with minimal distance {}".format(act_min_dist))
system.cell_system.max_num_cells = 2744
# Switch between openGl/Mayavi
#visualizer = visualization.mayaviLive(system)
visualizer = visualization.openGLLive(system)
#############################################################
# Warmup Integration #
#############################################################
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
# set LJ cap
lj_cap = 20
system.force_cap = lj_cap
print(system.non_bonded_inter[0, 0].lennard_jones)
# Warmup Integration Loop
i = 0
while (i < warm_n_times and act_min_dist < min_dist):
system.integrator.run(warm_steps)
# Warmup criterion
act_min_dist = system.analysis.min_dist()
# print("\rrun %d at time=%f (LJ cap=%f) min dist = %f\r" %
# (i,system.time,lj_cap,act_min_dist), end=' ')
i += 1
# Increase LJ cap
lj_cap = lj_cap + 10
system.force_cap = lj_cap
visualizer.update()
# Just to see what else we may get from the c code
# print("""
# ro variables:
# cell_grid {0.cell_grid}
# cell_size {0.cell_size}
# local_box_l {0.local_box_l}
# max_cut {0.max_cut}
# max_part {0.max_part}
# max_range {0.max_range}
# max_skin {0.max_skin}
# n_nodes {0.n_nodes}
# n_part {0.n_part}
# n_part_types {0.n_part_types}
# periodicity {0.periodicity}
# verlet_reuse {0.verlet_reuse}
#""".format(system))
#############################################################
# Integration #
#############################################################
print("\nStart integration: run %d times %d steps" % (int_n_times, int_steps))
# remove force capping
lj_cap = 0
system.force_cap = lj_cap
print(system.non_bonded_inter[0, 0].lennard_jones)
# print initial energies
energies = system.analysis.energy()
print(energies)
plot, = pyplot.plot([0], [energies['total']], label="total")
pyplot.xlabel("Time")
pyplot.ylabel("Energy")
pyplot.legend()
pyplot.show(block=False)
j = 0
def main_loop():
global energies
print("run %d at time=%f " % (i, system.time))
system.integrator.run(int_steps)
visualizer.update()
energies = system.analysis.energy()
plot.set_xdata(np.append(plot.get_xdata(), system.time))
plot.set_ydata(np.append(plot.get_ydata(), energies['total']))
def main_thread():
for i in range(0, int_n_times):
main_loop()
last_plotted = 0
def update_plot():
global last_plotted
current_time = plot.get_xdata()[-1]
if last_plotted == current_time:
return
last_plotted = current_time
pyplot.xlim(0, plot.get_xdata()[-1])
pyplot.ylim(plot.get_ydata().min(), plot.get_ydata().max())
pyplot.draw()
pyplot.pause(0.01)
t = Thread(target=main_thread)
t.daemon = True
t.start()
visualizer.register_callback(update_plot, interval=1000)
visualizer.start()
# terminate program
print("\nFinished.")
| gpl-3.0 |
treycausey/scikit-learn | sklearn/pipeline.py | 8 | 16439 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
# One round of beers on me if someone finds out why the backslash
# is needed in the Attributes section so as not to upset sphinx.
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Parameters
----------
steps: list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
"""
# BaseEstimator interface
def __init__(self, steps):
self.named_steps = dict(steps)
names, estimators = zip(*steps)
if len(self.named_steps) != len(steps):
raise ValueError("Names provided are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(zip(names, estimators))
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform."
" '%s' (type %s) doesn't." % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit. "
"'%s' (type %s) doesn't."
% (estimator, type(estimator)))
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps.copy()
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator."""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
def predict_log_proba(self, X):
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform."""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
def inverse_transform(self, X):
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
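# Illustrative sketch (not part of the original module): combining two
# feature extractors and down-weighting one of them; the names and
# estimators chosen here are arbitrary (imports assumed).
#
# union = FeatureUnion([('pca', PCA(n_components=2)),
# ('kbest', SelectKBest(k=1))],
# transformer_weights={'kbest': 0.5})
# X_features = union.fit_transform(X, y)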
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_examples/animation/old_animation/dynamic_collection.py | 9 | 1371 | import random
from matplotlib.collections import RegularPolyCollection
import matplotlib.cm as cm
from matplotlib.pyplot import figure, show
from numpy.random import rand
fig = figure()
ax = fig.add_subplot(111, xlim=(0,1), ylim=(0,1), autoscale_on=False)
ax.set_title("Press 'a' to add a point, 'd' to delete one")
# a single point
offsets = [(0.5,0.5)]
facecolors = [cm.jet(0.5)]
collection = RegularPolyCollection(
#fig.dpi,
5, # a pentagon
rotation=0,
sizes=(50,),
facecolors = facecolors,
edgecolors = 'black',
linewidths = (1,),
offsets = offsets,
transOffset = ax.transData,
)
ax.add_collection(collection)
def onpress(event):
"""
press 'a' to add a random point from the collection, 'd' to delete one
"""
if event.key=='a':
x,y = rand(2)
color = cm.jet(rand())
offsets.append((x,y))
facecolors.append(color)
collection.set_offsets(offsets)
collection.set_facecolors(facecolors)
fig.canvas.draw()
elif event.key=='d':
N = len(offsets)
if N>0:
ind = random.randint(0,N-1)
offsets.pop(ind)
facecolors.pop(ind)
collection.set_offsets(offsets)
collection.set_facecolors(facecolors)
fig.canvas.draw()
fig.canvas.mpl_connect('key_press_event', onpress)
show()
| mit |
thu-ml/zhusuan | examples/toy_examples/gaussian.py | 1 | 3158 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import tensorflow as tf
import zhusuan as zs
@zs.meta_bayesian_net()
def gaussian(n_x, stdev, n_particles):
bn = zs.BayesianNet()
bn.normal('x', tf.zeros([n_x]), std=stdev, n_samples=n_particles,
group_ndims=1)
return bn
if __name__ == "__main__":
tf.set_random_seed(1)
# Define model parameters
n_x = 1
# n_x = 10
stdev = 1 / (np.arange(n_x, dtype=np.float32) + 1)
# Define HMC parameters
kernel_width = 0.1
n_chains = 1000
n_iters = 200
burnin = n_iters // 2
n_leapfrogs = 5
# Build the computation graph
model = gaussian(n_x, stdev, n_chains)
adapt_step_size = tf.placeholder(tf.bool, shape=[], name="adapt_step_size")
adapt_mass = tf.placeholder(tf.bool, shape=[], name="adapt_mass")
hmc = zs.HMC(step_size=1e-3, n_leapfrogs=n_leapfrogs,
adapt_step_size=adapt_step_size, adapt_mass=adapt_mass,
target_acceptance_rate=0.9)
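# The step size and mass matrix are adapted only while the placeholders
# are fed True, i.e. during the first half of burn-in (see the feed_dict
# below); afterwards they stay fixed so the chain targets the correct
# stationary distribution.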
x = tf.Variable(tf.zeros([n_chains, n_x]), trainable=False, name='x')
sample_op, hmc_info = hmc.sample(model, {}, {'x': x})
# Run the inference
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
samples = []
print('Sampling...')
for i in range(n_iters):
_, x_sample, acc, ss = sess.run(
[sample_op, hmc_info.samples['x'], hmc_info.acceptance_rate,
hmc_info.updated_step_size],
feed_dict={adapt_step_size: i < burnin // 2,
adapt_mass: i < burnin // 2})
print('Sample {}: Acceptance rate = {}, updated step size = {}'
.format(i, np.mean(acc), ss))
if i >= burnin:
samples.append(x_sample)
print('Finished.')
samples = np.vstack(samples)
# Check & plot the results
print('Expected mean = {}'.format(np.zeros(n_x)))
print('Sample mean = {}'.format(np.mean(samples, 0)))
print('Expected stdev = {}'.format(stdev))
print('Sample stdev = {}'.format(np.std(samples, 0)))
print('Relative error of stdev = {}'.format(
(np.std(samples, 0) - stdev) / stdev))
def kde(xs, mu, batch_size):
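# Gaussian kernel density estimate with bandwidth `kernel_width`,
# evaluated at the points `xs`: the density is the average of the normal
# pdf N(x; mu_j, kernel_width^2) over all sample locations mu_j, computed
# in batches of `batch_size` samples to keep memory bounded.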
mu_n = len(mu)
assert mu_n % batch_size == 0
xs_row = np.expand_dims(xs, 1)
ys = np.zeros(xs.shape)
for b in range(mu_n // batch_size):
mu_col = np.expand_dims(mu[b * batch_size:(b + 1) * batch_size], 0)
ys += (1 / np.sqrt(2 * np.pi) / kernel_width) * \
np.mean(np.exp((-0.5 / kernel_width ** 2) *
np.square(xs_row - mu_col)), 1)
ys /= (mu_n / batch_size)
return ys
if n_x == 1:
xs = np.linspace(-5, 5, 1000)
ys = kde(xs, np.squeeze(samples), n_chains)
f, ax = plt.subplots()
ax.plot(xs, ys)
ax.plot(xs, stats.norm.pdf(xs, scale=stdev[0]))
plt.show()
| mit |
cloud-fan/spark | python/pyspark/testing/sqlutils.py | 23 | 7740 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import shutil
import tempfile
from contextlib import contextmanager
from pyspark.sql import SparkSession
from pyspark.sql.types import ArrayType, DoubleType, UserDefinedType, Row
from pyspark.testing.utils import ReusedPySparkTestCase
pandas_requirement_message = None
try:
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
pandas_requirement_message = str(e)
pyarrow_requirement_message = None
try:
from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
pyarrow_requirement_message = str(e)
test_not_compiled_message = None
try:
from pyspark.sql.utils import require_test_compiled
require_test_compiled()
except Exception as e:
test_not_compiled_message = str(e)
have_pandas = pandas_requirement_message is None
have_pyarrow = pyarrow_requirement_message is None
test_compiled = test_not_compiled_message is None
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in only Python
"""
__UDT__ = PythonOnlyUDT() # type: ignore
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class SQLTestUtils(object):
"""
This util assumes the instance using it has a 'spark' attribute holding a Spark session.
It is usually used together with the 'ReusedSQLTestCase' class, but it can be used on its
own as long as the implementing class provides that 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
A convenient context manager to test some configuration-specific logic. This sets
each configuration `key` to its `value` and restores the original settings when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
@contextmanager
def database(self, *databases):
"""
A convenient context manager to test with some specific databases. This drops the given
databases if they exist and sets the current database to "default" when it exits.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for db in databases:
self.spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db)
self.spark.catalog.setCurrentDatabase("default")
@contextmanager
def table(self, *tables):
"""
A convenient context manager to test with some specific tables. This drops the given tables
if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for t in tables:
self.spark.sql("DROP TABLE IF EXISTS %s" % t)
@contextmanager
def tempView(self, *views):
"""
A convenient context manager to test with some specific views. This drops the given views
if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for v in views:
self.spark.catalog.dropTempView(v)
@contextmanager
def function(self, *functions):
"""
A convenient context manager to test with some specific functions. This drops the given
functions if it exists.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for f in functions:
self.spark.sql("DROP FUNCTION IF EXISTS %s" % f)
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super(ReusedSQLTestCase, cls).setUpClass()
cls.spark = SparkSession(cls.sc)
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
super(ReusedSQLTestCase, cls).tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
| apache-2.0 |
theoryno3/scikit-learn | sklearn/preprocessing/tests/test_data.py | 3 | 35967 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
# np.log(1e-5) is taken because of its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation runs into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
X[:, 0] = 0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
# here were calculated to fit the quantiles produced by np.percentile
# using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
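# Worked out for the third column [0.5, -0.1, 1.1]: np.percentile's linear
# interpolation gives 25th/50th/75th percentiles of 0.2 / 0.5 / 0.8, so the
# IQR is 0.6 and (x - 0.5) / 0.6 yields exactly the values below.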
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='max', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
        assert_almost_equal(row_maxs[3], 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
    for obj in (StandardScaler(), Normalizer(), Binarizer()):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
    # number of values per feature given explicitly as 4 (i.e., values 0 through 3)
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
    # check that transforming with a larger in-range feature value works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
    # Raise error if handle_unknown is neither 'ignore' nor 'error'.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
santis19/tesina-fisica | Flexion/modelo-sintetico/4-flex-y-lito-0.8/lib/double_window_selection.py | 2 | 3825 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
class DoubleWindowSelection(object):
def __init__(self,x,y,ax1,ax2):
self.x = x
self.y = y
#~ self.z = z
self.dx = abs(self.x[0][0] - self.x[0][-1])/(np.shape(self.x)[1]-1)
self.dy = abs(self.y[0][0] - self.y[-1][0])/(np.shape(self.y)[0]-1)
assert self.dx == self.dy, "dx != dy"
self.rect1 = Rectangle((0,0), 1, 1,fc='None')
self.rect2 = Rectangle((0,0), 1, 1,fc='None')
self.x_center, self.y_center = None, None
self.half_width = (min(np.shape(self.x))/8)*self.dx
self.x1, self.x2 = None, None
self.y1, self.y2 = None, None
self.ax1 = ax1
self.ax2 = ax2
self.l1, = self.ax1.plot([self.x_center],[self.y_center],'o')
self.l2, = self.ax2.plot([self.x_center],[self.y_center],'o')
self.ax1.add_patch(self.rect1)
self.ax2.add_patch(self.rect2)
self.ax1.figure.canvas.mpl_connect('button_press_event', self.on_press)
self.ax1.figure.canvas.mpl_connect('scroll_event', self.on_scroll)
self.ax1.figure.canvas.mpl_connect('key_press_event', self.on_key)
print "\nWINDOWS INSTRUCTIONS:"
print "Click to select the window center"
print "Move the center with arrows or click again"
print "Resize the window with the mouse scroll or with '+' and '-'"
print "Press 'i' to show information about the window"
def on_press(self,event):
if event.inaxes == self.ax1 or event.inaxes == self.ax2:
if event.button == 1:
self.x_center, self.y_center = event.xdata, event.ydata
#~ self.x_center, self.y_center = nearest_point(self.x, self.y,
#~ self.x_center, self.y_center)
self.x_center -= self.x_center%self.dx
self.y_center -= self.y_center%self.dx
self.rectangle_construction()
else:
return
def on_scroll(self,event):
self.half_width += event.step * self.dx
self.rectangle_construction()
def on_key(self,event):
event_list = ["right","left","up","down"]
if event.key in event_list:
if event.key == "right":
self.x_center += self.dx
elif event.key == "left":
self.x_center -= self.dx
elif event.key == "up":
self.y_center += self.dx
elif event.key == "down":
self.y_center -= self.dx
self.rectangle_construction()
if event.key == "i":
print "(x,y)=",(self.x_center,self.y_center),"Width:",self.half_width*2
if event.key == "+" or event.key == "-":
if event.key == "+":
self.half_width += self.dx
elif event.key == "-":
self.half_width -= self.dx
self.rectangle_construction()
def rectangle_construction(self):
self.x1 = self.x_center - self.half_width
self.x2 = self.x_center + self.half_width
self.y1 = self.y_center - self.half_width
self.y2 = self.y_center + self.half_width
self.rect1.set_width(self.x2 - self.x1)
self.rect1.set_height(self.y2 - self.y1)
self.rect1.set_xy((self.x1, self.y1))
self.l1.set_xdata([self.x_center])
self.l1.set_ydata([self.y_center])
self.rect2.set_width(self.x2 - self.x1)
self.rect2.set_height(self.y2 - self.y1)
self.rect2.set_xy((self.x1, self.y1))
self.l2.set_xdata([self.x_center])
self.l2.set_ydata([self.y_center])
self.ax1.figure.canvas.draw()
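# Minimal usage sketch (not part of the original class; the grid shape and axis
# limits below are illustrative assumptions). The selector expects 2-D meshgrid
# coordinate arrays with equal spacing in x and y, plus one axes object per window.
if __name__ == "__main__":
    x, y = np.meshgrid(np.arange(0.0, 50.0, 1.0), np.arange(0.0, 40.0, 1.0))
    fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True)
    for ax in (ax1, ax2):
        ax.set_xlim(x.min(), x.max())
        ax.set_ylim(y.min(), y.max())
    selector = DoubleWindowSelection(x, y, ax1, ax2)
    plt.show()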
| gpl-2.0 |
McIntyre-Lab/papers | fear_ase_2016/scripts/mcscript/wigglePlot.py | 3 | 7358 | #!/usr/bin/env python
# Built-in packages
import argparse
from argparse import RawDescriptionHelpFormatter
import logging
import sys
import re
# Add-on packages
import numpy as np
import matplotlib
matplotlib.use('Agg')
# McLab Packages
import mclib_Python as mclib
from mclib_Python import bam as mcbam
from mclib_Python import gff as mcgff
from mclib_Python import vcf2 as mcvcf
from mclib_Python import wiggle as mcwiggle
# TODO: Add ability to make overlapping wiggle when up to XX groups are given
def getOptions():
""" Function to pull in arguments """
description = """ This script constructs wiggle plots and gene models.
Wiggle plots are created from sorted BAM files. Gene models require a GFF
file at this time.
"""
parser = argparse.ArgumentParser(description=description, formatter_class=RawDescriptionHelpFormatter)
group1 = parser.add_argument_group('Data Types', 'The following types of data are capable of being handled.')
group1.add_argument("--bam", nargs='*', dest="bamList", action='store', required=True, help="Name with PATH to sorted BAM file. If multiple files are given they will be averaged. [Required]")
group1.add_argument("--gff", dest="gffName", action='store', required=False, help="Name with PATH to a GFF file. Note if a GFFutils database has not been created it will be, this could take around 30min [Optional]")
#group1.add_argument("--bed", dest="bedName", action='store', required=False, help="Name with PATH to a bed file [Optional].") # TODO: Add bed functionality
group1.add_argument("--vcf", dest="vcfName", action='store', required=False, help="Name with PATH to a vcf file zipped using bgzip. [Optional]")
#group1.add_argument("--fusions", dest="fusName", action='store', required=False, help="Name with PATH to a bed file contianing the genome coordinates to fusion name [Optional].") # TODO: Add fusion functionality
group2 = parser.add_argument_group('Region of Interest', 'Select which region you want to focus on, one of the following is Required:')
group2a = group2.add_mutually_exclusive_group(required=True)
group2a.add_argument("--gene", dest="geneName", action='store', help="Name of the gene you want to make wiggles of.")
group2a.add_argument("--region", dest="region", action='store', help="Name of the region of interset in the format 'chrom:start-end'")
group3 = parser.add_argument_group('Factors', 'Various options to tweak output')
group3.add_argument("--sample", dest="sample", action='store', required=False, help="Name of the sample you are working on. Note if you are using VCF file this name needs to match. [Optional]")
group3.add_argument("--fudge", dest="ff", action='store', type=int, default=0, required=False, help="Use a fudge factor, if 999 then use the built in fudge factor calculation. [Optional]")
group3.add_argument("--debug", dest="debug", action='store_true', required=False, help="Trun on debug output. [Optional]")
group4 = parser.add_argument_group('Output')
group4.add_argument("-o", dest="oname", action='store', required=True, help="Name of the output PNG. [Required]")
group4.add_argument("--log", dest="log", action='store', required=False, help="Name of the log file [Optional]; NOTE: if no file is provided logging information will be output to STDOUT")
args = parser.parse_args()
#args = parser.parse_args(['--gff', '/home/jfear/storage/useful_dmel_data/dmel-all-no-analysis-r5.51.gff', '--bam','/mnt/storage/cegs_aln/bam_fb551_genome_nodup/r101_V1.sorted.bam','/mnt/storage/cegs_aln/bam_fb551_genome_nodup/r101_V2.sorted.bam', '-g', 'InR', '-o', '/home/jfear/tmp/inr.png'])
return(args)
def main(args):
################################################################################
# GENE ANNOTATION
################################################################################
if args.gffName and args.geneName:
logger.info('Getting gene annotation from GFF file')
## Import GFF database
myGffDb = mcgff.FlyGff(args.gffName)
## Pull gene from database
myGene = mcgff.FlyGene(args.geneName, myGffDb)
chrom = myGene.chrom
start = myGene.start
end = myGene.end
## Create Gene Model
geneModel = mcwiggle.GeneModel(myGene)
elif args.region:
location = re.split(':|-',args.region)
chrom = location[0]
start = int(location[1])
end = int(location[2])
# TODO: would be nice to plot all gene models in a region
geneModel = None
else:
logger.error('You need to specify either a gene name along with a GFF or a region to plot')
raise ValueError
################################################################################
# GENE COVERAGE
################################################################################
logger.info('Creating pileups')
# Pull in bam file and make gene pileup
pileups = []
for bam in args.bamList:
currBam = mcbam.Bam(bam)
pileups.append(currBam.get_pileup(chrom, start, end))
# Average Pileups together
avgPileup = mcbam.avg_pileups(pileups, fudgeFactor=args.ff)
################################################################################
# Pull Variants if requested
################################################################################
if args.vcfName:
logger.info('Processing VCF file')
# Attach vcf file
myVcf = mcvcf.Vcf(args.vcfName)
# Pull the region of interest
region = myVcf.pull_vcf_region(chrom, start, end)
# Grab inidividuals that are homozygous for an alternate base
homz = myVcf.pull_homz(region=region, snp=True)
# Create list of positions that have a variants. If a certain sample is
# given only output variants for that sample.
variantPos = list()
for pos in homz:
if args.sample:
for line in homz[pos]:
if line == args.sample:
variantPos.append(pos)
else:
variantPos.append(pos)
else:
logger.debug('Setting VCF to None')
variantPos = None
################################################################################
# Make fusion models if requested
################################################################################
# TODO: add fusion model
fusionModel=None
################################################################################
# MAKE WIGGLES
################################################################################
mcwiggle.plot_wiggle(avgPileup, args.oname, chrom, start, end, geneModel=geneModel, fusionModel=fusionModel, variantPos=variantPos, title=args.sample)
if __name__ == '__main__':
    # Parse command-line arguments
args = getOptions()
# Turn on logging
logger = logging.getLogger()
if args.debug:
mclib.logger.setLogger(logger, args.log, 'debug')
else:
mclib.logger.setLogger(logger, args.log)
# Output git commit version to log, if user has access
mclib.git.git_to_log(__file__)
# Run Main part of the script
main(args)
logger.info("Script complete.")
| lgpl-3.0 |
JFriel/honours_project | networkx/build/lib/networkx/convert.py | 9 | 13221 | """Functions to convert NetworkX graphs to and from other formats.
The preferred way of converting data to a NetworkX graph is through the
graph constructor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a graph with a single edge from a dictionary of dictionaries
>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
See Also
--------
nx_agraph, nx_pydot
"""
# Copyright (C) 2006-2013 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import warnings
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['to_networkx_graph',
'from_dict_of_dicts', 'to_dict_of_dicts',
'from_dict_of_lists', 'to_dict_of_lists',
'from_edgelist', 'to_edgelist']
def _prep_create_using(create_using):
"""Return a graph object ready to be populated.
If create_using is None return the default (just networkx.Graph())
If create_using.clear() works, assume it returns a graph object.
Otherwise raise an exception because create_using is not a networkx graph.
"""
if create_using is None:
return nx.Graph()
try:
create_using.clear()
except:
raise TypeError("Input graph is not a networkx graph type")
return create_using
def to_networkx_graph(data,create_using=None,multigraph_input=False):
"""Make a NetworkX graph from a known data structure.
The preferred way to call this is automatically
from the class constructor
>>> d={0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
instead of the equivalent
>>> G=nx.from_dict_of_dicts(d)
Parameters
----------
    data : an object to be converted
Current known types are:
any NetworkX graph
dict-of-dicts
         dict-of-lists
list of edges
numpy matrix
numpy ndarray
scipy sparse matrix
pygraphviz agraph
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
If True and data is a dict_of_dicts,
try to create a multigraph assuming dict_of_dict_of_lists.
If data and create_using are both multigraphs then create
a multigraph from a multigraph.
"""
# NX graph
if hasattr(data,"adj"):
try:
result= from_dict_of_dicts(data.adj,\
create_using=create_using,\
multigraph_input=data.is_multigraph())
if hasattr(data,'graph') and isinstance(data.graph,dict):
result.graph=data.graph.copy()
if hasattr(data,'node') and isinstance(data.node,dict):
result.node=dict( (n,dd.copy()) for n,dd in data.node.items() )
return result
except:
raise nx.NetworkXError("Input is not a correct NetworkX graph.")
# pygraphviz agraph
if hasattr(data,"is_strict"):
try:
return nx.nx_agraph.from_agraph(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a correct pygraphviz graph.")
# dict of dicts/lists
if isinstance(data,dict):
try:
return from_dict_of_dicts(data,create_using=create_using,\
multigraph_input=multigraph_input)
except:
try:
return from_dict_of_lists(data,create_using=create_using)
except:
raise TypeError("Input is not known type.")
# list or generator of edges
if (isinstance(data,list)
or isinstance(data,tuple)
or hasattr(data,'next')
or hasattr(data, '__next__')):
try:
return from_edgelist(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a valid edge list")
# Pandas DataFrame
try:
import pandas as pd
if isinstance(data, pd.DataFrame):
try:
return nx.from_pandas_dataframe(data, create_using=create_using)
except:
msg = "Input is not a correct Pandas DataFrame."
raise nx.NetworkXError(msg)
except ImportError:
msg = 'pandas not found, skipping conversion test.'
warnings.warn(msg, ImportWarning)
# numpy matrix or ndarray
try:
import numpy
if isinstance(data,numpy.matrix) or \
isinstance(data,numpy.ndarray):
try:
return nx.from_numpy_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct numpy matrix or array.")
except ImportError:
warnings.warn('numpy not found, skipping conversion test.',
ImportWarning)
# scipy sparse matrix - any format
try:
import scipy
if hasattr(data,"format"):
try:
return nx.from_scipy_sparse_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct scipy sparse matrix type.")
except ImportError:
warnings.warn('scipy not found, skipping conversion test.',
ImportWarning)
raise nx.NetworkXError(\
"Input is not a known data type for conversion.")
return
def convert_to_undirected(G):
"""Return a new undirected representation of the graph G."""
return G.to_undirected()
def convert_to_directed(G):
"""Return a new directed representation of the graph G."""
return G.to_directed()
def to_dict_of_lists(G,nodelist=None):
"""Return adjacency representation of graph as a dictionary of lists.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
Notes
-----
Completely ignores edge data for MultiGraph and MultiDiGraph.
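    Examples
    --------
    A minimal doctest-style sketch (the neighbor ordering shown assumes the
    default dict-based adjacency storage):
    >>> G = nx.path_graph(3)
    >>> nx.to_dict_of_lists(G)
    {0: [1], 1: [0, 2], 2: [1]}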
"""
if nodelist is None:
nodelist=G
d = {}
for n in nodelist:
d[n]=[nbr for nbr in G.neighbors(n) if nbr in nodelist]
return d
def from_dict_of_lists(d,create_using=None):
"""Return a graph from a dictionary of lists.
Parameters
----------
d : dictionary of lists
A dictionary of lists adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
Examples
--------
>>> dol= {0:[1]} # single edge (0,1)
>>> G=nx.from_dict_of_lists(dol)
or
>>> G=nx.Graph(dol) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_nodes_from(d)
if G.is_multigraph() and not G.is_directed():
# a dict_of_lists can't show multiedges. BUT for undirected graphs,
# each edge shows up twice in the dict_of_lists.
# So we need to treat this case separately.
seen={}
for node,nbrlist in d.items():
for nbr in nbrlist:
if nbr not in seen:
G.add_edge(node,nbr)
seen[node]=1 # don't allow reverse edge to show up
else:
G.add_edges_from( ((node,nbr) for node,nbrlist in d.items()
for nbr in nbrlist) )
return G
def to_dict_of_dicts(G,nodelist=None,edge_data=None):
"""Return adjacency representation of graph as a dictionary of dictionaries.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
edge_data : list, optional
If provided, the value of the dictionary will be
set to edge_data for all edges. This is useful to make
an adjacency matrix type representation with 1 as the edge data.
       If edge_data is None, the edge data in G is used to fill the values.
       If G is a multigraph, the edge data is a dict for each pair (u,v).
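    Examples
    --------
    A small illustrative sketch (outputs shown for the simple, non-multigraph case):
    >>> G = nx.Graph()
    >>> G.add_edge(0, 1, weight=2)
    >>> nx.to_dict_of_dicts(G)
    {0: {1: {'weight': 2}}, 1: {0: {'weight': 2}}}
    >>> nx.to_dict_of_dicts(G, edge_data=1)
    {0: {1: 1}, 1: {0: 1}}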
"""
dod={}
if nodelist is None:
if edge_data is None:
for u,nbrdict in G.adjacency_iter():
dod[u]=nbrdict.copy()
else: # edge_data is not None
for u,nbrdict in G.adjacency_iter():
dod[u]=dod.fromkeys(nbrdict, edge_data)
else: # nodelist is not None
if edge_data is None:
for u in nodelist:
dod[u]={}
for v,data in ((v,data) for v,data in G[u].items() if v in nodelist):
dod[u][v]=data
else: # nodelist and edge_data are not None
for u in nodelist:
dod[u]={}
for v in ( v for v in G[u] if v in nodelist):
dod[u][v]=edge_data
return dod
def from_dict_of_dicts(d,create_using=None,multigraph_input=False):
"""Return a graph from a dictionary of dictionaries.
Parameters
----------
d : dictionary of dictionaries
A dictionary of dictionaries adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
When True, the values of the inner dict are assumed
to be containers of edge data for multiple edges.
Otherwise this routine assumes the edge data are singletons.
Examples
--------
>>> dod= {0: {1:{'weight':1}}} # single edge (0,1)
>>> G=nx.from_dict_of_dicts(dod)
or
>>> G=nx.Graph(dod) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_nodes_from(d)
# is dict a MultiGraph or MultiDiGraph?
if multigraph_input:
# make a copy of the list of edge data (but not the edge data)
if G.is_directed():
if G.is_multigraph():
G.add_edges_from( (u,v,key,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else:
G.add_edges_from( (u,v,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else: # Undirected
if G.is_multigraph():
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.add_edges_from( (u,v,key,data)
for key,data in datadict.items()
)
seen.add((v,u))
else:
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.add_edges_from( (u,v,data)
for key,data in datadict.items() )
seen.add((v,u))
else: # not a multigraph to multigraph transfer
if G.is_multigraph() and not G.is_directed():
# d can have both representations u-v, v-u in dict. Only add one.
# We don't need this check for digraphs since we add both directions,
# or for Graph() since it is done implicitly (parallel edges not allowed)
seen=set()
for u,nbrs in d.items():
for v,data in nbrs.items():
if (u,v) not in seen:
G.add_edge(u,v,attr_dict=data)
seen.add((v,u))
else:
G.add_edges_from( ( (u,v,data)
for u,nbrs in d.items()
for v,data in nbrs.items()) )
return G
def to_edgelist(G,nodelist=None):
"""Return a list of edges in the graph.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
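    Examples
    --------
    A minimal sketch (each tuple carries the edge data dictionary):
    >>> G = nx.Graph([(0, 1)])
    >>> nx.to_edgelist(G)
    [(0, 1, {})]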
"""
if nodelist is None:
return G.edges(data=True)
else:
return G.edges(nodelist,data=True)
def from_edgelist(edgelist,create_using=None):
"""Return a graph from a list of edges.
Parameters
----------
edgelist : list or iterator
Edge tuples
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
Examples
--------
>>> edgelist= [(0,1)] # single edge (0,1)
>>> G=nx.from_edgelist(edgelist)
or
>>> G=nx.Graph(edgelist) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_edges_from(edgelist)
return G
| gpl-3.0 |
matt77hias/QuadratureExperiments | src/visstochastic.py | 1 | 2485 | import numpy as np
import matplotlib.pyplot as plt
import mc
from visdeterministic import VisRecord
vr_mc = VisRecord(Q=mc.mc, ns=range(1, 34), label='monte carlo', color='g', marker='o', ls='-')
vrs = [vr_mc]
vr1_mc = VisRecord(Q=mc.mc, ns=range(1, 10001), label='monte carlo', color='g', marker='o', ls='-')
vr1s = [vr1_mc]
###############################################################################
# STOCHASTIC
# ------------------------------
# ERRORS AND SIGNIFICANT DIGITS
###############################################################################
def vis_relative_error(f, I, mcs=vrs):
plt.figure()
for m in mcs:
fxs = np.zeros(len(m.ns))
Es = np.zeros(len(m.ns))
for j in range(len(m.ns)):
fxs[j] = m.ntfx(m.ns[j])
Es[j] = np.divide(abs(I - m.Q(f, m.ns[j])), abs(I))
plt.semilogy(fxs, Es, label=m.label, color=m.color, marker=m.marker, ls=m.ls)
plt.legend(loc=1)
plt.title('Relative error ' + f.func_name + '(x)')
plt.xlabel('#Function evaluations')
plt.ylabel('|I-In|/I')
plt.show()
def vis_absolute_error(f, I, mcs=vrs):
plt.figure()
for m in mcs:
fxs = np.zeros(len(m.ns))
Es = np.zeros(len(m.ns))
for j in range(len(m.ns)):
fxs[j] = m.ntfx(m.ns[j])
Es[j] = abs(I - m.Q(f, m.ns[j]))
plt.semilogy(fxs, Es, label=m.label, color=m.color, marker=m.marker, ls=m.ls)
plt.legend(loc=1)
plt.title('Absolute error: ' + f.func_name + '(x)')
plt.xlabel('#Function evaluations')
plt.ylabel('|I-In|')
plt.show()
def vis_sds(f, I, mcs=vrs):
plt.figure()
for m in mcs:
fxs = np.zeros(len(m.ns))
Es = np.zeros(len(m.ns))
for j in range(len(m.ns)):
fxs[j] = m.ntfx(m.ns[j])
Es[j] = np.log10(np.divide(abs(I - m.Q(f, m.ns[j])), abs(I)))
plt.plot(fxs, Es, label=m.label, color=m.color, marker=m.marker, ls=m.ls)
plt.legend(loc=1)
plt.title('Number of significant digits ' + f.func_name + '(x)')
plt.xlabel('#Function evaluations')
plt.ylabel('#SDs')
plt.show()
def nb_of_functionevaluations(f, I, s=-7, mcs=vr1s):
fxs = np.ones(len(mcs)) * -1
i = 0
for m in mcs:
for j in range(len(m.ns)):
if np.log10(np.divide(abs(I - m.Q(f, m.ns[j])), abs(I))) <= s:
fxs[i] = m.ntfx(m.ns[j])
break
i = i + 1
return fxs | gpl-3.0 |
yejingxin/PyKrige | pykrige/ok3d.py | 1 | 33404 | __doc__ = """Code by Benjamin S. Murphy
[email protected]
Dependencies:
numpy
scipy
matplotlib
Classes:
OrdinaryKriging3D: Support for 3D Ordinary Kriging.
References:
P.K. Kitanidis, Introduction to Geostatistics: Applications in Hydrogeology,
(Cambridge University Press, 1997) 272 p.
Copyright (c) 2015 Benjamin S. Murphy
"""
import numpy as np
import scipy.linalg
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
import variogram_models
import core
class OrdinaryKriging3D:
"""class OrdinaryKriging3D
Three-dimensional ordinary kriging
Dependencies:
numpy
scipy
matplotlib
Inputs:
X (array-like): X-coordinates of data points.
Y (array-like): Y-coordinates of data points.
Z (array-like): Z-coordinates of data points.
Val (array-like): Values at data points.
        variogram_model (string, optional): Specifies which variogram model to use;
            may be one of the following: linear, power, gaussian, spherical,
            exponential. Default is the linear variogram model. To utilize a custom variogram
            model, specify 'custom'; you must also provide variogram_parameters and
variogram_function.
variogram_parameters (list, optional): Parameters that define the
specified variogram model. If not provided, parameters will be automatically
calculated such that the root-mean-square error for the fit variogram
function is minimized.
linear - [slope, nugget]
power - [scale, exponent, nugget]
gaussian - [sill, range, nugget]
spherical - [sill, range, nugget]
exponential - [sill, range, nugget]
For a custom variogram model, the parameters are required, as custom variogram
models currently will not automatically be fit to the data. The code does not
check that the provided list contains the appropriate number of parameters for
the custom variogram model, so an incorrect parameter list in such a case will
probably trigger an esoteric exception someplace deep in the code.
variogram_function (callable, optional): A callable function that must be provided
if variogram_model is specified as 'custom'. The function must take only two
arguments: first, a list of parameters for the variogram model; second, the
distances at which to calculate the variogram model. The list provided in
variogram_parameters will be passed to the function as the first argument.
nlags (int, optional): Number of averaging bins for the semivariogram.
Default is 6.
weight (boolean, optional): Flag that specifies if semivariance at smaller lags
should be weighted more heavily when automatically calculating variogram model.
True indicates that weights will be applied. Default is False.
(Kitanidis suggests that the values at smaller lags are more important in
fitting a variogram model, so the option is provided to enable such weighting.)
anisotropy_scaling_y (float, optional): Scalar stretching value to take
into account anisotropy in the y direction. Default is 1 (effectively no stretching).
Scaling is applied in the y direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle_x/y/z, if anisotropy_angle_x/y/z
is/are not 0).
anisotropy_scaling_z (float, optional): Scalar stretching value to take
into account anisotropy in the z direction. Default is 1 (effectively no stretching).
Scaling is applied in the z direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle_x/y/z, if anisotropy_angle_x/y/z
is/are not 0).
anisotropy_angle_x (float, optional): CCW angle (in degrees) by which to
rotate coordinate system about the x axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation
is applied first, then y rotation, then z rotation. Scaling is applied after rotation.
anisotropy_angle_y (float, optional): CCW angle (in degrees) by which to
rotate coordinate system about the y axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation
is applied first, then y rotation, then z rotation. Scaling is applied after rotation.
anisotropy_angle_z (float, optional): CCW angle (in degrees) by which to
rotate coordinate system about the z axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation
is applied first, then y rotation, then z rotation. Scaling is applied after rotation.
verbose (Boolean, optional): Enables program text output to monitor
kriging process. Default is False (off).
enable_plotting (Boolean, optional): Enables plotting to display
variogram. Default is False (off).
Callable Methods:
display_variogram_model(): Displays semivariogram and variogram model.
        update_variogram_model(variogram_model, variogram_parameters=None,
            variogram_function=None, nlags=6, weight=False, anisotropy_scaling_y=1.0,
            anisotropy_scaling_z=1.0, anisotropy_angle_x=0.0, anisotropy_angle_y=0.0,
            anisotropy_angle_z=0.0):
Changes the variogram model and variogram parameters for
the kriging system.
Inputs:
variogram_model (string): May be any of the variogram models
listed above. May also be 'custom', in which case variogram_parameters
and variogram_function must be specified.
variogram_parameters (list, optional): List of variogram model
parameters, as listed above. If not provided, a best fit model
will be calculated as described above.
variogram_function (callable, optional): A callable function that must be
provided if variogram_model is specified as 'custom'. See above for
more information.
nlags (int, optional): Number of averaging bins for the semivariogram.
                    Default is 6.
weight (boolean, optional): Flag that specifies if semivariance at smaller lags
should be weighted more heavily when automatically calculating variogram model.
True indicates that weights will be applied. Default is False.
                anisotropy_scaling_y, anisotropy_scaling_z (float, optional):
                    Scalar stretching values to take into account anisotropy in
                    the y and z directions. Default is 1 (effectively no stretching).
                anisotropy_angle_x, anisotropy_angle_y, anisotropy_angle_z (float, optional):
                    CCW angles (in degrees) by which to rotate the coordinate system
                    about the x, y, and z axes in order to take into account
                    anisotropy. Default is 0 (no rotation).
switch_verbose(): Enables/disables program text output. No arguments.
        switch_plotting(): Enables/disables variogram plot display. No arguments.
get_epsilon_residuals(): Returns the epsilon residuals of the
variogram fit. No arguments.
plot_epsilon_residuals(): Plots the epsilon residuals of the variogram
fit in the order in which they were calculated. No arguments.
get_statistics(): Returns the Q1, Q2, and cR statistics for the
variogram fit (in that order). No arguments.
print_statistics(): Prints out the Q1, Q2, and cR statistics for
the variogram fit. NOTE that ideally Q1 is close to zero,
Q2 is close to 1, and cR is as small as possible.
execute(style, xpoints, ypoints, mask=None): Calculates a kriged grid.
Inputs:
style (string): Specifies how to treat input kriging points.
Specifying 'grid' treats xpoints, ypoints, and zpoints as
                arrays of x, y, z coordinates that define a rectangular grid.
Specifying 'points' treats xpoints, ypoints, and zpoints as arrays
that provide coordinates at which to solve the kriging system.
Specifying 'masked' treats xpoints, ypoints, zpoints as arrays of
x, y, z coordinates that define a rectangular grid and uses mask
to only evaluate specific points in the grid.
            xpoints (array-like, dim N): If style is specified as 'grid' or 'masked',
x-coordinates of LxMxN grid. If style is specified as 'points',
x-coordinates of specific points at which to solve kriging system.
ypoints (array-like, dim M): If style is specified as 'grid' or 'masked',
y-coordinates of LxMxN grid. If style is specified as 'points',
y-coordinates of specific points at which to solve kriging system.
Note that in this case, xpoints, ypoints, and zpoints must have the
same dimensions (i.e., L = M = N).
zpoints (array-like, dim L): If style is specified as 'grid' or 'masked',
z-coordinates of LxMxN grid. If style is specified as 'points',
z-coordinates of specific points at which to solve kriging system.
Note that in this case, xpoints, ypoints, and zpoints must have the
same dimensions (i.e., L = M = N).
mask (boolean array, dim LxMxN, optional): Specifies the points in the rectangular
grid defined by xpoints, ypoints, and zpoints that are to be excluded in the
kriging calculations. Must be provided if style is specified as 'masked'.
False indicates that the point should not be masked; True indicates that
the point should be masked.
backend (string, optional): Specifies which approach to use in kriging.
Specifying 'vectorized' will solve the entire kriging problem at once in a
vectorized operation. This approach is faster but also can consume a
significant amount of memory for large grids and/or large datasets.
Specifying 'loop' will loop through each point at which the kriging system
is to be solved. This approach is slower but also less memory-intensive.
Default is 'vectorized'.
Outputs:
kvalues (numpy array, dim LxMxN or dim Nx1): Interpolated values of specified grid
or at the specified set of points. If style was specified as 'masked',
kvalues will be a numpy masked array.
sigmasq (numpy array, dim LxMxN or dim Nx1): Variance at specified grid points or
at the specified set of points. If style was specified as 'masked', sigmasq
will be a numpy masked array.
References:
        P.K. Kitanidis, Introduction to Geostatistics: Applications in Hydrogeology,
(Cambridge University Press, 1997) 272 p.
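    Example:
        A minimal usage sketch (the import path and the synthetic data below are
        illustrative assumptions, not part of this module):
            import numpy as np
            from pykrige.ok3d import OrdinaryKriging3D
            data = np.random.rand(10, 4)   # columns: x, y, z, value
            ok3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3],
                                     variogram_model='linear')
            gridx = gridy = gridz = np.arange(0.0, 1.0, 0.1)
            kvalues, sigmasq = ok3d.execute('grid', gridx, gridy, gridz)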
"""
eps = 1.e-10 # Cutoff for comparison to zero
variogram_dict = {'linear': variogram_models.linear_variogram_model,
'power': variogram_models.power_variogram_model,
'gaussian': variogram_models.gaussian_variogram_model,
'spherical': variogram_models.spherical_variogram_model,
'exponential': variogram_models.exponential_variogram_model}
def __init__(self, x, y, z, val, variogram_model='linear', variogram_parameters=None,
variogram_function=None, nlags=6, weight=False, anisotropy_scaling_y=1.0,
anisotropy_scaling_z=1.0, anisotropy_angle_x=0.0, anisotropy_angle_y=0.0,
anisotropy_angle_z=0.0, verbose=False, enable_plotting=False):
# Code assumes 1D input arrays. Ensures that any extraneous dimensions
# don't get in the way. Copies are created to avoid any problems with
# referencing the original passed arguments.
self.X_ORIG = np.atleast_1d(np.squeeze(np.array(x, copy=True)))
self.Y_ORIG = np.atleast_1d(np.squeeze(np.array(y, copy=True)))
self.Z_ORIG = np.atleast_1d(np.squeeze(np.array(z, copy=True)))
self.VALUES = np.atleast_1d(np.squeeze(np.array(val, copy=True)))
self.verbose = verbose
self.enable_plotting = enable_plotting
if self.enable_plotting and self.verbose:
print "Plotting Enabled\n"
self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG))/2.0
self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG))/2.0
self.ZCENTER = (np.amax(self.Z_ORIG) + np.amin(self.Z_ORIG))/2.0
self.anisotropy_scaling_y = anisotropy_scaling_y
self.anisotropy_scaling_z = anisotropy_scaling_z
self.anisotropy_angle_x = anisotropy_angle_x
self.anisotropy_angle_y = anisotropy_angle_y
self.anisotropy_angle_z = anisotropy_angle_z
if self.verbose:
print "Adjusting data for anisotropy..."
self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = \
core.adjust_for_anisotropy_3d(np.copy(self.X_ORIG), np.copy(self.Y_ORIG), np.copy(self.Z_ORIG),
self.XCENTER, self.YCENTER, self.ZCENTER, self.anisotropy_scaling_y,
self.anisotropy_scaling_z, self.anisotropy_angle_x, self.anisotropy_angle_y,
self.anisotropy_angle_z)
self.variogram_model = variogram_model
if self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != 'custom':
raise ValueError("Specified variogram model '%s' is not supported." % variogram_model)
elif self.variogram_model == 'custom':
if variogram_function is None or not callable(variogram_function):
raise ValueError("Must specify callable function for custom variogram model.")
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if self.verbose:
print "Initializing variogram model..."
self.lags, self.semivariance, self.variogram_model_parameters = \
core.initialize_variogram_model_3d(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED, self.VALUES,
self.variogram_model, variogram_parameters, self.variogram_function,
nlags, weight)
if self.verbose:
if self.variogram_model == 'linear':
print "Using '%s' Variogram Model" % 'linear'
print "Slope:", self.variogram_model_parameters[0]
print "Nugget:", self.variogram_model_parameters[1], '\n'
elif self.variogram_model == 'power':
print "Using '%s' Variogram Model" % 'power'
print "Scale:", self.variogram_model_parameters[0]
print "Exponent:", self.variogram_model_parameters[1]
print "Nugget:", self.variogram_model_parameters[2], '\n'
elif self.variogram_model == 'custom':
print "Using Custom Variogram Model"
else:
print "Using '%s' Variogram Model" % self.variogram_model
print "Sill:", self.variogram_model_parameters[0]
print "Range:", self.variogram_model_parameters[1]
print "Nugget:", self.variogram_model_parameters[2], '\n'
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print "Calculating statistics on variogram model fit..."
self.delta, self.sigma, self.epsilon = core.find_statistics_3d(self.X_ADJUSTED, self.Y_ADJUSTED,
self.Z_ADJUSTED, self.VALUES,
self.variogram_function,
self.variogram_model_parameters)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print "Q1 =", self.Q1
print "Q2 =", self.Q2
print "cR =", self.cR, '\n'
def update_variogram_model(self, variogram_model, variogram_parameters=None, variogram_function=None,
nlags=6, weight=False, anisotropy_scaling_y=1.0, anisotropy_scaling_z=1.0,
anisotropy_angle_x=0.0, anisotropy_angle_y=0.0, anisotropy_angle_z=0.0):
"""Allows user to update variogram type and/or variogram model parameters."""
if anisotropy_scaling_y != self.anisotropy_scaling_y or anisotropy_scaling_z != self.anisotropy_scaling_z or \
anisotropy_angle_x != self.anisotropy_angle_x or anisotropy_angle_y != self.anisotropy_angle_y or \
anisotropy_angle_z != self.anisotropy_angle_z:
if self.verbose:
print "Adjusting data for anisotropy..."
self.anisotropy_scaling_y = anisotropy_scaling_y
self.anisotropy_scaling_z = anisotropy_scaling_z
self.anisotropy_angle_x = anisotropy_angle_x
self.anisotropy_angle_y = anisotropy_angle_y
self.anisotropy_angle_z = anisotropy_angle_z
self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = \
core.adjust_for_anisotropy_3d(np.copy(self.X_ORIG), np.copy(self.Y_ORIG), np.copy(self.Z_ORIG),
self.XCENTER, self.YCENTER, self.ZCENTER, self.anisotropy_scaling_y,
self.anisotropy_scaling_z, self.anisotropy_angle_x,
self.anisotropy_angle_y, self.anisotropy_angle_z)
self.variogram_model = variogram_model
if self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != 'custom':
raise ValueError("Specified variogram model '%s' is not supported." % variogram_model)
elif self.variogram_model == 'custom':
if variogram_function is None or not callable(variogram_function):
raise ValueError("Must specify callable function for custom variogram model.")
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if self.verbose:
print "Updating variogram mode..."
self.lags, self.semivariance, self.variogram_model_parameters = \
core.initialize_variogram_model_3d(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED, self.VALUES,
self.variogram_model, variogram_parameters, self.variogram_function,
nlags, weight)
if self.verbose:
if self.variogram_model == 'linear':
print "Using '%s' Variogram Model" % 'linear'
print "Slope:", self.variogram_model_parameters[0]
print "Nugget:", self.variogram_model_parameters[1], '\n'
elif self.variogram_model == 'power':
print "Using '%s' Variogram Model" % 'power'
print "Scale:", self.variogram_model_parameters[0]
print "Exponent:", self.variogram_model_parameters[1]
print "Nugget:", self.variogram_model_parameters[2], '\n'
elif self.variogram_model == 'custom':
print "Using Custom Variogram Model"
else:
print "Using '%s' Variogram Model" % self.variogram_model
print "Sill:", self.variogram_model_parameters[0]
print "Range:", self.variogram_model_parameters[1]
print "Nugget:", self.variogram_model_parameters[2], '\n'
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print "Calculating statistics on variogram model fit..."
self.delta, self.sigma, self.epsilon = core.find_statistics_3d(self.X_ADJUSTED, self.Y_ADJUSTED,
self.Z_ADJUSTED, self.VALUES,
self.variogram_function,
self.variogram_model_parameters)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print "Q1 =", self.Q1
print "Q2 =", self.Q2
print "cR =", self.cR, '\n'
def display_variogram_model(self):
"""Displays variogram model with the actual binned data"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, 'r*')
ax.plot(self.lags,
self.variogram_function(self.variogram_model_parameters, self.lags), 'k-')
plt.show()
def switch_verbose(self):
"""Allows user to switch code talk-back on/off. Takes no arguments."""
self.verbose = not self.verbose
def switch_plotting(self):
"""Allows user to switch plot display on/off. Takes no arguments."""
self.enable_plotting = not self.enable_plotting
def get_epsilon_residuals(self):
"""Returns the epsilon residuals for the variogram fit."""
return self.epsilon
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c='k', marker='*')
ax.axhline(y=0.0)
plt.show()
def get_statistics(self):
return self.Q1, self.Q2, self.cR
def print_statistics(self):
print "Q1 =", self.Q1
print "Q2 =", self.Q2
print "cR =", self.cR
def _get_kriging_matrix(self, n):
"""Assembles the kriging matrix."""
xyz = np.concatenate((self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis],
self.Z_ADJUSTED[:, np.newaxis]), axis=1)
d = cdist(xyz, xyz, 'euclidean')
a = np.zeros((n+1, n+1))
a[:n, :n] = - self.variogram_function(self.variogram_model_parameters, d)
np.fill_diagonal(a, 0.)
a[n, :] = 1.0
a[:, n] = 1.0
a[n, n] = 0.0
return a
def _exec_vector(self, a, bd, mask):
"""Solves the kriging system as a vectorized operation. This method
can take a lot of memory for large grids and/or large datasets."""
npt = bd.shape[0]
n = self.X_ADJUSTED.shape[0]
zero_index = None
zero_value = False
a_inv = scipy.linalg.inv(a)
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
b = np.zeros((npt, n+1, 1))
b[:, :n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], zero_index[1], 0] = 0.0
b[:, n, 0] = 1.0
if (~mask).any():
mask_b = np.repeat(mask[:, np.newaxis, np.newaxis], n+1, axis=1)
b = np.ma.array(b, mask=mask_b)
x = np.dot(a_inv, b.reshape((npt, n+1)).T).reshape((1, n+1, npt)).T
kvalues = np.sum(x[:, :n, 0] * self.VALUES, axis=1)
sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)
return kvalues, sigmasq
def _exec_loop(self, a, bd_all, mask):
"""Solves the kriging system by looping over all specified points.
Less memory-intensive, but involves a Python-level loop."""
npt = bd_all.shape[0]
n = self.X_ADJUSTED.shape[0]
kvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
a_inv = scipy.linalg.inv(a)
for j in np.nonzero(~mask)[0]: # Note that this is the same thing as range(npt) if mask is not defined,
bd = bd_all[j] # otherwise it takes the non-masked elements.
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_value = False
zero_index = None
b = np.zeros((n+1, 1))
b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = np.dot(a_inv, b)
kvalues[j] = np.sum(x[:n, 0] * self.VALUES)
sigmasq[j] = np.sum(x[:, 0] * -b[:, 0])
return kvalues, sigmasq
def execute(self, style, xpoints, ypoints, zpoints, mask=None, backend='vectorized'):
"""Calculates a kriged grid and the associated variance.
This is now the method that performs the main kriging calculation. Note that currently
measurements (i.e., z values) are considered 'exact'. This means that, when a specified
coordinate for interpolation is exactly the same as one of the data points, the variogram
evaluated at the point is forced to be zero. Also, the diagonal of the kriging matrix is
also always forced to be zero. In forcing the variogram evaluated at data points to be zero,
we are effectively saying that there is no variance at that point (no uncertainty,
so the value is 'exact').
In the future, the code may include an extra 'exact_values' boolean flag that can be
adjusted to specify whether to treat the measurements as 'exact'. Setting the flag
to false would indicate that the variogram should not be forced to be zero at zero distance
(i.e., when evaluated at data points). Instead, the uncertainty in the point will be
equal to the nugget. This would mean that the diagonal of the kriging matrix would be set to
the nugget instead of to zero.
Inputs:
style (string): Specifies how to treat input kriging points.
Specifying 'grid' treats xpoints, ypoints, and zpoints as arrays of
x, y, and z coordinates that define a rectangular grid.
Specifying 'points' treats xpoints, ypoints, and zpoints as arrays
that provide coordinates at which to solve the kriging system.
Specifying 'masked' treats xpoints, ypoints, and zpoints as arrays of
x, y, and z coordinates that define a rectangular grid and uses mask
to only evaluate specific points in the grid.
xpoints (array-like, dim N): If style is specified as 'grid' or 'masked',
x-coordinates of LxMxN grid. If style is specified as 'points',
x-coordinates of specific points at which to solve kriging system.
ypoints (array-like, dim M): If style is specified as 'grid' or 'masked',
y-coordinates of LxMxN grid. If style is specified as 'points',
y-coordinates of specific points at which to solve kriging system.
Note that in this case, xpoints, ypoints, and zpoints must have the
same dimensions (i.e., L = M = N).
zpoints (array-like, dim L): If style is specified as 'grid' or 'masked',
z-coordinates of LxMxN grid. If style is specified as 'points',
z-coordinates of specific points at which to solve kriging system.
Note that in this case, xpoints, ypoints, and zpoints must have the
same dimensions (i.e., L = M = N).
mask (boolean array, dim LxMxN, optional): Specifies the points in the rectangular
grid defined by xpoints, ypoints, zpoints that are to be excluded in the
kriging calculations. Must be provided if style is specified as 'masked'.
False indicates that the point should not be masked, so the kriging system
will be solved at the point.
True indicates that the point should be masked, so the kriging system
will not be solved at the point.
backend (string, optional): Specifies which approach to use in kriging.
Specifying 'vectorized' will solve the entire kriging problem at once in a
vectorized operation. This approach is faster but also can consume a
significant amount of memory for large grids and/or large datasets.
Specifying 'loop' will loop through each point at which the kriging system
is to be solved. This approach is slower but also less memory-intensive.
Default is 'vectorized'.
Outputs:
kvalues (numpy array, dim LxMxN or dim Nx1): Interpolated values of specified grid
or at the specified set of points. If style was specified as 'masked',
kvalues will be a numpy masked array.
sigmasq (numpy array, dim LxMxN or dim Nx1): Variance at specified grid points or
at the specified set of points. If style was specified as 'masked', sigmasq
will be a numpy masked array.
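Example (a minimal, hedged usage sketch -- the instance name 'ok3d' and the
coordinate arrays are hypothetical and assume the kriging object has already
been built from scattered x/y/z/value data):
>>> gridx = np.arange(0.0, 5.0, 0.5) # doctest: +SKIP
>>> gridy = np.arange(0.0, 5.0, 0.5) # doctest: +SKIP
>>> gridz = np.arange(0.0, 5.0, 0.5) # doctest: +SKIP
>>> kvalues, sigmasq = ok3d.execute('grid', gridx, gridy, gridz, backend='loop') # doctest: +SKIP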
"""
if self.verbose:
print "Executing Ordinary Kriging...\n"
if style != 'grid' and style != 'masked' and style != 'points':
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True)))
ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True)))
zpts = np.atleast_1d(np.squeeze(np.array(zpoints, copy=True)))
n = self.X_ADJUSTED.shape[0]
nx = xpts.size
ny = ypts.size
nz = zpts.size
a = self._get_kriging_matrix(n)
if style in ['grid', 'masked']:
if style == 'masked':
if mask is None:
raise IOError("Must specify boolean masking array when style is 'masked'.")
if mask.ndim != 3:
raise ValueError("Mask is not three-dimensional.")
if mask.shape[0] != nz or mask.shape[1] != ny or mask.shape[2] != nx:
if mask.shape[0] == nx and mask.shape[2] == nz and mask.shape[1] == ny:
mask = mask.swapaxes(0, 2)
else:
raise ValueError("Mask dimensions do not match specified grid dimensions.")
mask = mask.flatten()
npt = nz * ny * nx
grid_z, grid_y, grid_x = np.meshgrid(zpts, ypts, xpts, indexing='ij')
xpts = grid_x.flatten()
ypts = grid_y.flatten()
zpts = grid_z.flatten()
elif style == 'points':
if xpts.size != ypts.size or ypts.size != zpts.size:
raise ValueError("xpoints, ypoints, and zpoints must have same dimensions "
"when treated as listing discrete points.")
npt = nx
else:
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
xpts, ypts, zpts = core.adjust_for_anisotropy_3d(xpts, ypts, zpts, self.XCENTER, self.YCENTER, self.ZCENTER,
self.anisotropy_scaling_y, self.anisotropy_scaling_z,
self.anisotropy_angle_x, self.anisotropy_angle_y,
self.anisotropy_angle_z)
if style != 'masked':
mask = np.zeros(npt, dtype='bool')
xyz_points = np.concatenate((zpts[:, np.newaxis], ypts[:, np.newaxis], xpts[:, np.newaxis]), axis=1)
xyz_data = np.concatenate((self.Z_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis],
self.X_ADJUSTED[:, np.newaxis]), axis=1)
bd = cdist(xyz_points, xyz_data, 'euclidean')
if backend == 'vectorized':
kvalues, sigmasq = self._exec_vector(a, bd, mask)
elif backend == 'loop':
kvalues, sigmasq = self._exec_loop(a, bd, mask)
else:
raise ValueError('Specified backend {} is not supported for 3D ordinary kriging.'.format(backend))
if style == 'masked':
kvalues = np.ma.array(kvalues, mask=mask)
sigmasq = np.ma.array(sigmasq, mask=mask)
if style in ['masked', 'grid']:
kvalues = kvalues.reshape((nz, ny, nx))
sigmasq = sigmasq.reshape((nz, ny, nx))
return kvalues, sigmasq | bsd-3-clause |
alvarofierroclavero/scikit-learn | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
fabioticconi/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 110 | 3768 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/numpy/doc/creation.py | 52 | 5507 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to NumPy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists,
and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic NumPy Array Creation
==============================
NumPy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
``>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])``
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
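For example (output shown in the same condensed style as the zeros example
above): ::
>>> np.ones((2, 3))
array([[ 1., 1., 1.], [ 1., 1., 1.]])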
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
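For instance, the two index arrays for a 2-d grid can be combined directly to
evaluate a simple function of the grid coordinates (condensed output as
above): ::
>>> i, j = np.indices((3, 3))
>>> i + j
array([[0, 1, 2], [1, 2, 3], [2, 3, 4]])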
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
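As a rough sketch of the HDF5 route (the file and node names here are
hypothetical, and depending on the PyTables version the call is spelled
open_file or openFile): ::
import tables
h5file = tables.open_file('example.h5')
arr = h5file.root.some_array.read()
h5file.close()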
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
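One possible numpy-only sketch for such files (the file names and layout here
are hypothetical): ::
data = np.genfromtxt('example.csv', delimiter=',', skip_header=1)
table = np.loadtxt('example.txt') # whitespace-delimited columns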
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!). If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
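A minimal sketch of the fromfile/tofile round trip (the file name is
hypothetical, and the dtype and byte order must match what was written): ::
a = np.arange(4, dtype=np.int32)
a.tofile('example.bin')
b = np.fromfile('example.bin', dtype=np.int32)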
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are
the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
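For example (the random values differ from run to run; diag is one of the
special-matrix helpers alluded to above): ::
>>> np.random.randint(0, 10, (2, 3)) # values vary between runs
>>> np.diag([1, 2, 3])
array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])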
"""
from __future__ import division, absolute_import, print_function
| mit |
dgketchum/MT_Rsense | obspnts/obsio/providers/ushcn.py | 1 | 7794 | from .. import LOCAL_DATA_PATH
from .generic import ObsIO
from StringIO import StringIO
import glob
import itertools
import numpy as np
import os
import pandas as pd
import subprocess
import tarfile
_RPATH_USHCN = 'ftp://ftp.ncdc.noaa.gov/pub/data/ushcn/v2.5/*'
_ELEMS_TO_USHCN_DATASET = {'tmin_mth_raw': 'raw', 'tmin_mth_tob': 'tob',
'tmin_mth_fls': 'FLs.52j', 'tmax_mth_raw': 'raw',
'tmax_mth_tob': 'tob', 'tmax_mth_fls': 'FLs.52j',
'tavg_mth_raw': 'raw', 'tavg_mth_tob': 'tob',
'tavg_mth_fls': 'FLs.52j', 'prcp_mth_raw': 'raw',
'prcp_mth_tob': 'tob', 'prcp_mth_fls': 'FLs.52j'}
_ELEMS_TO_USHCN_VNAME = {'tmin_mth_raw': 'tmin', 'tmin_mth_tob': 'tmin',
'tmin_mth_fls': 'tmin', 'tmax_mth_raw': 'tmax',
'tmax_mth_tob': 'tmax', 'tmax_mth_fls': 'tmax',
'tavg_mth_raw': 'tavg', 'tavg_mth_tob': 'tavg',
'tavg_mth_fls': 'tavg', 'prcp_mth_raw': 'prcp',
'prcp_mth_tob': 'prcp', 'prcp_mth_fls': 'prcp'}
_to_c = lambda x: x / 100.0
_to_mm = lambda x: x / 10.0
_ELEMS_CONVERT_FUNCT = {'tmin_mth_raw': _to_c, 'tmin_mth_tob': _to_c,
'tmin_mth_fls': _to_c, 'tmax_mth_raw': _to_c,
'tmax_mth_tob': _to_c, 'tmax_mth_fls': _to_c,
'tavg_mth_raw': _to_c, 'tavg_mth_tob': _to_c,
'tavg_mth_fls': _to_c, 'prcp_mth_raw': _to_mm,
'prcp_mth_tob': _to_mm, 'prcp_mth_fls': _to_mm}
class UshcnObsIO(ObsIO):
_avail_elems = ['tmin_mth_raw', 'tmin_mth_tob', 'tmin_mth_fls',
'tmax_mth_raw', 'tmax_mth_tob', 'tmax_mth_fls',
'tavg_mth_raw', 'tavg_mth_tob', 'tavg_mth_fls',
'prcp_mth_raw', 'prcp_mth_tob', 'prcp_mth_fls']
_requires_local = True
name = "USHCN"
def __init__(self, local_data_path=None, download_updates=True, **kwargs):
super(UshcnObsIO, self).__init__(**kwargs)
self.local_data_path = (local_data_path if local_data_path
else LOCAL_DATA_PATH)
self.path_ushcn_data = os.path.join(self.local_data_path, 'USHCN')
if not os.path.isdir(self.path_ushcn_data):
os.mkdir(self.path_ushcn_data)
self.download_updates = download_updates
self._download_run = False
self._a_obs_prefix_dirs = None
self._a_obs_tarfiles = None
self._a_df_tobs = None
@property
def _obs_tarfiles(self):
if self._a_obs_tarfiles is None:
self._a_obs_tarfiles = {}
for elem in self.elems:
fpath = os.path.join(self.path_ushcn_data,
'ushcn.%s.latest.%s.tar' %
(_ELEMS_TO_USHCN_VNAME[elem],
_ELEMS_TO_USHCN_DATASET[elem]))
tfile = tarfile.open(fpath)
self._a_obs_tarfiles[elem] = tfile
return self._a_obs_tarfiles
def _read_stns(self):
if self.download_updates and not self._download_run:
self.download_local()
stns = pd.read_fwf(os.path.join(self.path_ushcn_data,
'ushcn-v2.5-stations.txt'),
colspecs=[(0, 11), (12, 20), (21, 30), (31, 37),
(38, 40), (41, 71)], header=None,
names=['station_id', 'latitude', 'longitude',
'elevation', 'state', 'station_name'])
stns['station_name'] = stns.station_name.apply(unicode,
errors='ignore')
stns['provider'] = 'USHCN'
stns['sub_provider'] = ''
if self.bbox is not None:
mask_bnds = ((stns.latitude >= self.bbox.south) &
(stns.latitude <= self.bbox.north) &
(stns.longitude >= self.bbox.west) &
(stns.longitude <= self.bbox.east))
stns = stns[mask_bnds].copy()
stns = stns.set_index('station_id', drop=False)
return stns
@property
def _obs_prefix_dirs(self):
if self._a_obs_prefix_dirs is None:
self._a_obs_prefix_dirs = {elem: self._obs_tarfiles[elem].
getnames()[0].split('/')[1] for elem in
self.elems}
return self._a_obs_prefix_dirs
def download_local(self):
local_path = self.path_ushcn_data
print "Syncing USHCN data to local..."
subprocess.call(['wget', '-N', '--directory-prefix=' + local_path,
_RPATH_USHCN])
print "Unzipping files..."
fnames_tars = glob.glob(os.path.join(local_path, '*.gz'))
for fname in fnames_tars:
subprocess.call(['gunzip', '-f',
os.path.join(local_path, fname)])
self._download_run = True
def _parse_stn_obs(self, stn_id, elem):
fname = os.path.join('.', self._obs_prefix_dirs[elem],
'%s.%s.%s' % (stn_id, _ELEMS_TO_USHCN_DATASET[elem],
_ELEMS_TO_USHCN_VNAME[elem]))
obs_file = self._obs_tarfiles[elem].extractfile(fname)
obs = pd.read_fwf(StringIO(obs_file.read()),
colspecs=[(12, 16), (17, 17 + 5), (26, 26 + 5),
(35, 35 + 5), (44, 44 + 5), (53, 53 + 5),
(62, 62 + 5), (71, 71 + 5), (80, 80 + 5),
(89, 89 + 5), (98, 98 + 5), (107, 107 + 5),
(116, 116 + 5)], header=None, index_col=0,
names=['year'] + ['%.2d' % mth for
mth in np.arange(1, 13)],
na_values='-9999')
obs_file.close()
obs = obs.unstack().swaplevel(0, 1).sortlevel(0, sort_remaining=True)
obs = obs.reset_index()
obs['time'] = pd.to_datetime(obs.year.astype(np.str) + obs.level_1,
format='%Y%m')
obs.drop(['year', 'level_1'], axis=1, inplace=True)
obs.rename(columns={0: 'obs_value'}, inplace=True)
obs.dropna(axis=0, subset=['obs_value'], inplace=True)
if self.has_start_end_dates:
mask_time = ((obs.time >= self.start_date) &
(obs.time <= self.end_date))
obs.drop(obs[~mask_time].index, axis=0, inplace=True)
obs['obs_value'] = _ELEMS_CONVERT_FUNCT[elem](obs['obs_value'])
obs['station_id'] = stn_id
obs['elem'] = elem
return obs
def _read_obs(self, stns_ids=None):
# Saw severely degraded performance due to garbage collection when
# pandas ran checks for a chained assignment. Turn off this check
# temporarily.
opt_val = pd.get_option('mode.chained_assignment')
pd.set_option('mode.chained_assignment', None)
try:
if stns_ids is None:
stns_obs = self.stns
else:
stns_obs = self.stns.loc[stns_ids]
obs = [self._parse_stn_obs(a_id, elem) for elem, a_id in
itertools.product(self.elems, stns_obs.station_id)]
obs = pd.concat(obs, ignore_index=True)
finally:
pd.set_option('mode.chained_assignment', opt_val)
obs = obs.set_index(['station_id', 'elem', 'time'])
obs = obs.sortlevel(0, sort_remaining=True)
return obs
| apache-2.0 |
xiaoxq/apollo | modules/tools/mapshow/libs/plot_smoothness.py | 3 | 2247 | #!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from cyber.python.cyber_py3 import cyber
from modules.planning.proto import planning_pb2
from modules.tools.mapshow.libs.planning import Planning
from modules.tools.mapshow.libs.subplot_traj_acc import TrajAccSubplot
from modules.tools.mapshow.libs.subplot_traj_path import TrajPathSubplot
from modules.tools.mapshow.libs.subplot_traj_speed import TrajSpeedSubplot
planning = Planning()
def update(frame_number):
traj_speed_subplot.show(planning)
traj_acc_subplot.show(planning)
traj_path_subplot.show(planning)
def planning_callback(planning_pb):
planning.update_planning_pb(planning_pb)
planning.compute_traj_data()
def add_listener():
planning_sub = cyber.Node("st_plot")
planning_sub.create_reader('/apollo/planning', planning_pb2.ADCTrajectory,
planning_callback)
def press_key():
pass
if __name__ == '__main__':
cyber.init()
add_listener()
fig = plt.figure(figsize=(14, 6))
fig.canvas.mpl_connect('key_press_event', press_key)
ax = plt.subplot2grid((2, 2), (0, 0))
traj_speed_subplot = TrajSpeedSubplot(ax)
ax2 = plt.subplot2grid((2, 2), (0, 1))
traj_acc_subplot = TrajAccSubplot(ax2)
ax3 = plt.subplot2grid((2, 2), (1, 0))
traj_path_subplot = TrajPathSubplot(ax3)
ani = animation.FuncAnimation(fig, update, interval=100)
plt.show()
cyber.shutdown()
| apache-2.0 |
JT5D/scikit-learn | examples/ensemble/plot_forest_iris.py | 7 | 6244 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores, as each tree is built independently of the others. AdaBoost's
estimators are built sequentially, so AdaBoost cannot make use of multiple cores.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = pl.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
print("{0} with features {1} has a score of {2}".format(model_details, pair, scores))
pl.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
pl.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = pl.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = pl.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
# surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = pl.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
pl.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
pl.suptitle("Classifiers on feature subsets of the Iris dataset")
pl.axis("tight")
pl.show()
| bsd-3-clause |
wkfwkf/statsmodels | examples/python/discrete_choice_example.py | 30 | 5786 |
## Discrete Choice Models
### Fair's Affair data
# A survey of women only was conducted in 1974 by *Redbook* asking about extramarital affairs.
from __future__ import print_function
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.formula.api import logit, probit, poisson, ols
print(sm.datasets.fair.SOURCE)
print(sm.datasets.fair.NOTE)
dta = sm.datasets.fair.load_pandas().data
dta['affair'] = (dta['affairs'] > 0).astype(float)
print(dta.head(10))
print(dta.describe())
affair_mod = logit("affair ~ occupation + educ + occupation_husb"
"+ rate_marriage + age + yrs_married + children"
" + religious", dta).fit()
print(affair_mod.summary())
# How well are we predicting?
affair_mod.pred_table()
# The coefficients of the discrete choice model do not tell us much. What we're after is marginal effects.
mfx = affair_mod.get_margeff()
print(mfx.summary())
respondent1000 = dta.ix[1000]
print(respondent1000)
resp = dict(zip(range(1,9), respondent1000[["occupation", "educ",
"occupation_husb", "rate_marriage",
"age", "yrs_married", "children",
"religious"]].tolist()))
resp.update({0 : 1})
print(resp)
mfx = affair_mod.get_margeff(atexog=resp)
print(mfx.summary())
affair_mod.predict(respondent1000)
affair_mod.fittedvalues[1000]
affair_mod.model.cdf(affair_mod.fittedvalues[1000])
# The "correct" model here is likely the Tobit model. We have an work in progress branch "tobit-model" on github, if anyone is interested in censored regression models.
#### Exercise: Logit vs Probit
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
support = np.linspace(-6, 6, 1000)
ax.plot(support, stats.logistic.cdf(support), 'r-', label='Logistic')
ax.plot(support, stats.norm.cdf(support), label='Probit')
ax.legend();
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
support = np.linspace(-6, 6, 1000)
ax.plot(support, stats.logistic.pdf(support), 'r-', label='Logistic')
ax.plot(support, stats.norm.pdf(support), label='Probit')
ax.legend();
# Compare the estimates of the Logit Fair model above to a Probit model. Does the prediction table look better? Much difference in marginal effects?
#### Generalized Linear Model Example
print(sm.datasets.star98.SOURCE)
print(sm.datasets.star98.DESCRLONG)
print(sm.datasets.star98.NOTE)
dta = sm.datasets.star98.load_pandas().data
print(dta.columns)
print(dta[['NABOVE', 'NBELOW', 'LOWINC', 'PERASIAN', 'PERBLACK', 'PERHISP', 'PERMINTE']].head(10))
print(dta[['AVYRSEXP', 'AVSALK', 'PERSPENK', 'PTRATIO', 'PCTAF', 'PCTCHRT', 'PCTYRRND']].head(10))
formula = 'NABOVE + NBELOW ~ LOWINC + PERASIAN + PERBLACK + PERHISP + PCTCHRT '
formula += '+ PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'
##### Aside: Binomial distribution
# Toss a six-sided die 5 times, what's the probability of exactly 2 fours?
stats.binom(5, 1./6).pmf(2)
from scipy.misc import comb
comb(5,2) * (1/6.)**2 * (5/6.)**3
from statsmodels.formula.api import glm
glm_mod = glm(formula, dta, family=sm.families.Binomial()).fit()
print(glm_mod.summary())
# The number of trials
glm_mod.model.data.orig_endog.sum(1)
glm_mod.fittedvalues * glm_mod.model.data.orig_endog.sum(1)
# First differences: We hold all explanatory variables constant at their means and manipulate the percentage of low income households to assess its impact
# on the response variables:
exog = glm_mod.model.data.orig_exog # get the dataframe
means25 = exog.mean()
print(means25)
means25['LOWINC'] = exog['LOWINC'].quantile(.25)
print(means25)
means75 = exog.mean()
means75['LOWINC'] = exog['LOWINC'].quantile(.75)
print(means75)
resp25 = glm_mod.predict(means25)
resp75 = glm_mod.predict(means75)
diff = resp75 - resp25
# The interquartile first difference for the percentage of low income households in a school district is:
print("%2.4f%%" % (diff[0]*100))
nobs = glm_mod.nobs
y = glm_mod.model.endog
yhat = glm_mod.mu
from statsmodels.graphics.api import abline_plot
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, ylabel='Observed Values', xlabel='Fitted Values')
ax.scatter(yhat, y)
y_vs_yhat = sm.OLS(y, sm.add_constant(yhat, prepend=True)).fit()
fig = abline_plot(model_results=y_vs_yhat, ax=ax)
##### Plot fitted values vs Pearson residuals
# Pearson residuals are defined to be
#
# $$\frac{y - \mu}{\sqrt{\operatorname{var}(\mu)}}$$
#
# where var is typically determined by the family. E.g., binomial variance is $np(1 - p)$
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, title='Residual Dependence Plot', xlabel='Fitted Values',
ylabel='Pearson Residuals')
ax.scatter(yhat, stats.zscore(glm_mod.resid_pearson))
ax.axis('tight')
ax.plot([0.0, 1.0],[0.0, 0.0], 'k-');
##### Histogram of standardized deviance residuals with Kernel Density Estimate overlayed
# The definition of the deviance residuals depends on the family. For the Binomial distribution this is
#
# $$r_{dev} = \operatorname{sign}(Y-\mu)\sqrt{2n\left(Y\log\frac{Y}{\mu}+(1-Y)\log\frac{1-Y}{1-\mu}\right)}$$
#
# They can be used to detect ill-fitting covariates
resid = glm_mod.resid_deviance
resid_std = stats.zscore(resid)
kde_resid = sm.nonparametric.KDEUnivariate(resid_std)
kde_resid.fit()
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, title="Standardized Deviance Residuals")
ax.hist(resid_std, bins=25, normed=True);
ax.plot(kde_resid.support, kde_resid.density, 'r');
##### QQ-plot of deviance residuals
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
fig = sm.graphics.qqplot(resid, line='r', ax=ax)
| bsd-3-clause |
bolverk/white_dwarf_nova | draw_snapshots.py | 2 | 4775 | def plot_single(in_file, zfunc, zname, out_file):
import pylab
import numpy
import h5py
from matplotlib.collections import PolyCollection
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
print(out_file)
if os.path.isfile(out_file):
return
with h5py.File(in_file,'r+') as f:
vert_idx_list = numpy.concatenate(([0],
numpy.cumsum(f['Number of vertices in cell'])))
verts = []
x_verts = numpy.array(f['x position of vertices'])
y_verts = numpy.array(f['y position of vertices'])
ghost_list = numpy.array(f['ghost'])
for i in range(len(f['density'])):
if ghost_list[i]<0.5:
lowbound = int(vert_idx_list[i])
upbound = int(vert_idx_list[i+1])
verts.append([[x,y] for x,y
in zip(x_verts[lowbound:upbound],
y_verts[lowbound:upbound])])
coll = PolyCollection(verts,
array=zfunc(f),
cmap = mpl.cm.jet,
edgecolors = 'none')
fig, ax = plt.subplots()
fig.suptitle(zname+' @ t = '+str(numpy.array(f['time'])[0]))
ax.add_collection(coll)
ax.autoscale_view()
ax.set_aspect('equal')
fig.colorbar(coll,ax=ax)
print(out_file)
if out_file==None:
plt.show()
else:
plt.savefig(out_file)
plt.clf()
plt.cla()
plt.close()
def plot_all(zfunc, zname):
import glob
import numpy
import joblib
flist = glob.glob('snapshot_*.h5')
joblib.Parallel(n_jobs=8)(joblib.delayed(plot_single)
(fname,
zfunc,
zname,
fname.replace('snapshot',zname).replace('.h5','.png')) for fname in flist)
#[plot_single(fname,zfunc,zname,
# fname.replace('snapshot',zname).replace('.h5','.png'))
# for fname in flist]
def log10_density_cgs(f):
import numpy
return numpy.log10(numpy.array(f['density']))[numpy.array(f['ghost'])<0.5]
def log10_temperature(f):
import numpy
return numpy.log10(f['temperature'])[numpy.array(f['ghost'])<0.5]
def x_velocity(f):
import numpy
return numpy.array(f['x_velocity'])[numpy.array(f['ghost'])<0.5]
def y_velocity(f):
import numpy
return numpy.array(f['y_velocity'])[numpy.array(f['ghost'])<0.5]
def He4_prof(f):
import numpy
return numpy.array(f['He4'])[numpy.array(f['ghost'])<0.5]
def C12_prof(f):
import numpy
return numpy.array(f['C12'])[numpy.array(f['ghost'])<0.5]
def O16_prof(f):
import numpy
return numpy.array(f['O16'])[numpy.array(f['ghost'])<0.5]
def Ne20_prof(f):
import numpy
return numpy.array(f['Ne20'])[numpy.array(f['ghost'])<0.5]
def Mg24_prof(f):
import numpy
return numpy.array(f['Mg24'])[numpy.array(f['ghost'])<0.5]
def Si28_prof(f):
import numpy
return numpy.array(f['Si28'])[numpy.array(f['ghost'])<0.5]
def S32_prof(f):
import numpy
return numpy.array(f['S32'])[numpy.array(f['ghost'])<0.5]
def Ar36_prof(f):
import numpy
return numpy.array(f['Ar36'])[numpy.array(f['ghost'])<0.5]
def Ca40_prof(f):
import numpy
return numpy.array(f['Ca40'])[numpy.array(f['ghost'])<0.5]
def Ti44_prof(f):
import numpy
return numpy.array(f['Ti44'])[numpy.array(f['ghost'])<0.5]
def Cr48_prof(f):
import numpy
return numpy.array(f['Cr48'])[numpy.array(f['ghost'])<0.5]
def Fe52_prof(f):
import numpy
return numpy.array(f['Fe52'])[numpy.array(f['ghost'])<0.5]
def Ni56_prof(f):
import numpy
return numpy.array(f['Ni56'])[numpy.array(f['ghost'])<0.5]
def main():
import matplotlib
matplotlib.use('Agg')
import numpy
#plot_single('snapshot_0.h5',
# log10_temperature,
# 'log10_temperature',
# 'log10_temperature_0.png')
plot_all(log10_density_cgs, 'log10_density')
plot_all(log10_temperature, 'log10_temperature')
plot_all(x_velocity, 'x_velocity')
plot_all(y_velocity, 'y_velocity')
#plot_all(He4_prof, 'He4')
#plot_all(C12_prof, 'C12')
#plot_all(O16_prof, 'O16')
#plot_all(S32_prof, 'S32')
#plot_all(Ne20_prof, 'Ne20')
#plot_all(Mg24_prof, 'Mg24')
#plot_all(Si28_prof, 'Si28')
#plot_all(Ar36_prof, 'Ar36')
#plot_all(Ca40_prof, 'Ca40')
#plot_all(Ti44_prof, 'Ti44')
#plot_all(Cr48_prof, 'Cr48')
#plot_all(Fe52_prof, 'Fe52')
#plot_all(Ni56_prof, 'Ni56')
if __name__ == '__main__':
main()
| mit |
zodiac/incubator-airflow | airflow/hooks/hive_hooks.py | 22 | 27917 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from builtins import zip
from past.builtins import basestring
import collections
import unicodecsv as csv
import itertools
import logging
import re
import subprocess
import time
from tempfile import NamedTemporaryFile
import hive_metastore
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.helpers import as_flattened_list
from airflow.utils.file import TemporaryDirectory
from airflow import configuration
import airflow.security.utils as utils
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
It also supports ``beeline``,
a lighter CLI that runs over JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: string
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: string
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: string
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
if configuration.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/[email protected]")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = jdbc_url.format(**locals())
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
def _prepare_hiveconf(self, d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(
["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()]
)
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
hive_conf_params = self._prepare_hiveconf(hive_conf)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue)])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
logging.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
logging.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
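A minimal, hedged usage sketch (the statements are illustrative only):
>>> hh = HiveCliHook() # doctest: +SKIP
>>> hh.test_hql("CREATE TABLE tmp_t (a INT); INSERT OVERWRITE TABLE tmp_t SELECT 1;") # doctest: +SKIP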
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
logging.info("Testing HQL [{0} (...)]".format(query_preview))
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
logging.info(message)
error_loc = re.search('(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
l = int(error_loc.group(1))
begin = max(l-2, 0)
end = min(l+3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
logging.info("Context :\n {0}".format(context))
else:
logging.info("SUCCESS")
def load_df(
self,
df,
table,
create=True,
recreate=False,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param field_dict: mapping from column name to hive data type
:type field_dict: dict
:param encoding: string encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
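A hedged usage sketch (the database and table names are hypothetical):
>>> import pandas as pd # doctest: +SKIP
>>> hh = HiveCliHook() # doctest: +SKIP
>>> df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']}) # doctest: +SKIP
>>> hh.load_df(df, table='tmp.my_table', recreate=True) # doctest: +SKIP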
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
return dict((col, DTYPE_KIND_HIVE_TYPE[dtype.kind]) for col, dtype in df.dtypes.iteritems())
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
if field_dict is None and (create or recreate):
field_dict = _infer_field_types_from_df(df)
df.to_csv(f, sep=delimiter, **pandas_kwargs)
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param delimiter: field delimiter in the file
:type delimiter: str
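A hedged usage sketch (the local path, table name, column types, and
partition value are hypothetical):
>>> hh = HiveCliHook() # doctest: +SKIP
>>> hh.load_file('/tmp/data.csv', table='tmp.my_table', field_dict={'a': 'INT', 'b': 'STRING'}, partition={'ds': '2015-01-01'}) # doctest: +SKIP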
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile;"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
# This is for pickling to work despite the thrift hive client not
# being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_service import ThriftHive
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.get('core', 'security') == 'kerberos' and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return ThriftHive.Client(protocol)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
:type table: string
:param partition: Expression that matches the partitions to check for
(eg `a = 'b' AND c = 'd'`)
:type partition: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
self.metastore._oprot.trans.open()
partitions = self.metastore.get_partitions_by_filter(
schema, table, partition, 1)
self.metastore._oprot.trans.close()
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
:type table: string
:param partition_name: Name of the partitions to check for (eg `a=b/c=d`)
:type partition_name: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
self.metastore._oprot.trans.open()
try:
self.metastore.get_partition_by_name(
schema, table, partition_name)
return True
except hive_metastore.ttypes.NoSuchObjectException:
return False
finally:
self.metastore._oprot.trans.close()
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
self.metastore._oprot.trans.open()
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
table = self.metastore.get_table(dbname=db, tbl_name=table_name)
self.metastore._oprot.trans.close()
return table
def get_tables(self, db, pattern='*'):
"""
Get a metastore table object
"""
self.metastore._oprot.trans.open()
tables = self.metastore.get_tables(db_name=db, pattern=pattern)
objs = self.metastore.get_table_objects_by_name(db, tables)
self.metastore._oprot.trans.close()
return objs
def get_databases(self, pattern='*'):
"""
Get a metastore table object
"""
self.metastore._oprot.trans.open()
dbs = self.metastore.get_databases(pattern)
self.metastore._oprot.trans.close()
return dbs
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
for tables with fewer than 32767 partitions (java short max val).
For a subpartitioned table, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
self.metastore._oprot.trans.open()
table = self.metastore.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = self.metastore.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=32767)
else:
parts = self.metastore.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=32767)
self.metastore._oprot.trans.close()
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
def max_partition(self, schema, table_name, field=None, filter=None):
"""
Returns the maximum value for all partitions in a table. Works only
for tables that have a single partition key. For a subpartitioned
table, we recommend using signal tables.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow', table_name=t)
'2015-01-01'
"""
parts = self.get_partitions(schema, table_name, filter)
if not parts:
return None
elif len(parts[0]) == 1:
field = list(parts[0].keys())[0]
elif not field:
raise AirflowException(
"Please specify the field you want the max "
"value for")
return max([p[field] for p in parts])
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
t = self.get_table(table_name, db)
return True
except Exception as e:
return False
class HiveServer2Hook(BaseHook):
"""
Wrapper around the impyla library
Note that the default authMechanism is PLAIN, to override it you
can specify it in the ``extra`` of your connection in the UI as in
``{"authMechanism": "GSSAPI"}``.
"""
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self, schema=None):
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'PLAIN')
kerberos_service_name = None
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
# impyla uses GSSAPI instead of KERBEROS as a auth_mechanism identifier
if auth_mechanism == 'KERBEROS':
logging.warning("Detected deprecated 'KERBEROS' for authMechanism for %s. Please use 'GSSAPI' instead",
self.hiveserver2_conn_id)
auth_mechanism = 'GSSAPI'
from impala.dbapi import connect
return connect(
host=db.host,
port=db.port,
auth_mechanism=auth_mechanism,
kerberos_service_name=kerberos_service_name,
user=db.login,
database=schema or db.schema or 'default')
def get_results(self, hql, schema='default', arraysize=1000):
from impala.error import ProgrammingError
with self.get_conn(schema) as conn:
if isinstance(hql, basestring):
hql = [hql]
results = {
'data': [],
'header': [],
}
cur = conn.cursor()
for statement in hql:
cur.execute(statement)
records = []
try:
# impala Lib raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
records = cur.fetchall()
except ProgrammingError:
logging.debug("get_results returned no records")
if records:
results = {
'data': records,
'header': cur.description,
}
return results
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000):
schema = schema or 'default'
with self.get_conn(schema) as conn:
with conn.cursor() as cur:
logging.info("Running query: " + hql)
cur.execute(hql)
schema = cur.description
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
if output_header:
writer.writerow([c[0] for c in cur.description])
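# stream the result set in chunks of fetch_size so the full result never has to fit in memory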
i = 0
while True:
rows = [row for row in cur.fetchmany(fetch_size) if row]
if not rows:
break
writer.writerows(rows)
i += len(rows)
logging.info("Written {0} rows so far.".format(i))
logging.info("Done. Loaded a total of {0} rows.".format(i))
def get_records(self, hql, schema='default'):
"""
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
| apache-2.0 |
uvchik/pvlib-python | pvlib/test/test_tracking.py | 1 | 12732 | import datetime
import numpy as np
from numpy import nan
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from pvlib.location import Location
from pvlib import solarposition
from pvlib import tracking
def test_solar_noon():
apparent_zenith = pd.Series([10])
apparent_azimuth = pd.Series([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 10, 'surface_azimuth': 90,
'surface_tilt': 0, 'tracker_theta': 0},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_azimuth_north_south():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 90,
'surface_tilt': 60, 'tracker_theta': -60},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect['tracker_theta'] *= -1
assert_frame_equal(expect, tracker_data)
def test_max_angle():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=45, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 15, 'surface_azimuth': 90,
'surface_tilt': 45, 'tracker_theta': 45},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_backtrack():
apparent_zenith = pd.Series([80])
apparent_azimuth = pd.Series([90])
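# with backtracking disabled the tracker follows the sun all the way down: tracker_theta == 80 and aoi == 0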
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=False,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 90,
'surface_tilt': 80, 'tracker_theta': 80},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
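# with backtracking enabled at gcr = 2/7 the tracker rotates back toward horizontal to avoid
# row-to-row shading, accepting a larger aoi in exchange for an unshaded module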
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 52.5716, 'surface_azimuth': 90,
'surface_tilt': 27.42833, 'tracker_theta': 27.4283},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_axis_tilt():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([135])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=30, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 7.286245, 'surface_azimuth': 142.65730,
'surface_tilt': 35.98741, 'tracker_theta': -20.88121},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=30, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 47.6632, 'surface_azimuth': 50.96969,
'surface_tilt': 42.5152, 'tracker_theta': 31.6655},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_axis_azimuth():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 30, 'surface_azimuth': 180,
'surface_tilt': 0, 'tracker_theta': 0},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 180,
'surface_tilt': 30, 'tracker_theta': 30},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_index_mismatch():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([90,180])
with pytest.raises(ValueError):
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
def test_SingleAxisTracker_creation():
system = tracking.SingleAxisTracker(max_angle=45,
gcr=.25,
module='blah',
inverter='blarg')
assert system.max_angle == 45
assert system.gcr == .25
assert system.module == 'blah'
assert system.inverter == 'blarg'
def test_SingleAxisTracker_tracking():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([135])
tracker_data = system.singleaxis(apparent_zenith, apparent_azimuth)
expect = pd.DataFrame({'aoi': 7.286245, 'surface_azimuth': 142.65730 ,
'surface_tilt': 35.98741, 'tracker_theta': -20.88121},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
### results calculated using PVsyst
pvsyst_solar_azimuth = 7.1609
pvsyst_solar_height = 27.315
pvsyst_axis_tilt = 20.
pvsyst_axis_azimuth = 20.
pvsyst_system = tracking.SingleAxisTracker(max_angle=60.,
axis_tilt=pvsyst_axis_tilt,
axis_azimuth=180+pvsyst_axis_azimuth,
backtrack=False)
# the definition of azimuth is different from PVsyst
apparent_azimuth = pd.Series([180+pvsyst_solar_azimuth])
apparent_zenith = pd.Series([90-pvsyst_solar_height])
tracker_data = pvsyst_system.singleaxis(apparent_zenith, apparent_azimuth)
expect = pd.DataFrame({'aoi': 41.07852 , 'surface_azimuth': 180-18.432 ,
'surface_tilt': 24.92122 , 'tracker_theta': -15.18391},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_LocalizedSingleAxisTracker_creation():
localized_system = tracking.LocalizedSingleAxisTracker(latitude=32,
longitude=-111,
module='blah',
inverter='blarg')
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
def test_SingleAxisTracker_localize():
system = tracking.SingleAxisTracker(max_angle=45, gcr=.25,
module='blah', inverter='blarg')
localized_system = system.localize(latitude=32, longitude=-111)
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
def test_SingleAxisTracker_localize_location():
system = tracking.SingleAxisTracker(max_angle=45, gcr=.25,
module='blah', inverter='blarg')
location = Location(latitude=32, longitude=-111)
localized_system = system.localize(location=location)
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
def test_get_irradiance():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
times = pd.DatetimeIndex(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
location = Location(latitude=32, longitude=-111)
solar_position = location.get_solarposition(times)
irrads = pd.DataFrame({'dni':[900,0], 'ghi':[600,0], 'dhi':[100,0]},
index=times)
solar_zenith = solar_position['apparent_zenith']
solar_azimuth = solar_position['azimuth']
tracker_data = system.singleaxis(solar_zenith, solar_azimuth)
irradiance = system.get_irradiance(irrads['dni'],
irrads['ghi'],
irrads['dhi'],
solar_zenith=solar_zenith,
solar_azimuth=solar_azimuth,
surface_tilt=tracker_data['surface_tilt'],
surface_azimuth=tracker_data['surface_azimuth'])
expected = pd.DataFrame(data=np.array(
[[ 961.80070, 815.94490, 145.85580, 135.32820,
10.52757492],
[ nan, nan, nan, nan,
nan]]),
columns=['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse'],
index=times)
assert_frame_equal(irradiance, expected, check_less_precise=2)
def test_SingleAxisTracker___repr__():
system = tracking.SingleAxisTracker(max_angle=45, gcr=.25,
module='blah', inverter='blarg')
expected = 'SingleAxisTracker: \n axis_tilt: 0\n axis_azimuth: 0\n max_angle: 45\n backtrack: True\n gcr: 0.25\n name: None\n surface_tilt: 0\n surface_azimuth: 180\n module: blah\n inverter: blarg\n albedo: 0.25\n racking_model: open_rack_cell_glassback'
assert system.__repr__() == expected
def test_LocalizedSingleAxisTracker___repr__():
localized_system = tracking.LocalizedSingleAxisTracker(latitude=32,
longitude=-111,
module='blah',
inverter='blarg',
gcr=0.25)
expected = 'LocalizedSingleAxisTracker: \n axis_tilt: 0\n axis_azimuth: 0\n max_angle: 90\n backtrack: True\n gcr: 0.25\n name: None\n surface_tilt: 0\n surface_azimuth: 180\n module: blah\n inverter: blarg\n albedo: 0.25\n racking_model: open_rack_cell_glassback\n latitude: 32\n longitude: -111\n altitude: 0\n tz: UTC'
assert localized_system.__repr__() == expected
| bsd-3-clause |
tmerrick1/spack | var/spack/repos/builtin/packages/py-multiqc/package.py | 5 | 2328 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyMultiqc(PythonPackage):
"""MultiQC is a tool to aggregate bioinformatics results across many
samples into a single report. It is written in Python and contains modules
for a large number of common bioinformatics tools."""
homepage = "https://multiqc.info"
url = "https://pypi.io/packages/source/m/multiqc/multiqc-1.0.tar.gz"
version('1.5', 'c9fc5f54a75b1d0c3e119e0db7f5fe72')
version('1.3', '78fef8a89c0bd40d559b10c1f736bbcd')
version('1.0', '0b7310b3f75595e5be8099fbed2d2515')
depends_on('[email protected]:')
depends_on('py-setuptools', type='build')
depends_on('py-click', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-lzstring', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-spectra', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-pyyaml', type=('build', 'run'))
depends_on('py-simplejson', type=('build', 'run'))
| lgpl-2.1 |
sekikn/incubator-airflow | tests/providers/vertica/hooks/test_vertica.py | 5 | 3835 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from unittest.mock import patch
from airflow.models import Connection
from airflow.providers.vertica.hooks.vertica import VerticaHook
class TestVerticaHookConn(unittest.TestCase):
def setUp(self):
super().setUp()
self.connection = Connection(
login='login',
password='password',
host='host',
schema='vertica',
)
class UnitTestVerticaHook(VerticaHook):
conn_name_attr = 'vertica_conn_id'
self.db_hook = UnitTestVerticaHook()
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = self.connection
@patch('airflow.providers.vertica.hooks.vertica.connect')
def test_get_conn(self, mock_connect):
self.db_hook.get_conn()
mock_connect.assert_called_once_with(
host='host', port=5433, database='vertica', user='login', password="password"
)
class TestVerticaHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.cur = mock.MagicMock()
self.conn = mock.MagicMock()
self.conn.cursor.return_value = self.cur
conn = self.conn
class UnitTestVerticaHook(VerticaHook):
conn_name_attr = 'test_conn_id'
def get_conn(self):
return conn
self.db_hook = UnitTestVerticaHook()
@patch('airflow.hooks.dbapi.DbApiHook.insert_rows')
def test_insert_rows(self, mock_insert_rows):
table = "table"
rows = [("hello",), ("world",)]
target_fields = None
commit_every = 10
self.db_hook.insert_rows(table, rows, target_fields, commit_every)
mock_insert_rows.assert_called_once_with(table, rows, None, 10)
def test_get_first_record(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchone.return_value = result_sets[0]
self.assertEqual(result_sets[0], self.db_hook.get_first(statement))
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_records(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchall.return_value = result_sets
self.assertEqual(result_sets, self.db_hook.get_records(statement))
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_pandas_df(self):
statement = 'SQL'
column = 'col'
result_sets = [('row1',), ('row2',)]
self.cur.description = [(column,)]
self.cur.fetchall.return_value = result_sets
df = self.db_hook.get_pandas_df(statement)
self.assertEqual(column, df.columns[0])
self.assertEqual(result_sets[0][0], df.values.tolist()[0][0])
self.assertEqual(result_sets[1][0], df.values.tolist()[1][0])
| apache-2.0 |
VillarrealA/pyoptools | pyoptools/raytrace/_comp_lib/ccd.py | 9 | 8557 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#------------------------------------------------------------------------------
# Copyright (c) 2007, Ricardo Amézquita Orozco
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license.
#
#
# Author: Ricardo Amézquita Orozco
# Description: CCD definition module
# Symbols Defined: CCD
#------------------------------------------------------------------------------
#
'''
Definition of a CCD like object and helper functions
'''
#from enthought.traits.api import Float, Instance, HasTraits, Tuple, Int, Bool, Property
from scipy.misc import toimage
from scipy.interpolate import interp2d,bisplrep,bisplev
from numpy import arange, ma, meshgrid, linspace
from pyoptools.raytrace.component import Component
from pyoptools.raytrace.surface import ArrayDetector,Plane
from pyoptools.misc.pmisc import wavelength2RGB
from pyoptools.misc.lsq import polyfit2d
from pyoptools.raytrace.shape import Shape
from pyoptools.raytrace.shape import Rectangular
#from gui.plotutils import plot, figure, cm, legend
class CCD(Component):
'''**Class to define a CCD like detector**
*Attributes:*
*size*
Tuple with the physical size of the CCD chip
*transparent*
Boolean to set the detector transparent characteristic. Not implemented
Using the same CCD, images of different resolutions can be simulated. See
the im_show and spot_diagram methods
'''
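# Minimal usage sketch (assumes `ccd` is a CCD instance belonging to a system that has
# already been ray-traced; the names below are illustrative only):
#
#     img = ccd.get_image(size=(512, 512))         # grayscale PIL image of the ray hits
#     cimg = ccd.get_color_image(size=(512, 512))  # hits colored by ray wavelength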
# Geometrical size of the CCD chip
#size = Tuple(Float(5),Float(5))
# Setting this attribute to *False*, make the CCD detector opaque
#transparent=Bool(True)
# Private attributes
# detector surface
#__d_surf = Instance(ArrayDetector)
#__d_surf = Instance(Plane)
def _get_hitlist(self):
return tuple(self.__d_surf.hit_list)
hit_list=property(_get_hitlist)
def __init__(self, size=(10,10), transparent=True,*args,**kwargs):
Component.__init__(self, *args, **kwargs)
self.__d_surf= Plane(shape=Rectangular(size=size))#ArrayDetector (size=self.size, transparent=self.transparent)
self.size=size
self.surflist["S1"]=(self.__d_surf,(0,0,0),(0,0,0))
self.material=1.
#~ def __reduce__(self):
#~ args=() #self.intensity,self.wavelength,self.n ,self.label,self.parent,self.pop,self.orig_surf)
#~ return(type(self),args,self.__getstate__())
#~
#~
#~ #TODO: Check if there is a better way to do this, because we are
#~ #rewriting the constructor values here
#~
#~ def __getstate__(self):
#~ return self.__d_surf,self.size,self.surflist,self.material
#~
#~ def __setstate__(self,state):
#~ self.__d_surf,self.size,self.surflist,self.material=state
def get_image(self,size=(256,256)):
"""
Returns the ccd hit_list as a grayscale PIL image
*Attributes:*
*size*
Tuple (dx,dy) containing the image size in pixels. Use this
attribute to set the simulated resolution.
"""
data= self.__d_surf.get_histogram(size)
return(toimage(data, high=255, low=0,cmin=0,cmax=data.max()))
def get_color_image(self, size=(256,256)):
"""
Returns the CCD hit_list as a color image, using the rays' wavelength.
*Attributes*
*size*
Tuple (dx,dy) containing the image size in pixels. Use this
attribute to set the simulated resolution.
"""
data= self.__d_surf.get_color_histogram(size)
return(toimage(data, high=255, low=0))
#~ def im_show(self,fig=None, size=(256,256),cmap=cm.gray,title='Image',color=False):
#~ """Shows a simulated image
#~
#~ *Attributes:*
#~
#~ *size*
#~ Tuple (dx,dy) containing the image size in pixels. Use this
#~ attribute to set the simulated resolution.
#~ *cmap*
#~ Color map to use in the image simulation. See the matplotlib.cm
#~ module for information about colormaps.
#~ *fig*
#~ Pylab figure where the plot will be made. If set to None
#~ a new figure will be created.
#~ """
#~ if fig == None:
#~ fig=figure()
#~
#~ self.__d_surf.im_show(size,cmap,title,color)
#~
#~
#~ def spot_diagram(self,fig=None, style="o", label=None):
#~ '''Plot a spot diagram in a pylab figure
#~
#~ Method that plots a spot diagram of the rays hitting the CCD.
#~
#~ *Attributes:*
#~
#~ *fig*
#~ Pylab figure where the plot will be made. If set to None
#~ a new figure will be created.
#~
#~ *style*
#~ Symbol to be used to represent the spot. See the pylab plot
#~ documentation for more information.
#~
#~ *label*
#~ String containig the label to show in the figure for this spot diagram.
#~ Can be used to identify diferent spot diagrams on the same figure.
#~ '''
#~
#~ if fig == None:
#~ fig=figure()
#~ X=[]
#~ Y=[]
#~ COL=[]
#~ if len(self.__d_surf._hit_list) >0:
#~ for i in self.__d_surf._hit_list:
#~ p=i[0]
#~ # Hitlist[1] points to the incident ray
#~ col=wavelength2RGB(i[1].wavelength)
#~ X.append(p[0])
#~ Y.append(p[1])
#~ COL.append(col)
#~ if label== None:
#~ plot(X, Y, style, figure=fig)
#~ else:
#~ plot(X, Y, style,label=label,figure=fig)
#~ legend()
#~ return fig
def get_optical_path_map(self,size=(20, 20), mask=None):
"""Return the optical path of the rays hitting the detector.
This method uses the optical path of the rays hitting the surface to
create an optical path map. The returned value is an interpolation of the
values obtained by the rays.
Warning:
If the rays hitting the surface are produced by more than one
optical source, the returned map might not be valid.
*Attributes*
*size*
Tuple (nx,ny) containing the number of samples of the returned map.
The map size will be the same as the CCD
*mask*
Shape instance containing the mask of the aperture. If not given,
no mask is applied and the full interpolated map is returned.
*Return value*
A masked array as defined in the numpy.ma module, containing the optical paths
"""
X,Y,Z=self.get_optical_path_data()
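# fit a smoothing bivariate spline to the scattered (x, y, optical path) samples and
# evaluate it on an (nx, ny) grid spanning the CCD surface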
rv=bisplrep(X,Y,Z)
nx, ny=size
xs, ys=self.size
xi=-xs/2.
xf=-xi
yi=-ys/2.
yf=-yi
xd=linspace(xi, xf,nx)
yd=linspace(yi, yf,ny)
data=bisplev(xd,yd,rv)
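# if a mask Shape is given, hide the grid points that fall outside the aperture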
if mask is not None:
assert(isinstance(mask, Shape))
X, Y=meshgrid(xd, yd)
m= ~mask.hit((X, Y, 0))
retval= ma.array(data, mask=m)
else:
retval=data
return retval
def get_optical_path_map_lsq(self,order=10):
"""Return the optical path of the rays hitting the detector.
*Attributes*
"""
X,Y,Z=self.get_optical_path_data()
e,p=polyfit2d(X, Y, Z, order=order)
return e,p
def get_optical_path_data(self):
"""Return the optical path of the rays hitting the detector.
This method returns a tuple X,Y,D, containing the X,Y hit points, and
D containing tha optical path data
Warning:
If the rays hitting the surface are produced by more than one
optical source, the information may not be valid.
"""
X=[]
Y=[]
Z=[]
for ip,r in self.hit_list:
x,y,z= ip
d= r.optical_path()
X.append(x)
Y.append(y)
Z.append(d)
return X,Y,Z
| bsd-3-clause |
XiaoLiuAI/RUPEE | src/python/model/statsmodel_wrapper.py | 1 | 1509 | import copy
import numpy as np
import statsmodels.api as sm
import sklearn
class MNLogit(sklearn.base.ClassifierMixin, sklearn.base.BaseEstimator):
def __init__(self):
self.algoModule = sm.MNLogit
def choose_opt_params(self, X, y, params, cross_val_score, average_measure, metric_func):
score_opt = -1
for param in params:
estimator = sklearn.base.clone(self)
estimator.set_params(**param)
score = average_measure(cross_val_score(estimator, X, y, score_func=metric_func, cv=10, n_jobs= -1))
if score > score_opt:
score_opt = score
opt_params = param
print 'best cross validation score of ', self.__class__.__name__, 'is:', score_opt, ' and it is generated with ', opt_params
return opt_params
def set_params(self, **params):
self.__dict__.update(params)
def fit(self, X, y):
self.lb = sklearn.preprocessing.LabelBinarizer()
self.lb.fit_transform(y)
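# keep only the fitted coefficient matrix; statsmodels models are bound to the (y, X)
# they were built with, so prediction below rebuilds a throwaway model around these params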
self.model_params = self.algoModule(y, X).fit_regularized().params
def out_score(self, X):
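# the MNLogit built from dummy ones() arrays is only a vehicle for predict();
# the stored params and the new X carry all of the real information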
return self.algoModule(np.ones((4,1)), np.ones((4,1))).predict(self.model_params, X)
def clone(self):
return copy.deepcopy(self)
def predict(self, X):
y = self.lb.inverse_transform(self.out_score(X))
return y
def highestScoreLabel(self, X):
score = self.out_score(X)
return np.max(score, axis=1), self.lb.inverse_transform(score)
| gpl-2.0 |
Vimos/scikit-learn | sklearn/neural_network/tests/test_mlp.py | 28 | 22183 | """
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# License: BSD 3 clause
import sys
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits, load_boston, load_iris
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal,
assert_false, ignore_warnings)
from sklearn.utils.testing import assert_raise_message
np.seterr(all='warn')
ACTIVATION_TYPES = ["identity", "logistic", "tanh", "relu"]
digits_dataset_multi = load_digits(n_class=3)
X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200])
y_digits_multi = digits_dataset_multi.target[:200]
digits_dataset_binary = load_digits(n_class=2)
X_digits_binary = MinMaxScaler().fit_transform(
digits_dataset_binary.data[:200])
y_digits_binary = digits_dataset_binary.target[:200]
classification_datasets = [(X_digits_multi, y_digits_multi),
(X_digits_binary, y_digits_binary)]
boston = load_boston()
Xboston = StandardScaler().fit_transform(boston.data)[: 200]
yboston = boston.target[:200]
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
def test_alpha():
# Test that larger alpha yields weights closer to zero
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
absolute_sum(mlp.coefs_[1])]))
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
# Test that the algorithm solution is equal to a worked out example.
X = np.array([[0.6, 0.8, 0.7]])
y = np.array([0])
mlp = MLPClassifier(solver='sgd', learning_rate_init=0.1, alpha=0.1,
activation='logistic', random_state=1, max_iter=1,
hidden_layer_sizes=2, momentum=0)
# set weights
mlp.coefs_ = [0] * 2
mlp.intercepts_ = [0] * 2
mlp.n_outputs_ = 1
mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
mlp.coefs_[1] = np.array([[0.1], [0.2]])
mlp.intercepts_[0] = np.array([0.1, 0.1])
mlp.intercepts_[1] = np.array([1.0])
mlp._coef_grads = [] * 2
mlp._intercept_grads = [] * 2
# Initialize parameters
mlp.n_iter_ = 0
mlp.learning_rate_ = 0.1
# Compute the number of layers
mlp.n_layers_ = 3
# Pre-allocate gradient matrices
mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
mlp.out_activation_ = 'logistic'
mlp.t_ = 0
mlp.best_loss_ = np.inf
mlp.loss_curve_ = []
mlp._no_improvement_count = 0
mlp._intercept_velocity = [np.zeros_like(intercepts) for
intercepts in
mlp.intercepts_]
mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
mlp.coefs_]
mlp.partial_fit(X, y, classes=[0, 1])
# Manually worked out example
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
# = 0.679178699175393
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
# = 0.574442516811659
# o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
# = 0.7654329236196236
# d21 = -(0 - 0.765) = 0.765
# d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
# d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
# W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
# W1grad11 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
# W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
# W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
# W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
# W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
# W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
# W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
# b1grad1 = d11 = 0.01667
# b1grad2 = d12 = 0.0374
# b2grad = d21 = 0.765
# W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
# [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
# [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
# 0.096008], [0.4939998, -0.002244]]
# W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
# [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
# b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
# = [0.098333, 0.09626]
# b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
[0.2956664, 0.096008],
[0.4939998, -0.002244]]),
decimal=3)
assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
decimal=3)
assert_almost_equal(mlp.intercepts_[0],
np.array([0.098333, 0.09626]), decimal=3)
assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
# Testing output
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
# 0.7 * 0.4939998 + 0.098333) = 0.677
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
# 0.7 * -0.002244 + 0.09626) = 0.572
# o1 = h * W2 + b21 = 0.677 * 0.04706 +
# 0.572 * 0.154089 + 0.9235 = 1.043
# prob = sigmoid(o1) = 0.739
assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3)
def test_gradient():
# Test gradient.
# This makes sure that the activation functions and their derivatives
# are correct. The numerical and analytical computation of the gradient
# should be close.
for n_labels in [2, 3]:
n_samples = 5
n_features = 10
X = np.random.random((n_samples, n_features))
y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
Y = LabelBinarizer().fit_transform(y)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10,
solver='lbfgs', alpha=1e-5,
learning_rate_init=0.2, max_iter=1,
random_state=1)
mlp.fit(X, y)
theta = np.hstack([l.ravel() for l in mlp.coefs_ +
mlp.intercepts_])
layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
[mlp.n_outputs_])
activations = []
deltas = []
coef_grads = []
intercept_grads = []
activations.append(X)
for i in range(mlp.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
deltas.append(np.empty((X.shape[0],
layer_units[i + 1])))
fan_in = layer_units[i]
fan_out = layer_units[i + 1]
coef_grads.append(np.empty((fan_in, fan_out)))
intercept_grads.append(np.empty(fan_out))
# analytically compute the gradients
def loss_grad_fun(t):
return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas,
coef_grads, intercept_grads)
[value, grad] = loss_grad_fun(theta)
numgrad = np.zeros(np.size(theta))
n = np.size(theta, 0)
E = np.eye(n)
epsilon = 1e-5
# numerically compute the gradients
for i in range(n):
dtheta = E[:, i] * epsilon
numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] -
loss_grad_fun(theta - dtheta)[0]) /
(epsilon * 2.0))
assert_almost_equal(numgrad, grad)
def test_lbfgs_classification():
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
for X, y in classification_datasets:
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert_greater(mlp.score(X_train, y_train), 0.95)
assert_equal((y_predict.shape[0], y_predict.dtype.kind),
expected_shape_dtype)
def test_lbfgs_regression():
# Test lbfgs on the boston dataset, a regression problems.
X = Xboston
y = yboston
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X, y)
if activation == 'identity':
assert_greater(mlp.score(X, y), 0.84)
else:
# Non linear models perform much better than linear bottleneck:
assert_greater(mlp.score(X, y), 0.95)
def test_learning_rate_warmstart():
# Tests that warm_start reuse past solutions.
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=4,
learning_rate=learning_rate, max_iter=1,
power_t=0.25, warm_start=True)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == 'constant':
assert_equal(prev_eta, post_eta)
elif learning_rate == 'invscaling':
assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
post_eta)
def test_multilabel_classification():
# Test that multi-label classification works as expected.
# test fit method
X, y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50, alpha=1e-5,
max_iter=150, random_state=0, activation='logistic',
learning_rate_init=0.2)
mlp.fit(X, y)
assert_equal(mlp.score(X, y), 1)
# test partial fit method
mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=50, max_iter=150,
random_state=0, activation='logistic', alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
assert_greater(mlp.score(X, y), 0.9)
def test_multioutput_regression():
# Test that multi-output regression works as expected
X, y = make_regression(n_samples=200, n_targets=5)
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=200,
random_state=1)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.9)
def test_partial_fit_classes_error():
# Tests that passing different classes to partial_fit raises an error
X = [[3, 2]]
y = [0]
clf = MLPClassifier(solver='sgd')
clf.partial_fit(X, y, classes=[0, 1])
assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
def test_partial_fit_classification():
# Test partial_fit on classification.
# `partial_fit` should yield the same results as 'fit' for binary and
# multi-class classification.
for X, y in classification_datasets:
X = X
y = y
mlp = MLPClassifier(solver='sgd', max_iter=100, random_state=1,
tol=0, alpha=1e-5, learning_rate_init=0.2)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPClassifier(solver='sgd', random_state=1, alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert_greater(mlp.score(X, y), 0.95)
def test_partial_fit_unseen_classes():
# Non regression test for bug 6994
# Tests for labeling errors in partial fit
clf = MLPClassifier(random_state=0)
clf.partial_fit([[1], [2], [3]], ["a", "b", "c"],
classes=["a", "b", "c", "d"])
clf.partial_fit([[4]], ["d"])
assert_greater(clf.score([[1], [2], [3], [4]], ["a", "b", "c", "d"]), 0)
def test_partial_fit_regression():
# Test partial_fit on regression.
# `partial_fit` should yield the same results as 'fit' for regression.
X = Xboston
y = yboston
for momentum in [0, .9]:
mlp = MLPRegressor(solver='sgd', max_iter=100, activation='relu',
random_state=1, learning_rate_init=0.01,
batch_size=X.shape[0], momentum=momentum)
with warnings.catch_warnings(record=True):
# catch convergence warning
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPRegressor(solver='sgd', activation='relu',
learning_rate_init=0.01, random_state=1,
batch_size=X.shape[0], momentum=momentum)
for i in range(100):
mlp.partial_fit(X, y)
pred2 = mlp.predict(X)
assert_almost_equal(pred1, pred2, decimal=2)
score = mlp.score(X, y)
assert_greater(score, 0.75)
def test_partial_fit_errors():
# Test partial_fit error handling.
X = [[3, 2], [1, 6]]
y = [1, 0]
# no classes passed
assert_raises(ValueError,
MLPClassifier(solver='sgd').partial_fit, X, y, classes=[2])
# lbfgs doesn't support partial_fit
assert_false(hasattr(MLPClassifier(solver='lbfgs'), 'partial_fit'))
def test_params_errors():
# Test that invalid parameters raise value error
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier
assert_raises(ValueError, clf(hidden_layer_sizes=-1).fit, X, y)
assert_raises(ValueError, clf(max_iter=-1).fit, X, y)
assert_raises(ValueError, clf(shuffle='true').fit, X, y)
assert_raises(ValueError, clf(alpha=-1).fit, X, y)
assert_raises(ValueError, clf(learning_rate_init=-1).fit, X, y)
assert_raises(ValueError, clf(momentum=2).fit, X, y)
assert_raises(ValueError, clf(momentum=-0.5).fit, X, y)
assert_raises(ValueError, clf(nesterovs_momentum='invalid').fit, X, y)
assert_raises(ValueError, clf(early_stopping='invalid').fit, X, y)
assert_raises(ValueError, clf(validation_fraction=1).fit, X, y)
assert_raises(ValueError, clf(validation_fraction=-0.5).fit, X, y)
assert_raises(ValueError, clf(beta_1=1).fit, X, y)
assert_raises(ValueError, clf(beta_1=-0.5).fit, X, y)
assert_raises(ValueError, clf(beta_2=1).fit, X, y)
assert_raises(ValueError, clf(beta_2=-0.5).fit, X, y)
assert_raises(ValueError, clf(epsilon=-0.5).fit, X, y)
assert_raises(ValueError, clf(solver='hadoken').fit, X, y)
assert_raises(ValueError, clf(learning_rate='converge').fit, X, y)
assert_raises(ValueError, clf(activation='cloak').fit, X, y)
def test_predict_proba_binary():
# Test that predict_proba works as expected for binary class.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
def test_predict_proba_multiclass():
# Test that predict_proba works as expected for multi class.
X = X_digits_multi[:10]
y = y_digits_multi[:10]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], np.unique(y).size
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_predict_proba_multilabel():
# Test that predict_proba works as expected for multilabel.
# Multilabel should not use softmax which makes probabilities sum to 1
X, Y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
n_samples, n_classes = Y.shape
clf = MLPClassifier(solver='lbfgs', hidden_layer_sizes=30,
random_state=0)
clf.fit(X, Y)
y_proba = clf.predict_proba(X)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(y_proba > 0.5, Y)
y_log_proba = clf.predict_log_proba(X)
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_greater((y_proba.sum(1) - 1).dot(y_proba.sum(1) - 1), 1e-10)
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_sparse_matrices():
# Test that sparse and dense input matrices output the same results.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
X_sparse = csr_matrix(X)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=15,
random_state=1)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp.fit(X_sparse, y)
pred2 = mlp.predict(X_sparse)
assert_almost_equal(pred1, pred2)
pred1 = mlp.predict(X)
pred2 = mlp.predict(X_sparse)
assert_array_equal(pred1, pred2)
def test_tolerance():
# Test tolerance.
# It should force the solver to exit the loop when it converges.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd')
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
def test_verbose_sgd():
# Test verbose.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(solver='sgd', max_iter=2, verbose=10,
hidden_layer_sizes=2)
old_stdout = sys.stdout
sys.stdout = output = StringIO()
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
clf.partial_fit(X, y)
sys.stdout = old_stdout
assert 'Iteration' in output.getvalue()
def test_early_stopping():
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.2
clf = MLPClassifier(tol=tol, max_iter=3000, solver='sgd',
early_stopping=True)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
valid_scores = clf.validation_scores_
best_valid_score = clf.best_validation_score_
assert_equal(max(valid_scores), best_valid_score)
assert_greater(best_valid_score + tol, valid_scores[-2])
assert_greater(best_valid_score + tol, valid_scores[-1])
def test_adaptive_learning_rate():
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd',
learning_rate='adaptive')
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
assert_greater(1e-6, clf._optimizer.learning_rate)
@ignore_warnings(RuntimeError)
def test_warm_start():
X = X_iris
y = y_iris
y_2classes = np.array([0] * 75 + [1] * 75)
y_3classes = np.array([0] * 40 + [1] * 40 + [2] * 70)
y_3classes_alt = np.array([0] * 50 + [1] * 50 + [3] * 50)
y_4classes = np.array([0] * 37 + [1] * 37 + [2] * 38 + [3] * 38)
y_5classes = np.array([0] * 30 + [1] * 30 + [2] * 30 + [3] * 30 + [4] * 30)
# No error raised
clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
warm_start=True).fit(X, y)
clf.fit(X, y)
clf.fit(X, y_3classes)
for y_i in (y_2classes, y_3classes_alt, y_4classes, y_5classes):
clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
warm_start=True).fit(X, y)
message = ('warm_start can only be used where `y` has the same '
'classes as in the previous call to fit.'
' Previously got [0 1 2], `y` has %s' % np.unique(y_i))
assert_raise_message(ValueError, message, clf.fit, X, y_i)
| bsd-3-clause |
kevin-intel/scikit-learn | maint_tools/check_pxd_in_installation.py | 17 | 1962 | """Utility for testing presence and usability of .pxd files in the installation
Usage:
------
python check_pxd_in_installation.py path/to/install_dir/of/scikit-learn
"""
import os
import sys
import pathlib
import tempfile
import textwrap
import subprocess
sklearn_dir = pathlib.Path(sys.argv[1])
pxd_files = list(sklearn_dir.glob("**/*.pxd"))
print("> Found pxd files:")
for pxd_file in pxd_files:
print(' -', pxd_file)
print("\n> Trying to compile a cython extension cimporting all corresponding "
"modules\n")
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir = pathlib.Path(tmpdir)
# A cython test file which cimports all modules corresponding to found
# pxd files.
# e.g. sklearn/tree/_utils.pxd becomes `cimport sklearn.tree._utils`
with open(tmpdir / 'tst.pyx', 'w') as f:
for pxd_file in pxd_files:
to_import = str(pxd_file.relative_to(sklearn_dir))
to_import = to_import.replace(os.path.sep, '.')
to_import = to_import.replace('.pxd', '')
f.write('cimport sklearn.' + to_import + '\n')
# A basic setup file to build the test file.
# We set the language to c++ and we use numpy.get_include() because
# some modules require it.
with open(tmpdir / 'setup_tst.py', 'w') as f:
f.write(textwrap.dedent(
"""
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy
extensions = [Extension("tst",
sources=["tst.pyx"],
language="c++",
include_dirs=[numpy.get_include()])]
setup(ext_modules=cythonize(extensions))
"""))
subprocess.run(["python", "setup_tst.py", "build_ext", "-i"],
check=True, cwd=tmpdir)
print("\n> Compilation succeeded !")
| bsd-3-clause |
rubensmachado/nolearn | nolearn/grid_search.py | 9 | 2364 | """:func:`grid_search` is a wrapper around
:class:`sklearn.grid_search.GridSearchCV`.
:func:`grid_search` adds a printed report to the standard
:class:`GridSearchCV` functionality, so you know about the best score
and parameters.
Usage example:
.. doctest::
>>> import numpy as np
>>> class Dataset:
... def __init__(self, data, target):
... self.data, self.target = data, target
...
>>> from sklearn.linear_model import LogisticRegression
>>> data = np.array([[1, 2, 3], [3, 3, 3]] * 20)
>>> target = np.array([0, 1] * 20)
>>> dataset = Dataset(data, target)
>>> model = LogisticRegression()
>>> parameters = dict(C=[1.0, 3.0])
>>> grid_search(dataset, model, parameters) # doctest: +ELLIPSIS
parameters:
{'C': [1.0, 3.0]}
...
Best score: 1.0000
Best grid parameters:
C=1.0,
...
"""
from __future__ import print_function
from pprint import pprint
import warnings
from sklearn.base import BaseEstimator
from sklearn.grid_search import GridSearchCV
warnings.warn("""\
The nolearn.grid_search module will be removed in nolearn 0.6. If you want to
continue to use this module, please consider copying the code into
your own project.
""")
def print_report(grid_search, parameters):
print()
print("== " * 20)
print("All parameters:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name, value in sorted(best_parameters.items()):
if not isinstance(value, BaseEstimator):
print(" %s=%r," % (param_name, value))
print()
print("== " * 20)
print("Best score: %0.4f" % grid_search.best_score_)
print("Best grid parameters:")
for param_name in sorted(parameters.keys()):
print(" %s=%r," % (param_name, best_parameters[param_name]))
print("== " * 20)
return grid_search
def grid_search(dataset, clf, parameters, cv=None, verbose=4, n_jobs=1,
**kwargs):
# See http://scikit-learn.org/stable/modules/grid_search.html
grid_search = GridSearchCV(
clf,
parameters,
cv=cv,
verbose=verbose,
n_jobs=n_jobs,
**kwargs
)
if verbose:
print("parameters:")
pprint(parameters)
grid_search.fit(dataset.data, dataset.target)
if verbose:
print_report(grid_search, parameters)
return grid_search
| mit |
caseyclements/blaze | blaze/compute/tests/test_bcolz_compute.py | 9 | 5874 | from __future__ import absolute_import, division, print_function
import pytest
bcolz = pytest.importorskip('bcolz')
from datashape import discover, dshape
import numpy as np
import pandas.util.testing as tm
from odo import into
from blaze import by
from blaze.expr import symbol
from blaze.compute.core import compute, pre_compute
from blaze.compute.bcolz import get_chunksize
b = bcolz.ctable(np.array([(1, 1., np.datetime64('2010-01-01')),
(2, 2., np.datetime64('NaT')),
(3, 3., np.datetime64('2010-01-03'))],
dtype=[('a', 'i8'),
('b', 'f8'),
('date', 'datetime64[D]')]))
t = symbol('t', 'var * {a: int64, b: float64, date: ?date}')
to = symbol('to', 'var * {a: int64, b: float64}')
bo = bcolz.ctable(np.array([(1, 1.), (2, 2.), (3, np.nan)],
dtype=[('a', 'i8'), ('b', 'f8')]))
def test_discover():
assert discover(b) == dshape('3 * {a: int64, b: float64, date: date}')
assert discover(b['a']) == dshape('3 * int64')
def test_reductions():
assert compute(t.a.sum(), b) == 6
assert compute(t.a.min(), b) == 1
assert compute(t.a.max(), b) == 3
assert compute(t.a.mean(), b) == 2.0
assert abs(compute(t.a.std(), b) - np.std([1, 2, 3])) < 1e-5
assert abs(compute(t.a.var(), b) - np.var([1, 2, 3])) < 1e-5
assert abs(compute(t.a.std(unbiased=True), b) - np.std([1, 2, 3],
ddof=1)) < 1e-5
assert abs(compute(t.a.var(unbiased=True), b) - np.var([1, 2, 3],
ddof=1)) < 1e-5
assert len(list(compute(t.distinct(), b))) == 3
assert len(list(compute(t.a.distinct(), b))) == 3
assert compute(t.a.nunique(), b) == 3
assert isinstance(compute(t.a.nunique(), b), np.integer)
assert compute(t.a.count(), b) == 3
assert isinstance(compute(t.date.count(), b), np.integer)
assert compute(t.date.nunique(), b) == 2
assert isinstance(compute(t.date.nunique(), b), np.integer)
assert compute(t.date.count(), b) == 2
assert isinstance(compute(t.a.count(), b), np.integer)
assert compute(t.a[0], b) == 1
assert compute(t.a[-1], b) == 3
assert compute(t[0], b) == compute(t[0], b)
assert compute(t[-1], b) == compute(t[-1], b)
def test_nunique():
assert compute(t.a.nunique(), b) == 3
assert compute(t.nunique(), b) == 3
def test_selection_head():
ds = dshape('var * {a: int32, b: int32, c: float64}')
b = into(bcolz.ctable,
[(i, i + 1, float(i) ** 2) for i in range(10)],
dshape=ds)
t = symbol('t', ds)
# numpy reductions return numpy scalars
assert compute((t.a < t.b).all(), b).item() is True
assert list(compute(t[t.a < t.b].a.head(10), b)) == list(range(10))
assert list(compute(t[t.a > t.b].a.head(10), b)) == []
assert into([], compute(t[t.a + t.b > t.c], b)) == [(0, 1, 0),
(1, 2, 1),
(2, 3, 4)]
assert len(compute(t[t.a + t.b > t.c].head(10), b)) # non-empty
assert len(compute(t[t.a + t.b < t.c].head(10), b)) # non-empty
def test_selection_isnan():
b = bcolz.ctable([[1, np.nan, 3], [1., 2., np.nan]], names=['a', 'b'])
t = symbol('t', discover(b))
lhs = compute(t[t.a.isnan()], b)
rhs = np.array([(np.nan, 2.0)], dtype=b.dtype)
for n in b.dtype.names:
assert np.isclose(lhs[n], rhs[n], equal_nan=True).all()
assert np.isclose(compute(t[~t.b.isnan()], b)[n],
np.array(
[(1, 1.0), (np.nan, 2.0)], dtype=b.dtype)[n],
equal_nan=True).all()
def test_count_isnan():
assert compute(to.a[~to.b.isnan()].count(), bo) == 2
def test_count_isnan_object():
assert compute(to.a[~to.b.isnan()].count(), bo) == 2
def test_count_isnan_struct():
assert compute(t[~t.b.isnan()].count(), b) == 3
def test_nrows():
assert compute(t.nrows, b) == len(b)
def test_nelements():
assert compute(t.nelements(axis=0), b) == len(b)
assert compute(t.nelements(), b) == len(b)
# This is no longer desired. Handled by compute_up
def dont_test_pre_compute():
b = bcolz.ctable(np.array([(1, 1., 10.), (2, 2., 20.), (3, 3., 30.)],
dtype=[('a', 'i8'), ('b', 'f8'), ('c', 'f8')]))
s = symbol('s', discover(b))
result = pre_compute(s[['a', 'b']], b)
assert result.names == ['a', 'b']
def eq(a, b):
return np.array_equal(a, b)
def test_unicode_field_names():
b = bcolz.ctable(np.array([(1, 1., 10.), (2, 2., 20.), (3, 3., 30.)],
dtype=[('a', 'i8'), ('b', 'f8'), ('c', 'f8')]))
s = symbol('s', discover(b))
assert eq(compute(s[u'a'], b)[:], compute(s['a'], b)[:])
assert eq(compute(s[[u'a', u'c']], b)[:], compute(s[['a', 'c']], b)[:])
assert eq(compute(s[u'a'], b)[:],
compute(s['a'], b)[:])
assert eq(compute(s[[u'a', u'c']], b)[:],
compute(s[['a', 'c']], b)[:])
def test_chunksize_inference():
b = bcolz.ctable(np.array([(1, 1., 10.), (2, 2., 20.), (3, 3., 30.)],
dtype=[('a', 'i8'), ('b', 'f8'), ('c', 'f8')]),
chunklen=2)
assert get_chunksize(b) == 2
def test_notnull():
with pytest.raises(AttributeError):
t.b.notnull
def test_by_with_single_row():
ct = bcolz.ctable([[1, 1, 3, 3], [1, 2, 3, 4]], names=list('ab'))
t = symbol('t', discover(ct))
subset = t[t.a == 3]
expr = by(subset.a, b_sum=subset.b.sum())
result = compute(expr, ct)
expected = compute(expr, ct, optimize=False)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
JosmanPS/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 206 | 7643 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consicutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
jpautom/scikit-learn | sklearn/covariance/tests/test_covariance.py | 34 | 11120 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
assert_greater(np.amin(mahal_dist), 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_centered,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered,
                                                 assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
lxsmnv/spark | python/pyspark/sql/utils.py | 6 | 5619 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import py4j
class CapturedException(Exception):
def __init__(self, desc, stackTrace):
self.desc = desc
self.stackTrace = stackTrace
def __str__(self):
return repr(self.desc)
class AnalysisException(CapturedException):
"""
Failed to analyze a SQL query plan.
"""
class ParseException(CapturedException):
"""
Failed to parse a SQL command.
"""
class IllegalArgumentException(CapturedException):
"""
Passed an illegal or inappropriate argument.
"""
class StreamingQueryException(CapturedException):
"""
Exception that stopped a :class:`StreamingQuery`.
"""
class QueryExecutionException(CapturedException):
"""
Failed to execute a query.
"""
def capture_sql_exception(f):
def deco(*a, **kw):
try:
return f(*a, **kw)
except py4j.protocol.Py4JJavaError as e:
s = e.java_exception.toString()
stackTrace = '\n\t at '.join(map(lambda x: x.toString(),
e.java_exception.getStackTrace()))
if s.startswith('org.apache.spark.sql.AnalysisException: '):
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.catalyst.analysis'):
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.catalyst.parser.ParseException: '):
raise ParseException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.streaming.StreamingQueryException: '):
raise StreamingQueryException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.execution.QueryExecutionException: '):
raise QueryExecutionException(s.split(': ', 1)[1], stackTrace)
if s.startswith('java.lang.IllegalArgumentException: '):
raise IllegalArgumentException(s.split(': ', 1)[1], stackTrace)
raise
return deco
def install_exception_handler():
"""
    Hook an exception handler into Py4j, which can capture some SQL exceptions in Java.
    When calling the Java API, Py4j calls `get_return_value` to parse the returned object.
    If an exception happened in the JVM, the result is a Java exception object and a
    py4j.protocol.Py4JJavaError is raised. We replace the original `get_return_value` with
    one that captures the Java exception and raises the corresponding Python exception
    (with the same error message).
    It is idempotent and can be called multiple times.
"""
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
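# Usage sketch (illustrative only, not part of the original module): once
# install_exception_handler() has been called, a JVM-side analysis error surfaces
# as the Python AnalysisException defined above, e.g.
#
#     install_exception_handler()
#     try:
#         spark.sql("SELECT * FROM table_that_does_not_exist")
#     except AnalysisException as e:
#         print(e.desc)
#
# Here `spark` is an assumed, already-created SparkSession.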
def toJArray(gateway, jtype, arr):
"""
    Convert a Python list to a Java typed array.
:param gateway: Py4j Gateway
:param jtype: java type of element in array
:param arr: python type list
"""
jarr = gateway.new_array(jtype, len(arr))
for i in range(0, len(arr)):
jarr[i] = arr[i]
return jarr
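# Example (hypothetical, for illustration): building a java.lang.String[] from a
# Python list via the JVM view exposed by the gateway:
#
#     jarr = toJArray(gateway, gateway.jvm.java.lang.String, ["a", "b", "c"])
#
# `gateway` is an assumed py4j JavaGateway instance obtained elsewhere.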
def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.19.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__))
def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "0.8.0"
from distutils.version import LooseVersion
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
if not have_arrow:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
| apache-2.0 |
lizardsystem/lizard-measure | lizard_measure/views.py | 1 | 36112 | # (c) Nelen & Schuurmans. GPL licensed, see LICENSE.txt.
import json
import logging
import datetime
import math
import iso8601
from django.db.models.query_utils import Q
from django.shortcuts import get_object_or_404
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse
from django.template.loader import get_template
from django.views.generic.base import View
from matplotlib.dates import date2num
from matplotlib.lines import Line2D
from lizard_area.models import Area
from lizard_measure.models import Measure, MeasureStatus
from lizard_measure.models import WaterBody
from lizard_measure.models import MeasureType
from lizard_measure.models import MeasurePeriod
from lizard_measure.models import MeasureCategory
from lizard_measure.models import Unit
from lizard_measure.models import HorizontalBarGraph
from lizard_measure.models import Score
from lizard_measure.suitable_measures import get_suitable_measures
from lizard_measure.models import SteeringParameterFree
from lizard_measure.models import SteeringParameterPredefinedGraph
from lizard_measure.models import PredefinedGraphSelection
from lizard_measure.models import WatertypeGroup
from lizard_measure.models import EsfPattern
from lizard_registration.utils import get_user_permissions_overall
from lizard_security.models import DataSet
from lizard_area.models import Area
from nens_graph.common import DateGridGraph
from nens_graph.common import dates_values_comments
from lizard_map.views import AppView
from lizard_graph.views import TimeSeriesViewMixin
from lizard_fewsnorm.models import GeoLocationCache
from lizard_history.utils import get_history
logger = logging.getLogger(__name__)
# HOMEPAGE_KEY = 1 # Primary key of the Workspace for rendering the homepage.
CRUMB_HOMEPAGE = {'name': 'home', 'url': '/'}
# EKR Colors
COLOR_1 = '#ff0000'
COLOR_2 = '#ffaa00'
COLOR_3 = '#ffff00'
COLOR_4 = '#00ff00'
COLOR_5 = '#0000ff'
# def waterbody_shapefile_search(request):
# """Return url to redirect to if a waterbody is found.
# Only works with adapter lizard_shape.
# """
# google_x = float(request.GET.get('x'))
# google_y = float(request.GET.get('y'))
# # Set up a basic map as only map can search...
# mapnik_map = mapnik.Map(400, 400)
# mapnik_map.srs = coordinates.GOOGLE
# workspace = Workspace.objects.get(name="Homepage")
# # The following adapter should be available in the fixture.
# adapter = workspace.workspace_items.all()[0].adapter
# search_results = adapter.search(google_x, google_y)
# # Return url of first found object.
# for search_result in search_results:
# #name_in_shapefile = search_result['name']
# id_in_shapefile = search_result['identifier']['id']
# water_body = WaterBody.objects.get(ident=id_in_shapefile)
# return HttpResponse(water_body.get_absolute_url())
# # Nothing found? Return an empty response and the
# # javascript popup handler
# # will fire.
# return HttpResponse('')
def _sorted_measures(area):
"""
Return list of measures that relate to area. Parent measures ordered
alphabetically, child measures directly after parent measures,
and loose child measures (parent not in list) at the end.
"""
# These must all occur in the list. Also related child measures
# whose parent are with a different area.
all_related_measures = Measure.objects.filter(Q(waterbodies__area=area)|Q(areas=area)
).distinct()
all_related_measures_dict = dict([(m.id, m) for m in all_related_measures])
# get measures without parent: main measures
parent_measures = all_related_measures.filter(
parent__isnull=True,
).order_by(
'title',
)
result_measures = []
for p in parent_measures:
result_measures.append(p)
child_measures = p.measure_set.filter(Q(waterbodies__area=area)|Q(areas=area)).distinct().order_by('title')
result_measures.extend(child_measures)
# Keep track of added measures
if p.id in all_related_measures_dict:
del all_related_measures_dict[p.id]
for child_measure in child_measures:
if child_measure.id in all_related_measures_dict:
del all_related_measures_dict[child_measure.id]
# Now all_related_measures_dict contains only the left-over measures
left_over_measures = all_related_measures_dict.values()
    # sorted() returns a new list, so assign the result back (a bare sorted() call is a no-op)
    left_over_measures = sorted(left_over_measures, key=lambda m: m.title)
for measure in left_over_measures:
measure.parent_other_area = True
result_measures.append(measure)
return result_measures
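# Illustrative ordering (hypothetical data): for parent measures A and B related
# to the area, with children A1 and A2, plus a child C1 whose own parent belongs
# to a different area, the returned list is [A, A1, A2, B, C1]; C1 additionally
# gets `parent_other_area = True` so templates can mark it as a loose child.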
class MeasureDetailView(AppView):
"""
Show measure details
"""
template_name='lizard_measure/measure.html'
def measure(self):
"""Return a measure"""
if not hasattr(self, '_measure'):
self._measure = Measure.objects.get(
pk=self.measure_id)
return self._measure
def get(self, request, *args, **kwargs):
self.measure_id = kwargs['measure_id']
return super(MeasureDetailView, self).get(
request, *args, **kwargs)
class MeasureHistoryView(MeasureDetailView):
"""
Show measure history
"""
template_name='lizard_measure/measure_history.html'
def history(self):
"""
Return full history, if possible cached
"""
if not hasattr(self, '_log_entry'):
self._history = get_history(
obj=self.measure(),
)
return self._history
class MeasureHistoryDetailView(MeasureDetailView):
"""
Show measure history details
"""
template_name='lizard_measure/measure_history_details.html'
def action(self):
"""
Return history details dict
"""
if not hasattr(self, '_action'):
self._action = get_history(
log_entry_id=self.log_entry_id,
)
return self._action
class MeasureArchiveView(AppView):
"""
Readonly measure form.
"""
def get(self, request, *args, **kwargs):
"""
Return read only form for measure corresponding to specific log_entry.
"""
if request.user.is_authenticated():
self.template_name = 'lizard_measure/measure_form_read_only.js'
self.measure_id = kwargs.get('measure_id')
self.log_entry_id = kwargs.get('log_entry_id')
else:
self.template_name = 'portals/geen_toegang.js'
return super(MeasureArchiveView, self).get(request, *args, **kwargs)
class MeasureHistoryDetailView(MeasureDetailView):
"""
Show measure history details
"""
template_name='lizard_measure/measure_history_details.html'
def action(self):
"""
Return history details dict
"""
if not hasattr(self, '_action'):
self._action = get_history(
log_entry_id=self.log_entry_id,
)
return self._action
def changes(self):
"""
Return list of changes using verbose names of fields
"""
result = [(self.measure()._meta.get_field(f).verbose_name, v)
for f, v in self.action()['changes'].items()]
return result
def get(self, request, *args, **kwargs):
"""
Pick the log_entry_id from the url
"""
self.log_entry_id = kwargs['log_entry_id']
return super(MeasureHistoryDetailView, self).get(
request, *args, **kwargs)
# def measure_detail(request, measure_id,
# template='lizard_measure/measure.html'):
# measure = get_object_or_404(Measure, pk=measure_id)
# return render_to_response(
# template,
# {'measure': measure},
# context_instance=RequestContext(request))
def krw_waterbody_measures(request, area_ident,
template='lizard_measure/waterbody_measures.html'):
"""
Show list of measures for an area_ident.
"""
area = get_object_or_404(Area, ident=area_ident)
result_measures = _sorted_measures(area)
perms = dict(get_user_permissions_overall(request.user, 'user', as_list=True))
return render_to_response(
template,
{'waterbody': area,
'main_measures': result_measures,
'perm': perms
},
context_instance=RequestContext(request))
def suited_measures(request, area_ident,
template='lizard_measure/suited_measures.html'):
# for testing purposes, we retrieve all measures
area = get_object_or_404(Area, ident=area_ident)
suitable_measure_types = get_suitable_measures(area)
logger.debug("found %d suitable measures", len(suitable_measure_types))
return render_to_response(
template,
{'suitable_measure_types': suitable_measure_types},
context_instance=RequestContext(request))
def value_to_judgement(value, a=None, b=None, c=None, d=None):
"""
Simple classifier for judgements.
"""
if value < a:
return "slecht"
if value < b:
return "ontoereikend"
if value < c:
return "matig"
if value < d:
return "goed"
return "zeer goed"
def value_to_html_color(value, a=None, b=None, c=None, d=None):
"""
Simple classifier for colors. All values will return a color.
"""
if value < a:
return COLOR_1
if value < b:
return COLOR_2
if value < c:
return COLOR_3
if value < d:
return COLOR_4
return COLOR_5
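# Example (illustrative only): with class borders a, b, c, d = 0.2, 0.4, 0.6, 0.8
# an EKR score of 0.45 falls in the third class, so
#     value_to_judgement(0.45, 0.2, 0.4, 0.6, 0.8)   # -> "matig"
#     value_to_html_color(0.45, 0.2, 0.4, 0.6, 0.8)  # -> COLOR_3 ('#ffff00')
# The border values here are assumptions for the example, not values used by the
# application.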
def comment_to_html_color(comment):
"""
Lookup the EKR color for a fewsnorm comment.
Defaults to grey.
"""
return {
'slecht': COLOR_1,
'ontoereikend': COLOR_2,
'matig': COLOR_3,
'goed': COLOR_4,
'zeer goed': COLOR_5}.get(comment, '#cccccc')
def krw_waterbody_ekr_scores(
request, area_ident, horizontal_bar_graph_slug='ekr-extended',
template='lizard_measure/waterbody_ekr_scores.html'):
"""
Show screen for ekr scores.
A HorizontalBarGraph with slug 'ekr-extended' must be defined.
"""
area = get_object_or_404(Area, ident=area_ident)
location = None
try:
location = GeoLocationCache.objects.get(ident=area_ident)
except GeoLocationCache.DoesNotExist:
pass
hor_bar_graph = HorizontalBarGraph.objects.get(
slug=horizontal_bar_graph_slug)
graph_items = hor_bar_graph.horizontalbargraphitem_set.all()
for graph_item in graph_items:
if not graph_item.location and location:
graph_item.location = location
ekr_scores = []
try:
ekr_scores = [(graph_item.time_series(with_comments=True),
Score.from_graph_item(graph_item),
graph_item)
for graph_item in graph_items]
except AttributeError:
# Occurs when above location =
# GeoLocationCache... fails... just return nothing.
pass
score_tables = []
for ts, score, graph_item in ekr_scores:
new_score_table = {
'title': str(graph_item),
'score': score,
'data': []}
# We assume there is only one.
len_ts_values = len(ts.values())
if len_ts_values != 1:
logger.error('Number of TimeSeries for HorizontalBarGraphItem %s is %d' % (
graph_item, len_ts_values))
if len_ts_values == 0:
new_score_table['data'] = [{'timestamp': 'Geen tijdreeks beschikbaar', 'value': None}]
# a, b, c, d = score.borders
for single_ts in ts.values():
data_table = []
for timestamp, (value, flag, comment) in single_ts.get_events():
# value = math.trunc(10 * value) / 10.0 # Floor at 1 decimal
data_table.append({'timestamp': timestamp,
'value': value,
'color': comment_to_html_color(comment),
'comment': comment})
new_score_table['data'] = data_table
new_score_table['color_target_2015'] = comment_to_html_color(score.target_2015)
new_score_table['color_target_2027'] = comment_to_html_color(score.target_2027)
score_tables.append(new_score_table)
return render_to_response(
template,
{'waterbody': area,
'score_tables': score_tables,
'COLOR_1': COLOR_1,
'COLOR_2': COLOR_2,
'COLOR_3': COLOR_3,
'COLOR_4': COLOR_4,
'COLOR_5': COLOR_5,
},
context_instance=RequestContext(request))
def _image_measures(graph, measures, start_date, end_date,
end_date_realized=None, legend_location=-1,
title=None, wide_left_ticks=False):
"""Function to draw measures
TODO: when a single measure is drawn, sometimes the whole
picture is stretched out
!attn! measure statuses are aggregated from child measures
"""
def calc_bar_colors(measure, end_date, is_planning):
"""Returns calculated bars. The bars are aggregated from
measure_status_moments from sub measures.
** measure can also be a measure_collection. It uses the
status_moment function only.
"""
measure_bar = []
measure_colors = []
measure_status_moments = measure.measure_status_moments(
end_date=end_date, is_planning=is_planning)
for msm_index, msm in enumerate(measure_status_moments):
# drawing enddate: "infinity" or next status moment
if msm_index == len(measure_status_moments) - 1:
msm_end_date = end_date
else:
if is_planning:
msm_end_date = measure_status_moments[
msm_index + 1].planning_date
else:
msm_end_date = measure_status_moments[
msm_index + 1].realisation_date
if is_planning:
begin = msm.planning_date
else:
begin = msm.realisation_date
date_length = date2num(msm_end_date) - date2num(begin)
measure_bar.append((date2num(begin), date_length))
measure_colors.append(msm.status.color.html)
return measure_bar, measure_colors
if end_date_realized is None:
end_date_realized = min(end_date, datetime.datetime.now().date())
if title is None:
title = "maatregel(en)"
if wide_left_ticks:
graph.margin_left_extra = 200
measure_name_length = 55
else:
measure_name_length = 17
graph.figure.suptitle(
title, x=0.5, y=1,
horizontalalignment='center', verticalalignment='top')
for index, measure in enumerate(measures):
# realized
measure_bar, measure_colors = calc_bar_colors(
measure, end_date_realized, False)
graph.axes.broken_barh(measure_bar,
(-index - 0.2, 0.4),
facecolors=measure_colors,
edgecolors=measure_colors)
# planning
measure_bar_p, measure_colors_p = calc_bar_colors(
measure, end_date, True)
graph.axes.broken_barh(measure_bar_p,
(-index - 0.45, 0.1),
facecolors=measure_colors_p,
edgecolors=measure_colors_p)
# Y ticks
yticklabels = [measure.short_name(max_length=measure_name_length)
for measure in measures]
yticklabels.reverse()
graph.axes.set_yticks(range(int(-len(measures) + 0.5), 1))
graph.axes.set_yticklabels(yticklabels)
graph.axes.set_xlim(date2num((start_date, end_date)))
graph.axes.set_ylim(-len(measures) + 0.5, 0.5)
# Legend
if legend_location >= 0:
legend_handles, legend_labels = [], []
for measure_status in MeasureStatus.objects.filter(valid=True):
legend_handles.append(
Line2D([], [], color=measure_status.color.html, lw=10))
legend_labels.append(measure_status.name)
graph.legend(legend_handles, legend_labels, legend_location=legend_location)
def measure_graph_api(request):
"""
    Wrapper around measure_graph that gets its arguments from request parameters instead of the URL.
"""
area_ident = request.GET.get('location', None)
filter = request.GET.get('filter', 'all')
return measure_graph(request, area_ident, filter)
def measure_graph(request, area_ident, filter='all'):
"""
visualizes scores or measures in a graph
identifier_list: [{'waterbody_slug': ...}, ...]
start_end_dates: 2-tuple dates
each row is an area
"""
if filter == 'measure':
measures = Measure.objects.filter(
Q(pk=area_ident)|Q(parent__id=area_ident)).order_by('title')
else:
area = get_object_or_404(Area, ident=area_ident)
if filter == 'focus':
measures = [m for m in _sorted_measures(area)
if m.is_indicator == True]
else:
measures = _sorted_measures(area)
start_date = iso8601.parse_date(request.GET.get('dt_start', '2008-1-1T00:00:00')).date()
end_date = iso8601.parse_date(request.GET.get('dt_end', '2013-1-1T00:00:00')).date()
width = int(request.GET.get('width', 380))
height = int(request.GET.get('height', 170))
legend_location = int(request.GET.get('legend-location', -1))
wide_left_ticks = request.GET.get('wide_left_ticks', 'false') == 'true'
format = request.GET.get('format', None)
graph = DateGridGraph(width=width, height=height)
_image_measures(graph, measures, start_date, end_date,
legend_location=legend_location,
wide_left_ticks=wide_left_ticks)
graph.set_margins()
if format == 'ps':
return graph.render(
response=HttpResponse(content_type='application/postscript'),
format='ps')
else:
return graph.png_response(
response=HttpResponse(content_type='image/png'))
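# Example request (illustrative; the actual URL route is defined elsewhere and is
# an assumption here): rendering only the focus measures of an area as a
# 380x170 PNG could look like
#     .../measure_graph/<area_ident>/focus/?dt_start=2008-1-1T00:00:00
#         &dt_end=2013-1-1T00:00:00&width=380&height=170
# The defaults above are used for any parameter that is omitted.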
def measure_detailedit_portal(request):
"""
Return JSON for request.
"""
c = RequestContext(request)
measure_id = request.GET.get('measure_id', None)
init_parent = request.GET.get('parent_id', None)
area_id = request.GET.get('area_id', None)
if init_parent:
init_parent = Measure.objects.get(pk=init_parent)
init_area = None
init_waterbody = None
if area_id:
area = Area.objects.get(ident=area_id)
if area.area_class == Area.AREA_CLASS_AAN_AFVOERGEBIED:
init_area = area
else:
init_waterbody = WaterBody.objects.get(area=area)
try:
measure = Measure.objects.get(pk=measure_id)
except Measure.DoesNotExist:
measure = None
if request.user.is_authenticated():
t = get_template('portals/maatregelen_form.js')
c = RequestContext(request, {
'measure': measure,
'measure_types': json.dumps(
[{'id': r.id, 'name': str(r)}
for r in MeasureType.objects.all()]
),
'periods': json.dumps(
[{'id': r.id, 'name': str(r)}
for r in MeasurePeriod.objects.filter(valid=True)]
),
'aggregations': json.dumps(
[{'id': r[0], 'name': r[1]}
for r in Measure.AGGREGATION_TYPE_CHOICES]
),
'categories': json.dumps(
[{'id': r.id, 'name': str(r)}
for r in MeasureCategory.objects.filter(valid=True)]
),
'units': json.dumps(
[{'id': r.id, 'name': str(r)}
for r in Unit.objects.all()]
),
'init_parent': init_parent,
'init_area': init_area,
'init_waterbody': init_waterbody,
})
else:
t = get_template('portals/geen_toegang.js')
return HttpResponse(t.render(c), mimetype="text/plain")
def measure_groupedit_portal(request):
"""
Return JSON for request.
"""
c = RequestContext(request)
perms = dict(get_user_permissions_overall(request.user, 'user', as_list=True))
if request.user.is_authenticated():
t = get_template('portals/maatregelen-beheer.js')
c = RequestContext(request, {
'measure_types': json.dumps(
[{'id': r.id, 'name': str(r)}
for r in MeasureType.objects.all()],
),
'periods': json.dumps(
[{'id': r.id, 'name': str(r)}
for r in MeasurePeriod.objects.all()],
),
'aggregations': json.dumps(
[{'id': r[0], 'name': r[1]}
for r in Measure.AGGREGATION_TYPE_CHOICES],
),
'categories': json.dumps(
[{'id': r.id, 'name': str(r)}
for r in MeasureCategory.objects.all()],
),
'units': json.dumps(
[{'id': r.id, 'name': str(r)}
for r in Unit.objects.all()],
),
'perm': perms
})
else:
t = get_template('portals/geen_toegang.js')
return HttpResponse(t.render(c), mimetype="text/plain")
def organization_groupedit_portal(request):
"""
Return JSON for request.
"""
c = RequestContext(request)
if request.user.is_authenticated():
perms = dict(get_user_permissions_overall(request.user, 'user', as_list=True))
t = get_template('portals/organisatie-beheer.js')
c = RequestContext(request, {
'perm': perms
})
else:
t = get_template('portals/geen_toegang.js')
return HttpResponse(t.render(c), mimetype="text/plain")
def steering_parameter_form(request):
"""
Return JSON with editor for steering parameters .
"""
c = RequestContext(request)
perms = dict(get_user_permissions_overall(request.user, 'user', as_list=True))
object_id = request.GET.get('object_id', None)
area = get_object_or_404(Area,ident=object_id)
if request.user.is_authenticated():
t = get_template('portals/stuurparameter_form.js')
if area.area_class == Area.AREA_CLASS_KRW_WATERLICHAAM:
predefined_graphs = PredefinedGraphSelection.objects.filter(
Q(for_area_type=Area.AREA_CLASS_KRW_WATERLICHAAM)|Q(for_area_type=None)).distinct()
related_areas = Area.objects.filter(Q(arealink_a__area_b=area)|Q(arealink_b__area_a=area)).distinct()
elif area.area_class == Area.AREA_CLASS_AAN_AFVOERGEBIED:
predefined_graphs = PredefinedGraphSelection.objects.filter(
Q(for_area_type=Area.AREA_CLASS_AAN_AFVOERGEBIED)|Q(for_area_type=None)).distinct()
related_areas = Area.objects.filter(Q(arealink_a__area_b=area)|Q(arealink_b__area_a=area)|Q(id=area.id)).distinct()
c = RequestContext(request, {
'area': area,
'predefined_graphs': json.dumps(
[{'id': r.id, 'name': r.name, 'for_area_type': r.for_area_type}
for r in predefined_graphs]),
'related_areas': json.dumps(
[{'id': r.id, 'name': r.name}
for r in related_areas]),
'perm': perms
})
else:
t = get_template('portals/geen_toegang.js')
return HttpResponse(t.render(c), mimetype="text/plain")
def esfpattern_detailedit_portal(request):
"""
Return JSON for request.
"""
c = RequestContext(request)
pattern_id = request.GET.get('esfpattern_id', None)
try:
pattern = EsfPattern.objects.get(pk=pattern_id)
except EsfPattern.DoesNotExist:
pattern = None
if request.user.is_authenticated():
data_sets = [{'id': r.id, 'name': str(r)} for r in DataSet.objects.filter(
pk__in=list(request.allowed_data_set_ids))]
if request.user.is_superuser:
data_sets = [{'id': r.id, 'name': str(r)} for r in DataSet.objects.all()]
data_sets.append({'id':None, 'name': 'landelijk'})
t = get_template('portals/esfpattern_form.js')
c = RequestContext(request, {
'pattern': pattern,
'measure_types': json.dumps(
[{'id': r.id, 'name': str(r)}
for r in MeasureType.objects.all()]
),
'watertype_group': json.dumps(
[{'id': r.id, 'name': str(r)}
for r in WatertypeGroup.objects.all()]
),
'data_sets': json.dumps(data_sets
),
})
else:
t = get_template('portals/geen_toegang.js')
return HttpResponse(t.render(c), mimetype="text/plain")
def steerparameter_overview(request):
"""
Return JSON for request.
"""
c = RequestContext(request)
areas = Area.objects.all()
predefined_graphs = PredefinedGraphSelection.objects.all().values_list('name', flat=True)
#predefined_graphs = ['testa', 'testb']
parameters = SteeringParameterFree.objects.filter(area__in=areas).distinct().values_list('parameter_code', flat=True)
parameters = [{'org': par, 'no_point': par.replace('.','_')} for par in parameters]
perms = dict(get_user_permissions_overall(request.user, 'user', as_list=True))
#parameters = ['test','test2']
if request.user.is_authenticated():
t = get_template('portals/stuurparameter-overzicht.js')
c = RequestContext(request, {
'predefined_graphs': predefined_graphs,
'parameters': parameters,
'perm': perms
})
else:
t = get_template('portals/geen_toegang.js')
return HttpResponse(t.render(c), mimetype="text/plain")
################ EKR GRAPHS
class HorizontalBarGraphView(View, TimeSeriesViewMixin):
"""
Display horizontal bars
"""
def _graph_items_from_request(self):
"""
Return graph_items and graph_settings
graph_items must be a list with for each item a function
time_series. This function accepts keyword arguments dt_start
and dt_end and returns a list of timeseries.
"""
get = self.request.GET
graph_settings = {
'width': 1200,
'height': 500,
'location': None,
'format': 'png',
}
location = get.get('location', None)
if location is not None:
try:
location = GeoLocationCache.objects.filter(ident=location)[0]
except IndexError:
logger.exception(
("Tried to fetch a non existing "
"GeoLocationCache %s, created dummy one") %
location)
location = GeoLocationCache(ident=location)
graph_settings['location'] = location
format = get.get('format', None)
graph_settings['format'] = format
graph_items = []
# Using the shortcut graph=<graph-slug>
hor_graph_slug = get.get('graph', None)
if hor_graph_slug is not None:
# Add all graph items of graph to result
try:
hor_graph = HorizontalBarGraph.objects.get(
slug=hor_graph_slug)
graph_items.extend(hor_graph.horizontalbargraphitem_set.all())
except HorizontalBarGraph.DoesNotExist:
logger.exception("Tried to fetch a non-existing hor.bar."
"graph %s" % hor_graph_slug)
# Graph settings can be overruled
graph_parameters = ['width', 'height']
for graph_parameter in graph_parameters:
if graph_parameter in get:
graph_settings[graph_parameter] = get[graph_parameter]
return graph_items, graph_settings
def get(self, request, *args, **kwargs):
"""
Draw the EKR graph
"""
dt_start, dt_end = self._dt_from_request()
graph_items, graph_settings = self._graph_items_from_request()
graph = DateGridGraph(
width=int(graph_settings['width']),
height=int(graph_settings['height']))
# # Legend. Must do this before using graph location calculations
# legend_handles = [
# Line2D([], [], color=value_to_html_color(0.8), lw=10),
# Line2D([], [], color=value_to_html_color(0.6), lw=10),
# Line2D([], [], color=value_to_html_color(0.4), lw=10),
# Line2D([], [], color=value_to_html_color(0.2), lw=10),
# Line2D([], [], color=value_to_html_color(0.0), lw=10),
# ]
# legend_labels = [
# 'Zeer goed', 'Goed', 'Matig', 'Ontoereikend', 'Slecht']
# graph.legend(legend_handles, legend_labels, legend_location=6)
yticklabels = []
block_width = (date2num(dt_end) - date2num(dt_start)) / 50
# Legend
#graph.margin_right_extra += 90 # Room for legend. See also nens_graph.
legend_handles = [
Line2D([], [], color=COLOR_5, lw=10),
Line2D([], [], color=COLOR_4, lw=10),
Line2D([], [], color=COLOR_3, lw=10),
Line2D([], [], color=COLOR_2, lw=10),
Line2D([], [], color=COLOR_1, lw=10),
]
legend_labels = [
'zeer goed',
'goed',
'matig',
'ontoereikend',
'slecht',
]
graph.legend(legend_handles, legend_labels, legend_location=7)
for index, graph_item in enumerate(graph_items):
if not graph_item.location:
graph_item.location = graph_settings['location']
# Find the corresponding Score.
score = Score.from_graph_item(graph_item)
if score.id is None:
graph_item.label = '(%s)' % graph_item.label
yticklabels.append(graph_item.label)
# We want to draw a shadow past the end of the last
# event. That's why we ignore dt_start.
try:
ts = graph_item.time_series(dt_end=dt_end, with_comments=True)
except:
logger.exception(
'HorizontalBarView crashed on graph_item.time_series of %s' %
graph_item)
ts = {}
if len(ts) != 1:
logger.warn('Warning: drawing %d timeseries on a single bar '
'HorizontalBarView', len(ts))
# We assume there is only one timeseries.
for (loc, par, unit), single_ts in ts.items():
dates, values, comments, flag_dates, flag_values, flag_comments = (
dates_values_comments(single_ts))
if not dates:
logger.warning('Tried to draw empty timeseries %s %s',
loc, par)
continue
block_dates = []
block_dates_shadow = []
for date_index in range(len(dates) - 1):
dist_to_next = (date2num(dates[date_index + 1]) -
date2num(dates[date_index]))
this_block_width = min(block_width, dist_to_next)
block_dates.append(
(date2num(dates[date_index]), this_block_width))
block_dates_shadow.append(
(date2num(dates[date_index]), dist_to_next))
block_dates.append(
(date2num(dates[-1]), block_width))
# Ignoring tzinfo, otherwise we can't compare.
last_date = max(dt_start.replace(tzinfo=None), dates[-1])
block_dates_shadow.append(
(date2num(last_date),
(date2num(dt_end) - date2num(dt_start))))
a, b, c, d = score.borders
block_colors = [comment_to_html_color(comment)
for comment in comments]
# Block shadow
graph.axes.broken_barh(
block_dates_shadow, (index - 0.2, 0.4),
facecolors=block_colors, edgecolors=block_colors,
alpha=0.2)
# The 'real' block
graph.axes.broken_barh(
block_dates, (index - 0.4, 0.8),
facecolors=block_colors, edgecolors='grey')
# for goal in graph_item.goals.all():
# collected_goal_timestamps.update([goal.timestamp, ])
# For each unique bar goal timestamp, generate a mini
# graph. The graphs are ordered by timestamp.
goal_timestamps = [
datetime.datetime(2015, 1, 1, 0, 0),
datetime.datetime(2027, 1, 1, 0, 0),
]
subplot_numbers = [312, 313]
for index, goal_timestamp in enumerate(goal_timestamps):
axes_goal = graph.figure.add_subplot(subplot_numbers[index])
axes_goal.set_yticks(range(len(yticklabels)))
axes_goal.set_yticklabels('')
axes_goal.set_xticks([0, ])
axes_goal.set_xticklabels([goal_timestamp.year, ])
for graph_item_index, graph_item in enumerate(graph_items):
# TODO: make more efficient; score is retrieved twice
# in this function.
score = Score.from_graph_item(graph_item)
#print 'score: %s' % score
#print 'doel scores: %s' % str(score.targets)
#a, b, c, d = score.borders
goal = score.targets[index]
if goal is not None:
axes_goal.broken_barh(
[(-0.5, 1)], (graph_item_index - 0.4, 0.8),
facecolors=comment_to_html_color(goal),
edgecolors='grey')
# # 0 or 1 items
# goals = graph_item.goals.filter(timestamp=goal_timestamp)
# for goal in goals:
# axes_goal.broken_barh(
# [(-0.5, 1)], (graph_item_index - 0.4, 0.8),
# facecolors=value_to_html_color(goal.value),
# edgecolors='grey')
axes_goal.set_xlim((-0.5, 0.5))
axes_goal.set_ylim(-0.5, len(yticklabels) - 0.5)
# Coordinates are related to the graph size - not graph 311
bar_width_px = 12
axes_x = float(graph.width -
(graph.MARGIN_RIGHT + graph.margin_right_extra) +
bar_width_px +
2 * bar_width_px * index
) / graph.width
axes_y = float(graph.MARGIN_BOTTOM +
graph.margin_bottom_extra) / graph.height
axes_width = float(bar_width_px) / graph.width
axes_height = float(graph.graph_height()) / graph.height
axes_goal.set_position((axes_x, axes_y,
axes_width, axes_height))
graph.axes.set_yticks(range(len(yticklabels)))
graph.axes.set_yticklabels(yticklabels)
graph.axes.set_xlim(date2num((dt_start, dt_end)))
graph.axes.set_ylim(-0.5, len(yticklabels) - 0.5)
# Set the margins, including legend.
graph.set_margins()
response_format = graph_settings['format']
if response_format == 'ps':
return graph.render(
response=HttpResponse(content_type='application/postscript'),
format='ps')
else:
return graph.png_response(
response=HttpResponse(content_type='image/png'))
| gpl-3.0 |
annayqho/TheCannon | code/lamost/mass_age/paper_plots/residuals_grid_LA.py | 1 | 3321 | import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
from math import log10, floor
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
import numpy as np
def round_sig(x, sig=2):
if x < 0:
return -round(-x, sig-int(floor(log10(-x)))-1)
return round(x, sig-int(floor(log10(x)))-1)
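# Examples (illustrative): round_sig keeps `sig` significant figures, e.g.
# round_sig(0.012345, 3) -> 0.0123 and round_sig(-1234, 2) -> -1200. Note that
# x = 0 would raise a math domain error (log10 of 0); the scatter/bias values
# passed below are assumed to be non-zero.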
names = ['\mbox{T}_{\mbox{eff}}', '\mbox{log g}', '\mbox{[Fe/H]}']
units = ['\mbox{K}', '\mbox{dex}', '\mbox{dex}']
snr_str = [r'SNR $\textless$ 50', r'50 $\textless$ SNR $\textless$ 100', r'SNR $\textgreater$ 100']
snr_str = snr_str[::-1]
cutoffs = [0, 50, 100, 10000]
#cutoffs = [0,50,100]
cutoffs = cutoffs[::-1]
y_highs = [300, 0.7, 0.5]
x_lows = [4000, 1.1, -2.0, -0.08]
x_highs = [5300, 3.8, 0.5, 0.4]
direc = "../run_9_more_metal_poor"
all_ids = np.load("../run_2_train_on_good/all_ids.npz")['arr_0']
all_apogee = np.load("../run_2_train_on_good/all_label.npz")['arr_0']
good_id = np.load("%s/tr_id.npz" %direc)['arr_0']
all_snr = np.load("../run_2_train_on_good/SNRs.npz")['arr_0']
IDs_lamost = np.loadtxt(
"../../examples/test_training_overlap/lamost_sorted_by_ra_with_dr2_params.txt",
usecols=(0,), dtype=(str))
labels_all_lamost = np.loadtxt(
"../../examples/test_training_overlap/lamost_sorted_by_ra_with_dr2_params.txt",
usecols=(3,4,5), dtype=(float))
inds = np.array([np.where(IDs_lamost==a)[0][0] for a in good_id])
lamost = labels_all_lamost[inds,:]
choose = np.array([np.where(all_ids==val)[0][0] for val in good_id])
apogee = all_apogee[choose]
snr = all_snr[choose]
fig = plt.figure(figsize=(15,15))
gs = gridspec.GridSpec(3,3, wspace=0.3, hspace=0)
props = dict(boxstyle='round', facecolor='white', alpha=0.3)
for i in range(0, len(names)):
name = names[i]
unit = units[i]
for j in range(0, len(cutoffs)-1):
ax = plt.subplot(gs[j,i])
ax.axhline(y=0, c='k')
#ax.legend(fontsize=14)
choose = np.logical_and(snr < cutoffs[j], snr > cutoffs[j+1])
#choose = snr > cutoffs[j]
diff = (lamost[:,i] - apogee[:,i])[choose]
scatter = round_sig(np.std(diff), 3)
bias = round_sig(np.mean(diff), 3)
ax.hist2d(
apogee[:,i][choose], diff, range=[[x_lows[i], x_highs[i]], [-y_highs[i], y_highs[i]]], bins=30, norm=LogNorm(), cmap="gray_r")
if j < len(cutoffs) - 2:
ax.get_xaxis().set_visible(False)
ax.locator_params(nbins=5)
ax.tick_params(axis='y', labelsize=20)
ax.tick_params(axis='x', labelsize=20)
if j == 0:
ax.set_title(r"$\Delta %s_{\mbox{L-A}}$ [%s]" %(name, unit), fontsize=30)
if j == 2:
ax.set_xlabel("$%s$ [%s] from APOGEE" %(name, unit), fontsize=20)
textstr1 = '%s' %(snr_str[j])
ax.text(0.05, 0.95, textstr1, transform=ax.transAxes,
fontsize=20, verticalalignment='top', bbox=props)
textstr2 = 'Scatter: %s \nBias: %s' %(scatter, bias)
ax.text(0.05, 0.05, textstr2, transform=ax.transAxes,
fontsize=20, verticalalignment='bottom', bbox=props)
#ax.set_xlabel(r"APOGEE %s $(%s)$" %(name, unit))
#ax.set_ylabel(r"Cannon-LAMOST %s $(%s)$" %(name, unit))
plt.savefig("residuals_grid_la.png")
#plt.show()
| mit |
mitschabaude/nanopores | scripts/pughpore/randomwalk/test/create_compare_nobind.py | 1 | 4042 | # -*- coding: utf-8 -*-
from matplotlib import gridspec
import math
import matplotlib
import nanopores
import nanopores.geometries.pughpore as pughpore
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import os
import sys
import nanopores.tools.fields as f
HOME = os.path.expanduser("~")
PAPERDIR = os.path.join(HOME, "Dropbox", "nanopores")
FIGDIR = os.path.join(PAPERDIR, "figures", "")
DATADIR = os.path.join(HOME,"Dropbox", "nanopores", "fields")
f.set_dir_mega()
number=False
geop = nanopores.Params(pughpore.params)
hpore=geop.hpore
fieldsname='eventspara_nobind'
params=dict(avgbind1=17.2e6,avgbind2=3e4,P_bind1=0.193,P_bind2=3e-1,z0=hpore/2.+0.)
drop, th = f.get("events_pugh_experiment", "drop", "t")
th = [1e3*time for time in th]
#cmap=matplotlib.cm.get_cmap('viridis')
data=f.get_fields(fieldsname,**params)
figname = fieldsname+'_%.1e_%.1e_%.1e_%.1e'%(params["avgbind1"],params["avgbind2"],params["P_bind1"],params["P_bind2"])+str(params["z0"])
t = data["t"]
a = data["a"]
ood = data["ood"]
lendata=len(t)
fac=1.
if max(t)<1e-2:
fac=1e3
t = [x*1e3 for x in t]
P_bind1=params["P_bind1"]
P_bind2=params["P_bind2"]
avgbind1=params["avgbind1"]*1e-6
avgbind2=params["avgbind2"]*1e-6
color2='green'
color1='lightgreen'
color3='red'
plt.figure(figsize=(7,5),dpi=80)
gs = gridspec.GridSpec(2,3,width_ratios=[4,2,1],height_ratios=[1,2.5])
gs.update(wspace=0.,hspace=0.)
minperc=0.
maxperc=40.
#plt1=plt.subplot(gs[1,0])
plt1=plt.subplot()
for k in range(lendata):
if ood[k]==0:
type1 = plt1.scatter([t[k]],[a[k]],color=color2,s=8)
else:
type0 = plt1.scatter([t[k]],[a[k]],color=color3,s=8)
experiment = plt1.scatter(th,drop,color='#888888',s=8)
plt.legend([experiment,type0,type1],['experimental data','did not translocate','successful translocation'],scatterpoints=4,loc=(0.01,0.01),frameon=False)
xfmt=FormatStrFormatter('%g')
plt1.set_xlim([.2*min(t),max(max(t),max(th))*2.])
plt1.set_ylim([minperc,maxperc])
plt1.set_xscale('log')
plt1.xaxis.set_major_formatter(xfmt)
plt1.invert_yaxis()
plt1.plot([17600],[26],marker='o',ms=115,mfc='None',mec='black')
plt1.text(17600,24,"Assumed\n long bindings",fontsize=13,horizontalalignment='center')
plt1.set_ylabel(r'A/I$_0$ [%]',fontsize=15)
if fac==1.:
# if P_bind1!=0.:
# plt1.text(avgbind1*.5,27.,'Long binding',fontsize=9,horizontalalignment='center')
# k=1.0
# plt1.add_patch(matplotlib.patches.Rectangle((avgbind1*10**(-k*2),0.),avgbind1*(10**(k)-10**(-k)),maxperc,facecolor=cmap(.7),alpha=.15))
# if P_bind2!=0.:
# plt1.text(avgbind2*.5,27.,'Short binding',fontsize=9,horizontalalignment='center')
# k=1.0
# plt1.add_patch(matplotlib.patches.Rectangle((avgbind2*10**(-k),0.),avgbind2*(10**(k)-10**(-k)),maxperc,facecolor=cmap(.4),alpha=.15))
plt1.set_xlabel(r'$\tau_{off}$ [ms]',fontsize=15)
else:
plt1.set_xlabel(ur'$\tau_{off}$ [µs]',fontsize=15)
#plt2=plt.subplot(gs[1,1])
#for k in range(lendata):
# if ood[k]==0:
# type1 = plt2.scatter([t[k]],[a[k]],color=color2,s=8)
# else:
# type0 = plt2.scatter([t[k]],[a[k]],color=color3,s=8)
#plt2.invert_yaxis()
#plt2.set_ylim([maxperc,minperc])
#plt2.set_xlim([-2e-2*max(t),max(t)*(1.+2e-2)])
#plt2.axes.get_yaxis().set_visible(False)
#plt2.axes.get_xaxis().major.locator.set_params(nbins=6)
#
#plt3=plt.subplot(gs[1,2])
#n, bins, patches = plt3.hist(np.array(a),15,normed=1,orientation='horizontal',color=color1,alpha=.5)
#plt3.invert_yaxis()
#plt3.set_xlim([0.,max(n)*1.2])
#plt3.set_ylim([maxperc,minperc])
#plt3.axes.get_xaxis().set_visible(False)
#plt3.axes.get_yaxis().set_visible(False)
#
#
#
#plt4=plt.subplot(gs[0,1])
#n, bins, patches = plt4.hist(np.array(t),20,normed=1,color=color1,alpha=.5)
#plt4.set_xlim([-2e-2*max(t),max(t)*(1.+2e-2)])
#plt4.axes.get_xaxis().set_visible(False)
#plt4.axes.get_yaxis().set_visible(False)
#plt.tight_layout()
#plt.show()
#plt.savefig('events_nobind_compare_2.pdf')
nanopores.savefigs("rw/events", FIGDIR, ending=".pdf") | mit |
IndraVikas/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 85 | 8565 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) | bsd-3-clause |
probstj/pyMPB | examples/2D_PhC_quick.py | 1 | 1525 | #Copyright 2016 Juergen Probst
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import sys
from os import path
import numpy as np
import matplotlib.pyplot as plt
from pympb.phc_simulations import TriHoles2D
from pympb import log, defaults
def main():
if len(sys.argv) > 1:
mode=sys.argv[1]
else:
mode='sim'
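    # Quick 2D triangular-lattice photonic-crystal run: band count, k-point interpolation
    # and resolution are kept deliberately low so the example finishes fast.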
defaults.add_epsilon_as_inset = True
sim = TriHoles2D(
material='SiN',
radius=0.34,
numbands=4,#8,
k_interpolation=5,#31,
resolution=16,
mesh_size=7,
runmode=mode,
num_processors=2,
save_field_patterns=False,
convert_field_patterns=False)
if not sim:
log.error('an error occurred during simulation. See the .out file')
return
log.info(' ##### success! #####\n\n')
if __name__ == '__main__':
main()
| gpl-3.0 |
lkishline/expyfun | examples/sync_test.py | 1 | 1368 | """
=============
A-V sync test
=============
This example tests synchronization between the screen and the audio playback.
NOTE: On Linux (w/NVIDIA), XFCE has been observed to give consistent timings,
whereas Compiz WMs did not (doubled timings).
"""
# Author: Dan McCloy <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from expyfun import ExperimentController
import expyfun.analyze as ea
print(__doc__)
# Fullscreen MUST be used to guarantee flip accuracy!
with ExperimentController('SyncTest', full_screen=True, noise_db=-np.inf,
participant='s', session='0', output_dir=None,
suppress_resamp=True, check_rms=None) as ec:
ec.load_buffer(np.r_[0.1, np.zeros(99)]) # RMS == 0.01
pressed = None
screenshot = None
while pressed != '8': # enable a clean quit if required
ec.set_background_color('white')
t1 = ec.start_stimulus(start_of_trial=False) # skip checks
ec.set_background_color('black')
t2 = ec.flip()
diff = round(1000 * (t2 - t1), 2)
ec.screen_text('IFI (ms): {}'.format(diff), wrap=False)
screenshot = ec.screenshot() if screenshot is None else screenshot
ec.flip()
pressed = ec.wait_one_press(0.5)[0]
ec.stop()
plt.ion()
ea.plot_screen(screenshot)
| bsd-3-clause |
jakirkham/bokeh | bokeh/core/json_encoder.py | 3 | 9053 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a functions and classes to implement a custom JSON encoder for
serializing objects for BokehJS.
The primary interface is provided by the |serialize_json| function, which
uses the custom |BokehJSONEncoder| to produce JSON output.
In general, functions in this module convert values in the following way:
* Datetime values (Python, Pandas, NumPy) are converted to floating point
milliseconds since epoch.
* TimeDelta values are converted to absolute floating point milliseconds.
* RelativeDelta values are converted to dictionaries.
* Decimal values are converted to floating point.
* Sequences (Pandas Series, NumPy arrays, python sequences) that are passed
though this interface are converted to lists. Note, however, that arrays in
data sources inside Bokeh Documents are converted elsewhere, and by default
use a binary encoded format.
* Bokeh ``Model`` instances are usually serialized elsewhere in the context
of an entire Bokeh Document. Models passed trough this interface are
converted to references.
* ``HasProps`` (that are not Bokeh models) are converted to key/value dicts or
all their properties and values.
* ``Color`` instances are converted to CSS color values.
.. |serialize_json| replace:: :class:`~bokeh.core.json_encoder.serialize_json`
.. |BokehJSONEncoder| replace:: :class:`~bokeh.core.json_encoder.BokehJSONEncoder`
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import collections
import decimal
import json
# External imports
import numpy as np
# Bokeh imports
from ..settings import settings
from ..util.dependencies import import_optional
from ..util.serialization import convert_datetime_type, convert_timedelta_type, is_datetime_type, is_timedelta_type, transform_series, transform_array
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
rd = import_optional("dateutil.relativedelta")
pd = import_optional('pandas')
__all__ = (
'BokehJSONEncoder',
'serialize_json',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def serialize_json(obj, pretty=None, indent=None, **kwargs):
''' Return a serialized JSON representation of objects, suitable to
send to BokehJS.
This function is typically used to serialize single python objects in
the manner expected by BokehJS. In particular, many datetime values are
automatically normalized to an expected format. Some Bokeh objects can
also be passed, but note that Bokeh models are typically properly
serialized in the context of an entire Bokeh document.
The resulting JSON always has sorted keys. By default. the output is
as compact as possible unless pretty output or indentation is requested.
Args:
obj (obj) : the object to serialize to JSON format
pretty (bool, optional) :
Whether to generate prettified output. If ``True``, spaces are
added after added after separators, and indentation and newlines
are applied. (default: False)
Pretty output can also be enabled with the environment variable
``BOKEH_PRETTY``, which overrides this argument, if set.
indent (int or None, optional) :
Amount of indentation to use in generated JSON output. If ``None``
then no indentation is used, unless pretty output is enabled,
in which case two spaces are used. (default: None)
Any additional keyword arguments are passed to ``json.dumps``, except for
some that are computed internally, and cannot be overridden:
* allow_nan
* indent
* separators
* sort_keys
Examples:
.. code-block:: python
>>> data = dict(b=np.datetime64('2017-01-01'), a = np.arange(3))
        >>> print(serialize_json(data))
{"a":[0,1,2],"b":1483228800000.0}
>>> print(serialize_json(data, pretty=True))
{
"a": [
0,
1,
2
],
"b": 1483228800000.0
}
'''
# these args to json.dumps are computed internally and should not be passed along
for name in ['allow_nan', 'separators', 'sort_keys']:
if name in kwargs:
raise ValueError("The value of %r is computed internally, overriding is not permissable." % name)
if pretty is None:
pretty = settings.pretty(False)
if pretty:
separators=(",", ": ")
else:
separators=(",", ":")
if pretty and indent is None:
indent = 2
return json.dumps(obj, cls=BokehJSONEncoder, allow_nan=False, indent=indent, separators=separators, sort_keys=True, **kwargs)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class BokehJSONEncoder(json.JSONEncoder):
''' A custom ``json.JSONEncoder`` subclass for encoding objects in
accordance with the BokehJS protocol.
'''
def transform_python_types(self, obj):
''' Handle special scalars such as (Python, NumPy, or Pandas)
datetimes, or Decimal values.
Args:
obj (obj) :
The object to encode. Anything not specifically handled in
this method is passed on to the default system JSON encoder.
'''
# date/time values that get serialized as milliseconds
if is_datetime_type(obj):
return convert_datetime_type(obj)
if is_timedelta_type(obj):
return convert_timedelta_type(obj)
# slice objects
elif isinstance(obj, slice):
return dict(start=obj.start, stop=obj.stop, step=obj.step)
# NumPy scalars
elif np.issubdtype(type(obj), np.floating):
return float(obj)
elif np.issubdtype(type(obj), np.integer):
return int(obj)
elif np.issubdtype(type(obj), np.bool_):
return bool(obj)
# Decimal values
elif isinstance(obj, decimal.Decimal):
return float(obj)
# RelativeDelta gets serialized as a dict
elif rd and isinstance(obj, rd.relativedelta):
return dict(years=obj.years,
months=obj.months,
days=obj.days,
hours=obj.hours,
minutes=obj.minutes,
seconds=obj.seconds,
microseconds=obj.microseconds)
else:
return super(BokehJSONEncoder, self).default(obj)
def default(self, obj):
''' The required ``default`` method for JSONEncoder subclasses.
Args:
obj (obj) :
The object to encode. Anything not specifically handled in
this method is passed on to the default system JSON encoder.
'''
from ..model import Model
from ..colors import Color
from .has_props import HasProps
# array types -- use force_list here, only binary
# encoding CDS columns for now
if pd and isinstance(obj, (pd.Series, pd.Index)):
return transform_series(obj, force_list=True)
elif isinstance(obj, np.ndarray):
return transform_array(obj, force_list=True)
elif isinstance(obj, collections.deque):
return list(map(self.default, obj))
elif isinstance(obj, Model):
return obj.ref
elif isinstance(obj, HasProps):
return obj.properties_with_values(include_defaults=False)
elif isinstance(obj, Color):
return obj.to_css()
else:
return self.transform_python_types(obj)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
irhete/predictive-monitoring-benchmark | preprocessing/preprocess_logs_production.py | 1 | 4021 | import pandas as pd
import numpy as np
import os
input_data_folder = "../orig_logs"
output_data_folder = "../labeled_logs_csv_processed"
filenames = ["Production.csv"]
case_id_col = "Case ID"
activity_col = "Activity"
resource_col = "Resource"
timestamp_col = "Complete Timestamp"
label_col = "label"
pos_label = "deviant"
neg_label = "regular"
freq_threshold = 10
timeunit = 'm'
# features for classifier
static_cat_cols = ["Part_Desc_", "Rework"]
static_num_cols = ["Work_Order_Qty"]
dynamic_cat_cols = [activity_col, resource_col, "Report_Type", "Resource.1"]
dynamic_num_cols = ["Qty_Completed", "Qty_for_MRB", "activity_duration"]
static_cols = static_cat_cols + static_num_cols + [case_id_col, label_col]
dynamic_cols = dynamic_cat_cols + dynamic_num_cols + [timestamp_col]
cat_cols = dynamic_cat_cols + static_cat_cols
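# Label each case: if any event has Qty_Rejected > 0, the case is truncated just before
# the first rejection and labeled deviant; otherwise the whole case is kept as regular.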
def assign_label(group):
tmp = group["Qty_Rejected"] > 0
tmp = tmp.reset_index()["Qty_Rejected"]
if sum(tmp) > 0:
idx = tmp[tmp==True].index[0]
group = group.iloc[:idx,:]
group[label_col] = pos_label
else:
group[label_col] = neg_label
return group
def extract_timestamp_features(group):
group = group.sort_values(timestamp_col, ascending=False, kind='mergesort')
tmp = group[timestamp_col] - group[timestamp_col].shift(-1)
tmp = tmp.fillna(0)
group["timesincelastevent"] = tmp.apply(lambda x: float(x / np.timedelta64(1, 'm'))) # m is for minutes
tmp = group[timestamp_col] - group[timestamp_col].iloc[-1]
tmp = tmp.fillna(0)
group["timesincecasestart"] = tmp.apply(lambda x: float(x / np.timedelta64(1, 'm'))) # m is for minutes
group = group.sort_values(timestamp_col, ascending=True, kind='mergesort')
group["event_nr"] = range(1, len(group) + 1)
return group
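# Count how many cases are open (already started, not yet finished) at a given timestamp,
# using the per-case start/end times collected in dt_first_last_timestamps below.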
def get_open_cases(date):
return sum((dt_first_last_timestamps["start_time"] <= date) & (dt_first_last_timestamps["end_time"] > date))
for filename in filenames:
data = pd.read_csv(os.path.join(input_data_folder,filename), sep=";")
# add event duration
data["Complete Timestamp"] = pd.to_datetime(data["Complete Timestamp"])
data["Start Timestamp"] = pd.to_datetime(data["Start Timestamp"])
tmp = data["Complete Timestamp"] - data["Start Timestamp"]
tmp = tmp.fillna(0)
data["activity_duration"] = tmp.apply(lambda x: float(x / np.timedelta64(1, timeunit)))
# assign labels
data = data.sort_values(timestamp_col, ascending=True, kind='mergesort').groupby(case_id_col).apply(assign_label)
data = data[static_cols + dynamic_cols]
# add features extracted from timestamp
data[timestamp_col] = pd.to_datetime(data[timestamp_col])
data["timesincemidnight"] = data[timestamp_col].dt.hour * 60 + data[timestamp_col].dt.minute
data["month"] = data[timestamp_col].dt.month
data["weekday"] = data[timestamp_col].dt.weekday
data["hour"] = data[timestamp_col].dt.hour
data = data.groupby(case_id_col).apply(extract_timestamp_features)
# add inter-case features
data = data.sort_values([timestamp_col], ascending=True, kind='mergesort')
dt_first_last_timestamps = data.groupby(case_id_col)[timestamp_col].agg([min, max])
dt_first_last_timestamps.columns = ["start_time", "end_time"]
data["open_cases"] = data[timestamp_col].apply(get_open_cases)
# impute missing values
grouped = data.sort_values(timestamp_col, ascending=True, kind='mergesort').groupby(case_id_col)
for col in static_cols + dynamic_cols:
data[col] = grouped[col].transform(lambda grp: grp.fillna(method='ffill'))
data[cat_cols] = data[cat_cols].fillna('missing')
data = data.fillna(0)
# set infrequent factor levels to "other"
for col in cat_cols:
counts = data[col].value_counts()
mask = data[col].isin(counts[counts >= freq_threshold].index)
data.loc[~mask, col] = "other"
data.to_csv(os.path.join(output_data_folder,filename), sep=";", index=False)
| apache-2.0 |
codein/poc | github_poller.py | 1 | 2466 | import os
import requests
import json
import urllib
from urllib import urlencode
import pandas as pd
import argparse
import numpy as np
import datetime
import settings
import utils
base_url = 'https://api.github.com'
issue_attributes = ['title', 'html_url']
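# The number of hours spent is expected on the first line of each closed issue's body
# (e.g. "3p"); the trailing 'p' is stripped before converting to an int.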
def extract_issue_attributes(raw_issue):
try:
issue = {}
for key in issue_attributes:
issue[key] = raw_issue[key]
hours = raw_issue['body'].split('\n')[0].lower().replace('p', '')
issue['hours'] = int(hours)
raw_date = raw_issue['closed_at']
date = datetime.datetime.strptime(raw_date, "%Y-%m-%dT%H:%M:%SZ")
issue['date'] = date.strftime('%m/%d/%y')
return issue
except Exception as e:
print(e)
print(raw_issue['html_url'])
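# Fetch all closed issues, following the paginated 'next' links returned by the GitHub API.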
def get_issues(url, issues=None):
if issues is None:
issues = []
request_headers = {
'content-type': 'application/json; charset=utf8',
# 'Accept': 'application/vnd.bindo-' + settings.BINDO_API['api_version'] + '+json',
'Authorization': 'token ' + settings.GITHUB_OAUTH_TOKEN,
}
print(url)
payload = {
'state': 'closed',
'sort': 'created',
}
response = requests.get(url, headers=request_headers, params=payload)
if response.status_code == requests.codes.ok:
issues= issues + (response.json())
if 'next' in response.links:
next_url = response.links['next']
issues = get_issues(next_url['url'], issues)
return issues
for repo_dict in settings.GITHUB_REPOSITORIES:
issues_url = '/repos/{org}/{repo}/issues'.format(**repo_dict)
url = urllib.basejoin(base_url, issues_url)
raw_issues = get_issues(url)
issues = []
if len(raw_issues) > 0:
for raw_issue in raw_issues:
issue = extract_issue_attributes(raw_issue)
if issue:
issues.append(issue)
df = pd.DataFrame(issues)
copy_columns = [
('title', 'comments'),
]
for from_column, to_column in copy_columns:
df[to_column] = df[from_column]
output_columns = [
'date',
'hours',
'comments',
'html_url',
]
df = df[output_columns]
output_file_location = '~/{repo}-issues.xlsx'.format(**repo_dict)
output_file_location = utils.expanduser(output_file_location)
utils.df_to_excel(output_file_location, df)
| mit |
srinathv/bokeh | bokeh/compat/mplexporter/tools.py | 75 | 1732 | """
Tools for matplotlib plot exporting
"""
def ipynb_vega_init():
"""Initialize the IPython notebook display elements
This function borrows heavily from the excellent vincent package:
http://github.com/wrobstory/vincent
"""
try:
from IPython.core.display import display, HTML
except ImportError:
print('IPython Notebook could not be loaded.')
require_js = '''
if (window['d3'] === undefined) {{
require.config({{ paths: {{d3: "http://d3js.org/d3.v3.min"}} }});
require(["d3"], function(d3) {{
window.d3 = d3;
{0}
}});
}};
if (window['topojson'] === undefined) {{
require.config(
{{ paths: {{topojson: "http://d3js.org/topojson.v1.min"}} }}
);
require(["topojson"], function(topojson) {{
window.topojson = topojson;
}});
}};
'''
d3_geo_projection_js_url = "http://d3js.org/d3.geo.projection.v0.min.js"
d3_layout_cloud_js_url = ("http://wrobstory.github.io/d3-cloud/"
"d3.layout.cloud.js")
topojson_js_url = "http://d3js.org/topojson.v1.min.js"
vega_js_url = 'http://trifacta.github.com/vega/vega.js'
dep_libs = '''$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$([IPython.events]).trigger("vega_loaded.vincent");
})
})
})
});''' % (d3_geo_projection_js_url, d3_layout_cloud_js_url,
topojson_js_url, vega_js_url)
load_js = require_js.format(dep_libs)
html = '<script>'+load_js+'</script>'
display(HTML(html))
| bsd-3-clause |
google/telluride_decoding | telluride_decoding/plot_util.py | 1 | 3811 | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python2 python3
"""Utilities to plot results."""
# To prevent tkinter errors as per: https://stackoverflow.com/a/37605654
import os
import matplotlib
matplotlib.use('Agg')
import tensorflow.compat.v2 as tf # pylint: disable=g-import-not-at-top
# User should call tf.compat.v1.enable_v2_behavior()
def matplotlib_pyplot():
"""Imports matplotlib pyplot."""
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
return plt
def plot_mean_std(test_name,
regularization_list,
run_mean,
run_std,
golden_mean_std_dict=None,
png_file_name=None,
show_plot=False):
"""Plots the mean and standard deviation as an error bar figure.
Args:
test_name: The name of the test for use in plot title.
regularization_list: A list of regularization values.
run_mean: the mean from each regularization value, over tests
run_std: the standard deviation from each regularization value.
golden_mean_std_dict: The golden results as an ordered dictionary with the
regularization values as the keys and tuples with these stats of the mean
and standard deviation estimates:
mean(m), means(s), std(m), std(s)
png_file_name: If file name is not empty, save the plot to the PNG file.
show_plot: If true, show the plot in a window.
Raises:
TypeError: If the input parameters are not correct.
"""
if not png_file_name and not show_plot:
raise TypeError('PNG file name is empty and show_plot is false.')
if len(regularization_list) != len(run_mean):
raise TypeError('Lengths of regularizations (%d) and means (%d) are not '
'equal.' % (len(regularization_list), len(run_mean)))
if len(regularization_list) != len(run_std):
raise TypeError('Lengths of regularizations (%d) and stds (%d) are not '
'equal.' % (len(regularization_list), len(run_std)))
plt = matplotlib_pyplot()
plt.figure()
if golden_mean_std_dict:
golden_regularization_list = []
golden_mean_list = []
golden_std_list = []
for regularization_value, (mean_m, mean_s,
_, _) in golden_mean_std_dict.items():
golden_regularization_list.append(regularization_value)
golden_mean_list.append(mean_m)
golden_std_list.append(mean_s)
plt.errorbar(
golden_regularization_list,
golden_mean_list,
golden_std_list,
color='orange',
uplims=True,
lolims=True,
label='golden')
plt.errorbar(
regularization_list, run_mean, run_std, color='blue', label='actual')
plt.xscale('log')
plt.xlabel('Regularization lambda (log10)')
plt.ylabel('Mean correlation')
plt.title(test_name + ' experiment correlation')
plt.legend(loc='lower right')
if png_file_name:
base_dir = os.path.split(png_file_name)[0]
if base_dir and not tf.io.gfile.exists(base_dir):
tf.io.gfile.makedirs(base_dir)
with tf.io.gfile.GFile(png_file_name, 'wb') as png_file:
plt.savefig(png_file, format='png')
if show_plot:
plt.show()
| apache-2.0 |
Didou09/tofu | setup.py | 1 | 13075 | """ A tomography library for fusion devices
See:
https://github.com/ToFuProject/tofu
"""
import os
import glob
import shutil
import logging
import platform
import subprocess
from codecs import open
# ... setup tools
from setuptools import setup, find_packages
from setuptools import Extension
# ... packages that need to be in pyproject.toml
from Cython.Distutils import build_ext
import numpy as np
# ... local script
import _updateversion as up
# ... for `clean` command
from distutils.command.clean import clean as Clean
# == Checking platform ========================================================
is_platform_windows = False
if platform.system() == "Windows":
is_platform_windows = True
# === Setting clean command ===================================================
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("tofu.setup")
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def expand(self, path_list):
"""
Expand a list of path using glob magic.
:param list[str] path_list: A list of path which may contains magic
:rtype: list[str]
:returns: A list of path without magic
"""
path_list2 = []
for path in path_list:
if glob.has_magic(path):
iterator = glob.iglob(path)
path_list2.extend(iterator)
else:
path_list2.append(path)
return path_list2
def find(self, path_list):
"""Find a file pattern if directories.
Could be done using "**/*.c" but it is only supported in Python 3.5.
:param list[str] path_list: A list of path which may contains magic
:rtype: list[str]
:returns: A list of path without magic
"""
import fnmatch
path_list2 = []
for pattern in path_list:
for root, _, filenames in os.walk("."):
for filename in fnmatch.filter(filenames, pattern):
path_list2.append(os.path.join(root, filename))
return path_list2
def run(self):
Clean.run(self)
cython_files = self.find(["*.pyx"])
cythonized_files = [
path.replace(".pyx", ".c") for path in cython_files
]
cythonized_files += [
path.replace(".pyx", ".cpp") for path in cython_files
]
so_files = self.find(["*.so"])
# really remove the directories
# and not only if they are empty
to_remove = [self.build_base]
to_remove = self.expand(to_remove)
to_remove += cythonized_files
to_remove += so_files
if not self.dry_run:
for path in to_remove:
try:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
logger.info("removing '%s'", path)
except OSError:
pass
# =============================================================================
# =============================================================================
# Check if openmp available
# see http://openmp.org/wp/openmp-compilers/
omp_test = r"""
#include <omp.h>
#include <stdio.h>
int main() {
#pragma omp parallel
printf("Hello from thread %d, nthreads %d\n", omp_get_thread_num(),
omp_get_num_threads());
}
"""
def check_for_openmp(cc_var):
import tempfile
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
filename = r"test.c"
with open(filename, "w") as file:
file.write(omp_test)
with open(os.devnull, "w") as fnull:
result = subprocess.call(
[cc_var, "-fopenmp", filename], stdout=fnull, stderr=fnull
)
os.chdir(curdir)
# clean up
shutil.rmtree(tmpdir)
return result
# ....... Using function
if is_platform_windows:
openmp_installed = False
else:
openmp_installed = not check_for_openmp("cc")
# =============================================================================
# == Getting tofu version =====================================================
_HERE = os.path.abspath(os.path.dirname(__file__))
def get_version_tofu(path=_HERE):
# Try from git
isgit = ".git" in os.listdir(path)
if isgit:
try:
git_branch = (
subprocess.check_output(
[
"git",
"rev-parse",
"--abbrev-ref",
"HEAD",
]
)
.rstrip()
.decode()
)
deploy_branches = ["master", "deploy-test"]
if (git_branch in deploy_branches or "TRAVIS_TAG" in os.environ):
version_tofu = up.updateversion()
else:
isgit = False
except Exception:
isgit = False
if not isgit:
version_tofu = os.path.join(path, "tofu")
version_tofu = os.path.join(version_tofu, "version.py")
with open(version_tofu, "r") as fh:
version_tofu = fh.read().strip().split("=")[-1].replace("'", "")
version_tofu = version_tofu.lower().replace("v", "").replace(" ", "")
return version_tofu
version_tofu = get_version_tofu(path=_HERE)
print("")
print("Version for setup.py : ", version_tofu)
print("")
# =============================================================================
# =============================================================================
# Get the long description from the README file
# Get the readme file whatever its extension (md vs rst)
_README = [
ff
for ff in os.listdir(_HERE)
if len(ff) <= 10 and ff[:7] == "README."
]
assert len(_README) == 1
_README = _README[0]
with open(os.path.join(_HERE, _README), encoding="utf-8") as f:
long_description = f.read()
if _README[-3:] == ".md":
long_description_content_type = "text/markdown"
else:
long_description_content_type = "text/x-rst"
# =============================================================================
# =============================================================================
# Compiling files
if openmp_installed:
extra_compile_args = ["-O3", "-Wall", "-fopenmp", "-fno-wrapv"]
extra_link_args = ["-fopenmp"]
else:
extra_compile_args = ["-O3", "-Wall", "-fno-wrapv"]
extra_link_args = []
extensions = [
Extension(
name="tofu.geom._GG",
sources=["tofu/geom/_GG.pyx"],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
),
Extension(
name="tofu.geom._basic_geom_tools",
sources=["tofu/geom/_basic_geom_tools.pyx"],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
),
Extension(
name="tofu.geom._distance_tools",
sources=["tofu/geom/_distance_tools.pyx"],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
),
Extension(
name="tofu.geom._sampling_tools",
sources=["tofu/geom/_sampling_tools.pyx"],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
),
Extension(
name="tofu.geom._raytracing_tools",
sources=["tofu/geom/_raytracing_tools.pyx"],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
),
Extension(
name="tofu.geom._vignetting_tools",
sources=["tofu/geom/_vignetting_tools.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
),
]
setup(
name="tofu",
version="{ver}".format(ver=version_tofu),
# Use scm to get code version from git tags
# cf. https://pypi.python.org/pypi/setuptools_scm
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
# The version is stored only in the setup.py file and read from it (option
# 1 in https://packaging.python.org/en/latest/single_source_version.html)
use_scm_version=False,
# Description of what tofu does
description="A python library for Tomography for Fusion",
long_description=long_description,
long_description_content_type=long_description_content_type,
# The project's main homepage.
url="https://github.com/ToFuProject/tofu",
# Author details
author="Didier VEZINET and Laura MENDOZA",
author_email="[email protected]",
# Choose your license
license="MIT",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 4 - Beta",
# Indicate who your project is intended for
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Physics",
# Pick your license as you wish (should match "license" above)
"License :: OSI Approved :: MIT License",
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
# In which language most of the code is written ?
"Natural Language :: English",
],
# What does your project relate to?
keywords="tomography geometry 3D inversion synthetic fusion",
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(
exclude=[
"doc",
"_Old",
"_Old_doc",
"plugins",
"plugins.*",
"*.plugins.*",
"*.plugins",
"*.tests10_plugins",
"*.tests10_plugins.*",
"tests10_plugins.*",
"tests10_plugins",
]
),
# packages = ['tofu','tofu.geom'],
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=["numpy", "scipy", "matplotlib", "cython>=0.26"],
python_requires=">=3.6",
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
"dev": [
"check-manifest",
"coverage",
"pytest",
"sphinx",
"sphinx-gallery",
"sphinx_bootstrap_theme",
]
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# # If any package contains *.txt, *.rst or *.npz files, include them:
# '': ['*.txt', '*.rst', '*.npz'],
# # And include any *.csv files found in the 'ITER' package, too:
# 'ITER': ['*.csv'],
# },
package_data={
"tofu.tests.tests01_geom.test_03_core_data": ["*.py", "*.txt"],
"tofu.geom.inputs": ["*.txt"],
"tofu.mag.mag_ripple": ['*.sh', '*.f']
},
include_package_data=True,
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html
# installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# executable scripts can be declared here
# They can be python or non-python scripts
# scripts=[
# ],
# entry_points point to functions in the package
# Theye are generally preferable over scripts because they provide
# cross-platform support and allow pip to create the appropriate form
# of executable for the target platform.
entry_points={
'console_scripts': [
'tofuplot=tofu.entrypoints.tofuplot:main',
'tofucalc=tofu.entrypoints.tofucalc:main',
'tofu-version=scripts.tofuversion:main',
'tofu-custom=scripts.tofucustom:main',
'tofu=scripts.tofu_bash:main',
],
},
py_modules=['_updateversion'],
# Extensions and commands
ext_modules=extensions,
cmdclass={"build_ext": build_ext,
"clean": CleanCommand},
include_dirs=[np.get_include()],
)
| mit |
elkingtonmcb/shogun | examples/undocumented/python_modular/graphical/interactive_svr_demo.py | 16 | 11301 | """
Shogun demo, based on PyQT Demo by Eli Bendersky
Christian Widmer
Soeren Sonnenburg
License: GPLv3
"""
import numpy
import sys, os, csv
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib import mpl
from matplotlib.colorbar import make_axes, Colorbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
from modshogun import *
class Form(QMainWindow):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('SHOGUN interactive demo')
self.data = DataHolder()
self.series_list_model = QStandardItemModel()
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.on_show()
def load_file(self, filename=None):
filename = QFileDialog.getOpenFileName(self,
'Open a data file', '.', 'CSV files (*.csv);;All Files (*.*)')
if filename:
self.data.load_from_file(filename)
self.fill_series_list(self.data.series_names())
self.status_text.setText("Loaded " + filename)
def on_show(self):
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1, self.data.x2, 'bo')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
self.canvas.draw()
self.fill_series_list(self.data.get_stats())
def on_about(self):
msg = __doc__
QMessageBox.about(self, "About the demo", msg.strip())
def fill_series_list(self, names):
self.series_list_model.clear()
for name in names:
item = QStandardItem(name)
item.setCheckState(Qt.Unchecked)
item.setCheckable(False)
self.series_list_model.appendRow(item)
def onclick(self, event):
print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
self.data.add_example(event.xdata, event.ydata)
self.on_show()
def clear(self):
self.data.clear()
self.on_show()
def enable_widgets(self):
kernel_name = self.kernel_combo.currentText()
if kernel_name == "LinearKernel":
self.sigma.setDisabled(True)
self.degree.setDisabled(True)
elif kernel_name == "PolynomialKernel":
self.sigma.setDisabled(True)
self.degree.setEnabled(True)
elif kernel_name == "GaussianKernel":
self.sigma.setEnabled(True)
self.degree.setDisabled(True)
def train_svm(self):
width = float(self.sigma.text())
degree = int(self.degree.text())
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1, self.data.x2, 'bo')
# train svm
labels = self.data.get_labels()
print type(labels)
lab = RegressionLabels(labels)
features = self.data.get_examples()
train = RealFeatures(features)
kernel_name = self.kernel_combo.currentText()
print "current kernel is %s" % (kernel_name)
if kernel_name == "LinearKernel":
gk = LinearKernel(train, train)
gk.set_normalizer(IdentityKernelNormalizer())
elif kernel_name == "PolynomialKernel":
gk = PolyKernel(train, train, degree, True)
gk.set_normalizer(IdentityKernelNormalizer())
elif kernel_name == "GaussianKernel":
gk = GaussianKernel(train, train, width)
cost = float(self.cost.text())
tubeeps = float(self.tubeeps.text())
print "cost", cost
svm = LibSVR(cost, tubeeps, gk, lab)
svm.train()
svm.set_epsilon(1e-2)
x=numpy.linspace(-5.0,5.0,100)
y=svm.apply(RealFeatures(numpy.array([x]))).get_labels()
self.axes.plot(x,y,'r-')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
self.canvas.draw()
def create_main_frame(self):
self.main_frame = QWidget()
plot_frame = QWidget()
self.dpi = 100
self.fig = Figure((6.0, 6.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
cid = self.canvas.mpl_connect('button_press_event', self.onclick)
self.axes = self.fig.add_subplot(111)
self.cax = None
#self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
log_label = QLabel("Number of examples:")
self.series_list_view = QListView()
self.series_list_view.setModel(self.series_list_model)
cost_label = QLabel('C')
#self.cost = QSpinBox()#QLineEdit()
self.cost = QLineEdit()
self.cost.setText("1.0")
#self.cost.setMinimum(1)
spin_label2 = QLabel('tube')
self.tubeeps = QLineEdit()
self.tubeeps.setText("0.1")
spin_label3 = QLabel('sigma')
self.sigma = QLineEdit()
self.sigma.setText("1.2")
#self.sigma.setMinimum(1)
spin_label4 = QLabel('d')
self.degree = QLineEdit()
self.degree.setText("2")
#self.sigma.setMinimum(1)
spins_hbox = QHBoxLayout()
spins_hbox.addWidget(cost_label)
spins_hbox.addWidget(self.cost)
spins_hbox.addWidget(spin_label2)
spins_hbox.addWidget(self.tubeeps)
spins_hbox.addWidget(spin_label3)
spins_hbox.addWidget(self.sigma)
spins_hbox.addWidget(spin_label4)
spins_hbox.addWidget(self.degree)
spins_hbox.addStretch(1)
self.legend_cb = QCheckBox("Show Support Vectors")
self.legend_cb.setChecked(False)
self.show_button = QPushButton("&Train SVR")
self.connect(self.show_button, SIGNAL('clicked()'), self.train_svm)
self.clear_button = QPushButton("&Clear")
self.connect(self.clear_button, SIGNAL('clicked()'), self.clear)
self.kernel_combo = QComboBox()
self.kernel_combo.insertItem(-1, "GaussianKernel")
self.kernel_combo.insertItem(-1, "PolynomialKernel")
self.kernel_combo.insertItem(-1, "LinearKernel")
self.kernel_combo.maximumSize = QSize(300, 50)
self.connect(self.kernel_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets)
left_vbox = QVBoxLayout()
left_vbox.addWidget(self.canvas)
#left_vbox.addWidget(self.mpl_toolbar)
right0_vbox = QVBoxLayout()
right0_vbox.addWidget(log_label)
right0_vbox.addWidget(self.series_list_view)
#right0_vbox.addWidget(self.legend_cb)
right0_vbox.addStretch(1)
right2_vbox = QVBoxLayout()
right2_label = QLabel("Settings")
right2_vbox.addWidget(right2_label)
right2_vbox.addWidget(self.show_button)
right2_vbox.addWidget(self.kernel_combo)
right2_vbox.addLayout(spins_hbox)
right2_clearlabel = QLabel("Remove Data")
right2_vbox.addWidget(right2_clearlabel)
right2_vbox.addWidget(self.clear_button)
right2_vbox.addStretch(1)
right_vbox = QHBoxLayout()
right_vbox.addLayout(right0_vbox)
right_vbox.addLayout(right2_vbox)
hbox = QVBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(right_vbox)
self.main_frame.setLayout(hbox)
self.setCentralWidget(self.main_frame)
self.enable_widgets()
def create_status_bar(self):
self.status_text = QLabel("")
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
load_action = self.create_action("&Load file",
shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu,
(load_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
class DataHolder(object):
""" Just a thin wrapper over a dictionary that holds integer
data series. Each series has a name and a list of numbers
as its data. The length of all series is assumed to be
the same.
The series can be read from a CSV file, where each line
is a separate series. In each series, the first item in
the line is the name, and the rest are data numbers.
"""
def __init__(self, filename=None):
self.clear()
self.load_from_file(filename)
def clear(self):
self.x1 = []
self.x2 = []
def get_stats(self):
num = len(self.x1)
str_num = "num examples: %i" % num
return (str_num, str_num)
def get_labels(self):
return numpy.array(self.x2, dtype=numpy.float64)
def get_examples(self):
num = len(self.x1)
examples = numpy.zeros((1,num))
for i in xrange(num):
examples[0,i] = self.x1[i]
return examples
def add_example(self, x1, x2):
self.x1.append(x1)
self.x2.append(x2)
def load_from_file(self, filename=None):
self.data = {}
self.names = []
if filename:
for line in csv.reader(open(filename, 'rb')):
self.names.append(line[0])
self.data[line[0]] = map(int, line[1:])
self.datalen = len(line[1:])
def series_names(self):
""" Names of the data series
"""
return self.names
def series_len(self):
""" Length of a data series
"""
return self.datalen
def series_count(self):
return len(self.data)
def get_series_data(self, name):
return self.data[name]
def main():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
main()
#~ dh = DataHolder('qt_mpl_data.csv')
#~ print dh.data
#~ print dh.get_series_data('1991 Sales')
#~ print dh.series_names()
#~ print dh.series_count()
| gpl-3.0 |
bzamecnik/sms-tools | lectures/04-STFT/plots-code/windows.py | 1 | 1203 | import math
# matplotlib without any blocking GUI
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from smst.utils import audio
from smst.models import dft
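# Compare the magnitude spectrum of the same oboe frame under three analysis windows
# (rectangular, Hamming, Blackman) to illustrate the main-lobe/side-lobe trade-off.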
(fs, x) = audio.read_wav('../../../sounds/oboe-A4.wav')
N = 512
pin = 5000
w = np.ones(501)
hM1 = int(math.floor((w.size + 1) / 2))
hM2 = int(math.floor(w.size / 2))
x1 = x[pin - hM1:pin + hM2]
plt.figure(1, figsize=(9.5, 7))
plt.subplot(4, 1, 1)
plt.plot(np.arange(-hM1, hM2), x1, lw=1.5)
plt.axis([-hM1, hM2, min(x1), max(x1)])
plt.title('x (oboe-A4.wav)')
mX, pX = dft.from_audio(x1, w, N)
mX = mX - max(mX)
plt.subplot(4, 1, 2)
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0, N / 4, -70, 0])
plt.title('mX (rectangular window)')
w = np.hamming(501)
mX, pX = dft.from_audio(x1, w, N)
mX = mX - max(mX)
plt.subplot(4, 1, 3)
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0, N / 4, -70, 0])
plt.title('mX (hamming window)')
w = np.blackman(501)
mX, pX = dft.from_audio(x1, w, N)
mX = mX - max(mX)
plt.subplot(4, 1, 4)
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0, N / 4, -70, 0])
plt.title('mX (blackman window)')
plt.tight_layout()
plt.savefig('windows.png')
| agpl-3.0 |
DiegoAsterio/speedy-Gonzales | data_science_functions.py | 1 | 18458 | import matplotlib.pyplot as plt
import numpy as np
from functools import reduce
import math
import pandas as pd
import random
import graphviz as gv  # used by ArbreDecision.plot() / ArbreBinaire.to_graph()
class LabeledSet:
def __init__(self,input_dimension):
self.input_dimension=input_dimension
self.nb_examples=0
self.labels = dict()
self.label_count = []
def addExample(self,vector,label):
if label not in self.labels:
self.labels[label] = len(self.labels)
self.label_count.append(0)
if (self.nb_examples==0):
self.x=np.array([vector])
self.y=np.array([[label]])
else:
self.x=np.vstack((self.x,vector))
self.y=np.vstack((self.y,label))
self.nb_examples=self.nb_examples+1
self.label_count[self.labels[label]] += 1
    # Returns the dimension of the input space
def getInputDimension(self):
return self.input_dimension
    # Returns the number of examples in the set
def size(self):
return self.nb_examples
    # Returns the value of x_i
def getX(self,i):
return self.x[i]
    # Returns the value of y_i
def getY(self,i):
return(self.y[i])
def getDistribution(self):
suma = reduce((lambda x, y: x + y),self.label_count)
return list(map((lambda x: float(x)/suma), self.label_count))
def getMaxLabel(self):
maxim = -1
label = None
for k, v in self.labels.items():
if self.label_count[v] > maxim:
label = k
maxim = self.label_count[v]
return label
def plot2DSet(set):
""" LabeledSet -> NoneType
    Hypothesis: set has dimension 2
    displays a graphical representation of the LabeledSet
    note: the labels in set may appear in any order
"""
    S_pos = set.x[np.where(set.y == 1),:][0] # all examples with label +1
    S_neg = set.x[np.where(set.y == -1),:][0] # all examples with label -1
plt.scatter(S_pos[:,0],S_pos[:,1],marker='o')
plt.scatter(S_neg[:,0],S_neg[:,1],marker='x')
class Classifier:
def __init__(self,input_dimension):
""" Constructeur """
raise NotImplementedError("Please Implement this method")
    # Computes the prediction on x => returns a score
def predict(self,x):
raise NotImplementedError("Please Implement this method")
# Permet d'entrainer le modele sur un ensemble de données
def train(self,labeledSet):
raise NotImplementedError("Please Implement this method")
    # Computes the accuracy of the classifier on a dataset
def accuracy(self,set):
nb_ok=0
for i in range(set.size()):
score=self.predict(set.getX(i))
if (score*set.getY(i)>0):
nb_ok=nb_ok+1
acc=nb_ok/(set.size() * 1.0)
return acc
def plot_frontiere(set,classifier,step=20):
""" LabeledSet * Classifier * int -> NoneType
    Note: the 3rd argument is optional and sets the "resolution" of the plot
    displays the decision boundary associated with the classifier
"""
mmax=set.x.max(0)
mmin=set.x.min(0)
x1grid,x2grid=np.meshgrid(np.linspace(mmin[0],mmax[0],step),np.linspace(mmin[1],mmax[1],step))
grid=np.hstack((x1grid.reshape(x1grid.size,1),x2grid.reshape(x2grid.size,1)))
    # compute the prediction for every point of the grid
res=np.array([classifier.predict(grid[i,:]) for i in range(len(grid)) ])
res=res.reshape(x1grid.shape)
    # draw the decision boundaries
plt.contourf(x1grid,x2grid,res,colors=["red","cyan"],levels=[-1000,0,1000],linewidth=2)
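# Shannon entropy of a discrete distribution, using log base k (the number of classes)
# so that the result always lies in [0, 1].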
def shannon(distr):
k = len(distr)
if k > 1:
f = lambda x: 0 if x == 0 else x*math.log(x,k)
logarithms = list((map (f, distr)))
return - reduce ((lambda x, y: x+y), logarithms)
else:
return 0
def entropie(aSet):
distr = aSet.getDistribution()
return shannon(distr)
def discretise(LSet, col):
""" LabelledSet * int -> tuple[float, float]
    col is the index of the column of X to discretize
    returns the cut value that minimizes the entropy, together with that entropy.
"""
    # initialization:
    min_entropie = 1.1 # set above the maximum since we want to minimize
    min_seuil = 0.0
    # sort the values:
    ind= np.argsort(LSet.x,axis=0)
    # class distributions for E1 and E2:
    inf_plus = 0 # number of +1 in E1
    inf_moins = 0 # number of -1 in E1
    sup_plus = 0 # number of +1 in E2
    sup_moins = 0 # number of -1 in E2
    # note: initially E1 is considered empty, so E2 corresponds to E.
    # Hence inf_plus and inf_moins are 0; it only remains to count sup_plus and
    # sup_moins in E.
for j in range(0,LSet.size()):
if (LSet.getY(j) == -1):
sup_moins += 1
else:
sup_plus += 1
    nb_total = (sup_plus + sup_moins) # total number of examples in E
    # sweep to find the best threshold:
for i in range(len(LSet.x)-1):
        v_ind_i = ind[i] # vector of indices
courant = LSet.getX(v_ind_i[col])[col]
lookahead = LSet.getX(ind[i+1][col])[col]
val_seuil = (courant + lookahead) / 2.0;
        # update the class distributions:
        # to keep the bookkeeping cheap, one example is removed from E2 and placed
        # in E1; this is how the cut threshold is moved along.
if LSet.getY(ind[i][col])[0] == -1:
inf_moins += 1
sup_moins -= 1
else:
inf_plus += 1
sup_plus -= 1
        # class distribution on each side of the threshold:
        nb_inf = (inf_moins + inf_plus)*1.0 # note: cast to float to avoid
        nb_sup = (sup_moins + sup_plus)*1.0 # integer division.
        # entropy of this cut
val_entropie_inf = shannon([inf_moins / nb_inf, inf_plus / nb_inf])
val_entropie_sup = shannon([sup_moins / nb_sup, sup_plus / nb_sup])
val_entropie = (nb_inf / nb_total) * val_entropie_inf + (nb_sup / nb_total) * val_entropie_sup
        # if this cut minimizes the entropy, remember the threshold and its entropy:
if (min_entropie > val_entropie):
min_entropie = val_entropie
min_seuil = val_seuil
return (min_seuil, min_entropie)
def divise(LSet, att, seuil):
plus_petits = LabeledSet(LSet.getInputDimension())
plus_grands = LabeledSet(LSet.getInputDimension())
for i in range (LSet.size()):
if LSet.getX(i)[att] <= seuil:
plus_petits.addExample(LSet.getX(i), LSet.getY(i)[0])
else:
plus_grands.addExample(LSet.getX(i), LSet.getY(i)[0])
return plus_petits, plus_grands
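# Binary decision tree: internal nodes store (attribute, threshold); leaves store a class label.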
class ArbreBinaire:
def __init__(self):
        self.attribut = None # attribute index
        self.seuil = None
        self.inferieur = None # left subtree (values <= threshold)
        self.superieur = None # right subtree (values > threshold)
        self.classe = None # class if this node is a leaf: -1 or +1
def est_feuille(self):
""" rend True si l'arbre est une feuille """
return self.seuil == None
def ajoute_fils(self,ABinf,ABsup,att,seuil):
""" ABinf, ABsup: 2 arbres binaires
att: numéro d'attribut
seuil: valeur de seuil
"""
self.attribut = att
self.seuil = seuil
self.inferieur = ABinf
self.superieur = ABsup
def ajoute_feuille(self,classe):
""" classe: -1 ou + 1
"""
self.classe = classe
def classifie(self,exemple):
""" exemple : numpy.array
        returns the class of the example: +1 or -1
"""
if self.est_feuille():
return self.classe
if exemple[self.attribut] <= self.seuil:
return self.inferieur.classifie(exemple)
return self.superieur.classifie(exemple)
def to_graph(self, g, prefixe='A'):
""" construit une représentation de l'arbre pour pouvoir
l'afficher
"""
if self.est_feuille():
g.node(prefixe,str(self.classe),shape='box')
else:
g.node(prefixe, str(self.attribut))
self.inferieur.to_graph(g,prefixe+"g")
self.superieur.to_graph(g,prefixe+"d")
g.edge(prefixe,prefixe+"g", '<='+ str(self.seuil))
g.edge(prefixe,prefixe+"d", '>'+ str(self.seuil))
return g
def construit_AD(LSet, epsilon):
un_arbre = ArbreBinaire()
if shannon(LSet.getDistribution()) <= epsilon:
un_arbre.ajoute_feuille(LSet.getMaxLabel())
else:
dim = LSet.getInputDimension()
minim = float('inf')
min_seuil = min_index = 0
for i in range(dim):
seuil, entropie = discretise(LSet,i)
if entropie < minim:
min_seuil, minim, min_index = (seuil, entropie, i)
smaller, bigger = divise(LSet,min_index,min_seuil)
un_arbre.ajoute_fils(construit_AD(smaller,epsilon),construit_AD(bigger,epsilon),min_index, min_seuil)
return un_arbre
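# Illustrative usage (names are examples only):
#   tree = construit_AD(train_set, 0.0)        # grow until every leaf is pure
#   label = tree.classifie(train_set.getX(0))  # -> -1 or +1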
class ArbreDecision(Classifier):
    # Constructor
    def __init__(self,epsilon):
        # entropy threshold below which tree construction stops
self.epsilon= epsilon
self.racine = None
    # Computes the prediction on x => returns a score
    def predict(self,x):
        # classify example x with the decision tree
        # returns -1 (class -1) or +1 (class +1)
classe = self.racine.classifie(x)
if (classe == 1):
return(1)
else:
return(-1)
    # Trains the model on a labeled dataset
    def train(self,set):
        # build the decision tree
        self.set=set
        self.racine = construit_AD(set,self.epsilon)
    # Displays the tree
def plot(self):
gtree = gv.Digraph(format='png')
return self.racine.to_graph(gtree)
def createXOR(nb_points,covar):
a_set = LabeledSet(2)
var = [[covar,0], [0,covar]]
positive_center1 = [0,0]
positive_center2 = [1,1]
X = np.random.multivariate_normal(positive_center1, var, int(nb_points/4))
Y = np.random.multivariate_normal(positive_center2, var, int(nb_points/4))
for i in range(len(X)):
a_set.addExample(X[i],1)
a_set.addExample(Y[i],1)
negative_center1 = [1,0]
negative_center2 = [0,1]
X = np.random.multivariate_normal(negative_center1, var, int(nb_points/4))
Y = np.random.multivariate_normal(negative_center2, var, int(nb_points/4))
for i in range(len(X)):
a_set.addExample(X[i],-1)
a_set.addExample(Y[i],-1)
return a_set
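# createXOR builds the classic XOR layout (positives around (0,0)/(1,1), negatives around
# (1,0)/(0,1)), which is not linearly separable and therefore defeats a single perceptron.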
class Perceptron(Classifier):
def dist_euclidienne_vect(self,x,y):
v = [(i-j)*(i-j) for i,j in np.column_stack([x,y])]
return np.sqrt(np.sum(v))
def __init__(self,input_dimension,learning_rate, eps=0.001, verb = False):
self.input_dimension = input_dimension
self.m = learning_rate
self.weights = np.zeros(input_dimension)
self.eps = eps
self.verbose = verb
    # Computes the prediction on x => returns a score
def predict(self,x):
value = np.vdot(self.weights,x)
if value > 0:
return 1
else :
return -1
    # Trains the model on a labeled dataset
def train(self,labeledSet):
niter = 0
taille = labeledSet.size()
old_weights = [float('inf') for i in range(self.input_dimension)]
while(self.dist_euclidienne_vect(old_weights, self.weights)>self.eps and niter < 500):
old_weights = self.weights
for i in range(taille):
vector = labeledSet.getX(i)
label = labeledSet.getY(i)
prediction = self.predict(vector)
if (prediction != label):
self.weights = [self.weights[i] + self.m*label*vector[i] for i in range(self.input_dimension)]
niter += 1
if (self.verbose):
print ("L'accuracy est %f apres %i iterations" %(self.accuracy(labeledSet),niter))
def tirage(v, m, remise):
if remise:
ret = [random.choice(v) for _ in range(m)]
else:
ret = random.sample(v,m)
return ret
def echantillonLS(LSet, m, remise, plus_info = False):
index = tirage(range(LSet.size()),m,remise)
choix = LabeledSet(LSet.getInputDimension())
pas_choisis = LabeledSet(LSet.getInputDimension())
for i in index:
choix.addExample(LSet.x[i], LSet.y[i][0])
for i in range(LSet.size()):
if i not in index:
pas_choisis.addExample(LSet.x[i], LSet.y[i][0])
if not plus_info:
return choix
else:
return choix, pas_choisis
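# Bagging: train nArbres decision trees on bootstrap samples of the training set and
# predict by majority vote (sign of the mean of the +1/-1 votes).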
class ClassifierBaggingTree(Classifier):
def __init__(self,nArbres,pourcentageExemples,seuil,remise):
self.nArbres = nArbres
self.pourcentage = pourcentageExemples
self.seuil = seuil
self.remise = remise
self.foret = []
def train(self,LSet):
N = int(LSet.size() * self.pourcentage)
for _ in range(self.nArbres):
echantillon = echantillonLS(LSet,N,self.remise)
arb_dec = ArbreDecision(self.seuil)
arb_dec.train(echantillon)
self.foret.append(arb_dec)
def predict(self,x):
votes = np.array([arbre.predict(x) for arbre in self.foret])
if votes.mean() >= 0 :
return 1
else:
return -1
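# Out-of-bag (OOB) variants: each base estimator only votes on examples that were not
# part of the sample it was trained on.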
class ClassifierOOBPerceptron(Classifier):
def __init__(self,nPercep,pourcentageExemples,seuil,remise):
self.nPercep = nPercep
self.pourcentage = pourcentageExemples
self.seuil = seuil
self.remise = remise
self.ensemble = dict()
self.echantillons = dict()
self.size = 0
def train(self,LSet):
N = int(LSet.size() * self.pourcentage)
for _ in range(self.nPercep):
echantillon = echantillonLS(LSet,N,self.remise)
perc = Perceptron(LSet.getInputDimension(),0.05)
perc.train(echantillon)
self.ensemble[self.size] = perc
self.echantillons[self.size] = echantillon
self.size += 1
def can_vote(self, k, position):
echantillon = self.echantillons[k]
for x in echantillon.x:
foundInEchantillon = True
for i in range(len(x)):
if x[i] != position[i]:
foundInEchantillon = False
if foundInEchantillon:
return False
return True
def predict(self,x):
right_to_vote = [self.can_vote(key, x) for key in self.echantillons]
votes = np.array([self.ensemble[i].predict(x) for i in range(self.size) if right_to_vote[i] ])
try :
if votes.mean() >= 0 :
return 1
else:
return -1
except RuntimeWarning:
return 1
class ClassifierOOBTree(Classifier):
def __init__(self,nArbres,pourcentageExemples,seuil,remise):
self.nArbres = nArbres
self.pourcentage = pourcentageExemples
self.seuil = seuil
self.remise = remise
self.foret = dict()
self.echantillons = dict()
self.sizeForet = 0
def train(self,LSet):
N = int(LSet.size() * self.pourcentage)
for _ in range(self.nArbres):
echantillon = echantillonLS(LSet,N,self.remise)
arb_dec = ArbreDecision(self.seuil)
arb_dec.train(echantillon)
self.foret[self.sizeForet] = arb_dec
self.echantillons[self.sizeForet] = echantillon
self.sizeForet += 1
def can_vote(self, k, position):
echantillon = self.echantillons[k]
for x in echantillon.x:
foundInEchantillon = True
for i in range(len(x)):
if x[i] != position[i]:
foundInEchantillon = False
if foundInEchantillon:
return False
return True
def predict(self,x):
right_to_vote = [self.can_vote(key, x) for key in self.echantillons]
votes = np.array([self.foret[i].predict(x) for i in range(self.sizeForet) if right_to_vote[i] ])
try :
if votes.mean() >= 0 :
return 1
else:
return -1
except RuntimeWarning:
return 1
def dist_vect(x,y):
v = [(i-j)*(i-j) for i,j in np.column_stack([x,y])]
return np.sqrt(np.sum(v))
def initialisation(K, df):
choice = random.sample(range(len(df)),K)
return pd.DataFrame(df.iloc[choice])
def inertie_cluster(df):
cntr = mon_centroide(df)
ret=0
for i in range(len(df)):
d = dist_vect(cntr,df.iloc[i])
ret += d*d
return ret
def mon_centroide(m):
return m.mean(axis=0)
def plus_proche(ex,centroides):
minimum = float('inf')
pos = -1
for i in range(len(centroides)):
d = dist_vect(ex, centroides.iloc[i])
if minimum > d:
pos = i
minimum = d
return pos
def affecte_cluster(df,centr):
M_affect = dict()
for i in range(len(centr)):
M_affect[i]=[]
for i in range(len(df)):
proche = plus_proche(df.iloc[i],centr)
M_affect[proche].append(i)
return M_affect
def nouveaux_centroides(df, M):
centrs = [mon_centroide(df.iloc[M[i]]) for i in range(len(M))]
return pd.DataFrame(centrs)
def inertie_globale(df, M):
inrts = 0
for i in M:
tmp = pd.DataFrame(df.iloc[M[i]])
inrts += inertie_cluster(tmp)
return inrts
def kmoyennes(K, df, eps, iter_max, verbose=False):
diff = float('inf')
Centroides = initialisation(K, df)
M = affecte_cluster(df, Centroides)
prev_inert = inertie_globale(df,M)
while iter_max and diff>eps:
Centroides = nouveaux_centroides(df,M)
M = affecte_cluster(df, Centroides)
nouv_inert = inertie_globale(df,M)
diff = abs(nouv_inert - prev_inert)
if verbose:
print ("Inertie: %f Difference: %f "% (nouv_inert, diff))
prev_inert = nouv_inert
iter_max -= 1
return Centroides, M
| gpl-3.0 |
kiyoto/statsmodels | setup.py | 2 | 15932 | """
Much of the build system code was adapted from work done by the pandas
developers [1], which was in turn based on work done in pyzmq [2] and lxml [3].
[1] http://pandas.pydata.org
[2] http://zeromq.github.io/pyzmq/
[3] http://lxml.de/
"""
import os
from os.path import relpath, join as pjoin
import sys
import subprocess
import re
from distutils.version import StrictVersion, LooseVersion
# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
no_frills = (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info', '--version',
'clean')))
# try bootstrapping setuptools if it doesn't exist
try:
import pkg_resources
try:
pkg_resources.require("setuptools>=0.6c5")
except pkg_resources.VersionConflict:
from ez_setup import use_setuptools
use_setuptools(version="0.6c5")
from setuptools import setup, Command, find_packages
_have_setuptools = True
except ImportError:
# no setuptools installed
from distutils.core import setup, Command
_have_setuptools = False
if _have_setuptools:
setuptools_kwargs = {"zip_safe": False,
"test_suite": "nose.collector"}
else:
setuptools_kwargs = {}
if sys.version_info[0] >= 3:
sys.exit("Need setuptools to install statsmodels for Python 3.x")
curdir = os.path.abspath(os.path.dirname(__file__))
README = open(pjoin(curdir, "README.rst")).read()
DISTNAME = 'statsmodels'
DESCRIPTION = 'Statistical computations and models for use with SciPy'
LONG_DESCRIPTION = README
MAINTAINER = 'Skipper Seabold, Josef Perktold'
MAINTAINER_EMAIL ='[email protected]'
URL = 'http://statsmodels.sourceforge.net/'
LICENSE = 'BSD License'
DOWNLOAD_URL = ''
# These imports need to be here; setuptools needs to be imported first.
from distutils.extension import Extension
from distutils.command.build import build
from distutils.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
def build_extensions(self):
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
for ext in self.extensions:
if (hasattr(ext, 'include_dirs') and
not numpy_incl in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
_build_ext.build_extensions(self)
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
p = subprocess.call([sys.executable,
os.path.join(cwd, 'tools', 'cythonize.py'),
'statsmodels'],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def strip_rc(version):
return re.sub(r"rc\d+$", "", version)
def check_dependency_versions(min_versions):
"""
Don't let pip/setuptools do this all by itself. It's rude.
For all dependencies, try to import them and check if the versions of
installed dependencies match the minimum version requirements. If
installed but version too low, raise an error. If not installed at all,
return the correct ``setup_requires`` and ``install_requires`` arguments to
be added to the setuptools kwargs. This prevents upgrading installed
dependencies like numpy (that should be an explicit choice by the user and
never happen automatically), but make things work when installing into an
empty virtualenv for example.
"""
setup_requires = []
install_requires = []
try:
from numpy.version import short_version as npversion
except ImportError:
setup_requires.append('numpy')
install_requires.append('numpy')
else:
if not (LooseVersion(npversion) >= min_versions['numpy']):
raise ImportError("Numpy version is %s. Requires >= %s" %
(npversion, min_versions['numpy']))
try:
import scipy
except ImportError:
install_requires.append('scipy')
else:
try:
from scipy.version import short_version as spversion
except ImportError:
from scipy.version import version as spversion # scipy 0.7.0
if not (LooseVersion(spversion) >= min_versions['scipy']):
raise ImportError("Scipy version is %s. Requires >= %s" %
(spversion, min_versions['scipy']))
try:
from pandas import __version__ as pversion
except ImportError:
install_requires.append('pandas')
else:
if not (LooseVersion(pversion) >= min_versions['pandas']):
ImportError("Pandas version is %s. Requires >= %s" %
(pversion, min_versions['pandas']))
try:
from patsy import __version__ as patsy_version
except ImportError:
install_requires.append('patsy')
else:
# patsy dev looks like 0.1.0+dev
pversion = re.match("\d*\.\d*\.\d*", patsy_version).group()
if not (LooseVersion(pversion) >= min_versions['patsy']):
raise ImportError("Patsy version is %s. Requires >= %s" %
(pversion, min_versions["patsy"]))
return setup_requires, install_requires
MAJ = 0
MIN = 8
REV = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJ,MIN,REV)
classifiers = [ 'Development Status :: 4 - Beta',
'Environment :: Console',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Topic :: Scientific/Engineering']
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(" ".join(cmd), stdout = subprocess.PIPE, env=env,
shell=True).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def write_version_py(filename=pjoin(curdir, 'statsmodels/version.py')):
cnt = "\n".join(["",
"# THIS FILE IS GENERATED FROM SETUP.PY",
"short_version = '%(version)s'",
"version = '%(version)s'",
"full_version = '%(full_version)s'",
"git_revision = '%(git_revision)s'",
"release = %(isrelease)s", "",
"if not release:",
" version = full_version"])
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
dowrite = True
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists(filename):
# must be a source distribution, use existing version file
try:
from statsmodels.version import git_revision as GIT_REVISION
except ImportError:
dowrite = False
GIT_REVISION = "Unknown"
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
if dowrite:
try:
a = open(filename, 'w')
a.write(cnt % {'version': VERSION,
'full_version' : FULLVERSION,
'git_revision' : GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
class CleanCommand(Command):
"""Custom distutils command to clean the .so and .pyc files."""
user_options = [("all", "a", "")]
def initialize_options(self):
self.all = True
self._clean_me = []
self._clean_trees = []
self._clean_exclude = ["bspline_ext.c",
"bspline_impl.c"]
for root, dirs, files in list(os.walk('statsmodels')):
for f in files:
if f in self._clean_exclude:
continue
if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',
'.pyo',
'.pyd', '.c', '.orig'):
self._clean_me.append(pjoin(root, f))
for d in dirs:
if d == '__pycache__':
self._clean_trees.append(pjoin(root, d))
for d in ('build',):
if os.path.exists(d):
self._clean_trees.append(d)
def finalize_options(self):
pass
def run(self):
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
except Exception:
pass
for clean_tree in self._clean_trees:
try:
import shutil
shutil.rmtree(clean_tree)
except Exception:
pass
class CheckingBuildExt(build_ext):
"""Subclass build_ext to get clearer report if Cython is necessary."""
def check_cython_extensions(self, extensions):
for ext in extensions:
for src in ext.sources:
if not os.path.exists(src):
raise Exception("""Cython-generated file '%s' not found.
Cython is required to compile statsmodels from a development branch.
Please install Cython or download a source release of statsmodels.
""" % src)
def build_extensions(self):
self.check_cython_extensions(self.extensions)
build_ext.build_extensions(self)
class DummyBuildSrc(Command):
""" numpy's build_src command interferes with Cython's build_ext.
"""
user_options = []
def initialize_options(self):
self.py_modules_dict = {}
def finalize_options(self):
pass
def run(self):
pass
cmdclass = {'clean': CleanCommand,
'build': build}
cmdclass["build_src"] = DummyBuildSrc
cmdclass["build_ext"] = CheckingBuildExt
# some linux distros require it
#NOTE: we are not currently using this but add it to Extension, if needed.
# libraries = ['m'] if 'win32' not in sys.platform else []
from numpy.distutils.misc_util import get_info
npymath_info = get_info("npymath")
ext_data = dict(
kalman_loglike = {"name" : "statsmodels/tsa/kalmanf/kalman_loglike.c",
"depends" : ["statsmodels/src/capsule.h"],
"include_dirs": ["statsmodels/src"],
"sources" : []},
_statespace = {"name" : "statsmodels/tsa/statespace/_statespace.c",
"depends" : ["statsmodels/src/capsule.h"],
"include_dirs": ["statsmodels/src"] + npymath_info['include_dirs'],
"libraries": npymath_info['libraries'],
"library_dirs": npymath_info['library_dirs'],
"sources" : []},
linbin = {"name" : "statsmodels/nonparametric/linbin.c",
"depends" : [],
"sources" : []},
_smoothers_lowess = {"name" : "statsmodels/nonparametric/_smoothers_lowess.c",
"depends" : [],
"sources" : []}
)
extensions = []
for name, data in ext_data.items():
data['sources'] = data.get('sources', []) + [data['name']]
destdir = ".".join(os.path.dirname(data["name"]).split("/"))
data.pop('name')
obj = Extension('%s.%s' % (destdir, name), **data)
extensions.append(obj)
def get_data_files():
sep = os.path.sep
# install the datasets
data_files = {}
root = pjoin(curdir, "statsmodels", "datasets")
for i in os.listdir(root):
if i is "tests":
continue
path = pjoin(root, i)
if os.path.isdir(path):
data_files.update({relpath(path, start=curdir).replace(sep, ".") : ["*.csv",
"*.dta"]})
# add all the tests and results files
for r, ds, fs in os.walk(pjoin(curdir, "statsmodels")):
r_ = relpath(r, start=curdir)
if r_.endswith('results'):
data_files.update({r_.replace(sep, ".") : ["*.csv",
"*.txt"]})
return data_files
if __name__ == "__main__":
if os.path.exists('MANIFEST'):
os.unlink('MANIFEST')
min_versions = {
'numpy' : '1.4.0',
'scipy' : '0.7.0',
'pandas' : '0.7.1',
'patsy' : '0.1.0',
}
if sys.version_info[0] == 3 and sys.version_info[1] >= 3:
# 3.3 needs numpy 1.7+
min_versions.update({"numpy" : "1.7.0b2"})
(setup_requires,
install_requires) = check_dependency_versions(min_versions)
if _have_setuptools:
setuptools_kwargs['setup_requires'] = setup_requires
setuptools_kwargs['install_requires'] = install_requires
write_version_py()
# this adds *.csv and *.dta files in datasets folders
# and *.csv and *.txt files in test/results folders
package_data = get_data_files()
packages = find_packages()
packages.append("statsmodels.tsa.vector_ar.data")
package_data["statsmodels.datasets.tests"].append("*.zip")
package_data["statsmodels.iolib.tests.results"].append("*.dta")
package_data["statsmodels.stats.tests.results"].append("*.json")
package_data["statsmodels.tsa.vector_ar.tests.results"].append("*.npz")
# data files that don't follow the tests/results pattern. should fix.
package_data.update({"statsmodels.stats.tests" : ["*.txt"]})
package_data.update({"statsmodels.stats.libqsturng" :
["*.r", "*.txt", "*.dat"]})
package_data.update({"statsmodels.stats.libqsturng.tests" :
["*.csv", "*.dat"]})
package_data.update({"statsmodels.tsa.vector_ar.data" : ["*.dat"]})
package_data.update({"statsmodels.tsa.vector_ar.data" : ["*.dat"]})
# temporary, until moved:
package_data.update({"statsmodels.sandbox.regression.tests" :
["*.dta", "*.csv"]})
#TODO: deal with this. Not sure if it ever worked for bdists
#('docs/build/htmlhelp/statsmodelsdoc.chm',
# 'statsmodels/statsmodelsdoc.chm')
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')) and not no_frills:
# Generate Cython sources, unless building from source release
generate_cython()
setup(name = DISTNAME,
version = VERSION,
maintainer = MAINTAINER,
ext_modules = extensions,
maintainer_email = MAINTAINER_EMAIL,
description = DESCRIPTION,
license = LICENSE,
url = URL,
download_url = DOWNLOAD_URL,
long_description = LONG_DESCRIPTION,
classifiers = classifiers,
platforms = 'any',
cmdclass = cmdclass,
packages = packages,
package_data = package_data,
include_package_data=False, # True will install all files in repo
**setuptools_kwargs)
| bsd-3-clause |
fabioticconi/scikit-learn | sklearn/linear_model/tests/test_huber.py | 25 | 6981 | # Authors: Manoj Kumar [email protected]
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model.huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features,
random_state=0, noise=0.05)
# Replace 10% of the sample with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
def test_huber_equals_lr_for_high_epsilon():
# Test that HuberRegressor matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression(fit_intercept=True)
lr.fit(X, y)
huber = HuberRegressor(fit_intercept=True, epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
loss_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[0]
grad_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
# Test sample_weights implementation in HuberRegressor
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_, huber_coef)
assert_array_almost_equal(huber.intercept_, huber_intercept)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
huber.fit(X, y, sample_weight=[1, 3, 1, 2, 1])
assert_array_almost_equal(huber.coef_, huber_coef, 3)
assert_array_almost_equal(huber.intercept_, huber_intercept, 3)
# Test sparse implementation with sample weights.
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y, sample_weight=[1, 3, 1, 2, 1])
assert_array_almost_equal(huber_sparse.coef_, huber_coef, 3)
def test_huber_sparse():
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
def test_huber_scaling_invariant():
"""Test that outliers filtering is scaling independent."""
rng = np.random.RandomState(0)
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
huber.fit(X, 2. * y)
n_outliers_mask_2 = huber.outliers_
huber.fit(2. * X, 2. * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
"""Test they should converge to same coefficients for same parameters"""
X, y = make_regression_with_outliers(n_samples=5, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0, loss="huber", shuffle=True, random_state=0, n_iter=1000000,
fit_intercept=False, epsilon=1.35)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(
fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
# these would be almost same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
# No n_iter_ in old SciPy (<=0.9)
# And as said above, the first iteration seems to be run anyway.
if huber_warm.n_iter_ is not None:
assert_equal(1, huber_warm.n_iter_)
def test_huber_better_r2_score():
# Test that Huber gives a better r2 score than Ridge on the non-outliers
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.01, max_iter=100)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert_greater(huber_score, ridge_score)
# The huber model should also fit poorly on the outliers.
assert_greater(ridge_outlier_score, huber_outlier_score)
| bsd-3-clause |
macks22/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilties to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilties closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve as
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
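# --- Illustrative addendum (not part of the original example) ---
# The docstring above notes that SVC and random forests produce sigmoid-shaped
# reliability curves. sklearn's CalibratedClassifierCV can correct this by
# fitting a sigmoid (Platt) or isotonic mapping on held-out folds. The sketch
# below is hedged: the choice of method='sigmoid' and cv=3 is an assumption for
# demonstration only, so it is left commented out to keep the plot unchanged.
# from sklearn.calibration import CalibratedClassifierCV
# calibrated_svc = CalibratedClassifierCV(LinearSVC(C=1.0), method='sigmoid', cv=3)
# calibrated_svc.fit(X_train, y_train)
# prob_pos_calibrated = calibrated_svc.predict_proba(X_test)[:, 1]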
| bsd-3-clause |
7even7/DAT210x | Module5/assignment4.py | 1 | 10105 | import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import matplotlib
#
# TODO: Parameters to play around with
PLOT_TYPE_TEXT = False # If you'd like to see indices
PLOT_VECTORS = True # If you'd like to see your original features in P.C.-Space
matplotlib.style.use('ggplot') # Look Pretty
c = ['red', 'green', 'blue', 'orange', 'yellow', 'brown']
def drawVectors(transformed_features, components_, columns, plt):
num_columns = len(columns)
# This function will project your *original* feature (columns)
# onto your principal component feature-space, so that you can
# visualize how "important" each one was in the
# multi-dimensional scaling
# Scale the principal components by the max value in
# the transformed set belonging to that component
xvector = components_[0] * max(transformed_features[:,0])
yvector = components_[1] * max(transformed_features[:,1])
## Visualize projections
# Sort each column by its length. These are your *original*
# columns, not the principal components.
import math
important_features = { columns[i] : math.sqrt(xvector[i]**2 + yvector[i]**2) for i in range(num_columns) }
important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)
print "Projected Features by importance:\n", important_features
ax = plt.axes()
for i in range(num_columns):
# Use an arrow to project each original feature as a
# labeled vector on your principal component axes
plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75, zorder=600000)
plt.text(xvector[i]*1.2, yvector[i]*1.2, list(columns)[i], color='b', alpha=0.75, zorder=600000)
return ax
def doPCA(data, dimensions=2):
from sklearn.decomposition import RandomizedPCA
model = RandomizedPCA(n_components=dimensions)
model.fit(data)
return model
def doKMeans(data, clusters):
#
# TODO: Do the KMeans clustering here, passing in the # of clusters parameter
# and fit it against your data. Then, return a tuple containing the cluster
# centers and the labels
#
# .. your code here ..
model = KMeans(n_clusters=clusters).fit(data)
return model.cluster_centers_, model.labels_
#
# TODO: Load up the dataset. It has may or may not have nans in it. Make
# sure you catch them and destroy them, by setting them to '0'. This is valid
# for this dataset, since if the value is missing, you can assume no $ was spent
# on it.
#
# .. your code here ..
df = pd.read_csv('C:\Data\Projektit\DAT210x\Module5\Datasets\Wholesale customers data.csv')
#
# TODO: As instructed, get rid of the 'Channel' and 'Region' columns, since
# you'll be investigating as if this were a single location wholesaler, rather
# than a national / international one. Leaving these fields in here would cause
# KMeans to examine and give weight to them.
#
# .. your code here ..
df.drop(df[['Channel','Region' ]], axis=1, inplace=True)
#
# TODO: Before unitizing / standardizing / normalizing your data in preparation for
# K-Means, it's a good idea to get a quick peek at it. You can do this using the
# .describe() method, or even by using the built-in pandas df.plot.hist()
#
# .. your code here ..
#df.describe()
#df.plot.hist()
#
# INFO: Having checked out your data, you may have noticed there's a pretty big gap
# between the top customers in each feature category and the rest. Some feature
# scaling algos won't get rid of outliers for you, so it's a good idea to handle that
# manually---particularly if your goal is NOT to determine the top customers. After
# all, you can do that with a simple Pandas .sort_values() and not a machine
# learning clustering algorithm. From a business perspective, you're probably more
# interested in clustering your +/- 2 standard deviation customers, rather than the
# creme dela creme, or bottom of the barrel'ers
#
# Remove top 5 and bottom 5 samples for each column:
drop = {}
for col in df.columns:
# Bottom 5
sort = df.sort_values(by=col, ascending=True)
if len(sort) > 5: sort=sort[:5]
for index in sort.index: drop[index] = True # Just store the index once
# Top 5
sort = df.sort_values(by=col, ascending=False)
if len(sort) > 5: sort=sort[:5]
for index in sort.index: drop[index] = True # Just store the index once
#
# INFO Drop rows by index. We do this all at once in case there is a
# collision. This way, we don't end up dropping more rows than we have
# to, if there is a single row that satisfies the drop for multiple columns.
# Since there are 6 rows, if we end up dropping < 5*6*2 = 60 rows, that means
# there indeed were collisions.
print "Dropping {0} Outliers...".format(len(drop))
df.drop(inplace=True, labels=drop.keys(), axis=0)
print df.describe()
#
# INFO: What are you interested in?
#
# Depending on what you're interested in, you might take a different approach
# to normalizing/standardizing your data.
#
# You should note that all columns left in the dataset are of the same unit.
# You might ask yourself, do I even need to normalize / standardize the data?
# The answer depends on what you're trying to accomplish. For instance, although
# all the units are the same (generic money unit), the price per item in your
# store isn't. There may be some cheap items and some expensive one. If your goal
# is to find out what items people buy tend to buy together but you didn't
# unitize properly before running kMeans, the contribution of the lesser priced
# item would be dwarfed by the more expensive item.
#
# For a great overview on a few of the normalization methods supported in SKLearn,
# please check out: https://stackoverflow.com/questions/30918781/right-function-for-normalizing-input-of-sklearn-svm
#
# Suffice to say, at the end of the day, you're going to have to know what question
# you want answered and what data you have available in order to select the best
# method for your purpose. Luckily, SKLearn's interfaces are easy to switch out
# so in the mean time, you can experiment with all of them and see how they alter
# your results.
#
#
# 5-sec summary before you dive deeper online:
#
# NORMALIZATION: Let's say your user spend a LOT. Normalization divides each item by
# the average overall amount of spending. Stated differently, your
# new feature is = the contribution of overall spending going into
# that particular item: $spent on feature / $overall spent by sample
#
# MINMAX: What % in the overall range of $spent by all users on THIS particular
# feature is the current sample's feature at? When you're dealing with
# all the same units, this will produce a near face-value amount. Be
# careful though: if you have even a single outlier, it can cause all
# your data to get squashed up in lower percentages.
# Imagine your buyers usually spend $100 on wholesale milk, but today
# only spent $20. This is the relationship you're trying to capture
# with MinMax. NOTE: MinMax doesn't standardize (std. dev.); it only
# normalizes / unitizes your feature, in the mathematical sense.
# MinMax can be used as an alternative to zero mean, unit variance scaling.
# [(sampleFeatureValue-min) / (max-min)] * (range_max-range_min) + range_min
# Where min and max are the overall feature values over all samples, and
# (range_min, range_max) is the desired output range (default [0, 1]).
#
# TODO: Un-comment just ***ONE*** of lines at a time and see how alters your results
# Pay attention to the direction of the arrows, as well as their LENGTHS
#T = preprocessing.StandardScaler().fit_transform(df)
#T = preprocessing.MinMaxScaler().fit_transform(df)
#T = preprocessing.MaxAbsScaler().fit_transform(df)
#T = preprocessing.Normalizer().fit_transform(df)
T = df # No Change
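# Illustrative sketch (not part of the assignment): a tiny demonstration of how
# the scalers above differ. The helper below is an added assumption for clarity
# only and is never called; the toy values are made up.
def _compare_scalers_demo():
    toy = pd.DataFrame({'Milk': [100., 20., 60.], 'Delicassen': [1., 5., 3.]})
    print preprocessing.StandardScaler().fit_transform(toy)  # per-column zero mean, unit variance
    print preprocessing.MinMaxScaler().fit_transform(toy)    # per-column rescaled to [0, 1]
    print preprocessing.Normalizer().fit_transform(toy)      # each ROW rescaled to unit L2 norm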
#
# INFO: Sometimes people perform PCA before doing KMeans, so that KMeans only
# operates on the most meaningful features. In our case, there are so few features
# that doing PCA ahead of time isn't really necessary, and you can do KMeans in
# feature space. But keep in mind you have the option to transform your data to
# bring down its dimensionality. If you take that route, then your Clusters will
# already be in PCA-transformed feature space, and you won't have to project them
# again for visualization.
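# A minimal sketch of that optional route (an assumption, not required by the
# assignment): reduce to 2 principal components *before* clustering, so the
# centroids come out already in PCA space.
# pre_pca = doPCA(T, 2)
# T = pre_pca.transform(T)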
# Do KMeans
n_clusters = 3
centroids, labels = doKMeans(T, n_clusters)
print centroids
#
# TODO: Print out your centroids. They're currently in feature-space, which
# is good. Print them out before you transform them into PCA space for viewing
#
# .. your code here ..
#plt.scatter(centroids[0], centroids[1], label='Centroids')
# Do PCA *after* to visualize the results. Project the centroids as well as
# the samples into the new 2D feature space for visualization purposes.
display_pca = doPCA(T)
T = display_pca.transform(T)
CC = display_pca.transform(centroids)
# Visualize all the samples. Give them the color of their cluster label
fig = plt.figure()
ax = fig.add_subplot(111)
if PLOT_TYPE_TEXT:
# Plot the index of the sample, so you can further investigate it in your dset
for i in range(len(T)): ax.text(T[i,0], T[i,1], df.index[i], color=c[labels[i]], alpha=0.75, zorder=600000)
ax.set_xlim(min(T[:,0])*1.2, max(T[:,0])*1.2)
ax.set_ylim(min(T[:,1])*1.2, max(T[:,1])*1.2)
else:
# Plot a regular scatter plot
sample_colors = [ c[labels[i]] for i in range(len(T)) ]
ax.scatter(T[:, 0], T[:, 1], c=sample_colors, marker='o', alpha=0.2)
# Plot the Centroids as X's, and label them
ax.scatter(CC[:, 0], CC[:, 1], marker='x', s=169, linewidths=3, zorder=1000, c=c)
for i in range(len(centroids)): ax.text(CC[i, 0], CC[i, 1], str(i), zorder=500010, fontsize=18, color=c[i])
# Display feature vectors for investigation:
if PLOT_VECTORS: drawVectors(T, display_pca.components_, df.columns, plt)
# Add the cluster label back into the dataframe and display it:
df['label'] = pd.Series(labels, index=df.index)
print df
plt.show()
| mit |
hsuantien/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 47 | 8095 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
ntung/ramp-elnino-Saigon | test.py | 1 | 10661 |
# coding: utf-8
# # <a href="http://www.datascience-paris-saclay.fr">Paris Saclay Center for Data Science</a>
# #<a href=http://www.datascience-paris-saclay.fr/en/site/newsView/12>RAMP</a> on El Nino prediction
#
# <i> Balázs Kégl (CNRS), Claire Monteleoni (GWU), Mahesh Mohan (GWU), Timothy DelSole (COLA), Kathleen Pegion (COLA), Julie Leloup (UPMC), Alex Gramfort (LTCI) </i>
# ## Introduction
#
# A climate index is a real-valued time-series which has been designated of interest in the climate literature. For example, the El Niño–Southern Oscillation (ENSO) index has widespread uses for predictions of regional and seasonal conditions, as it tends to have strong (positive or negative) correlation with a variety of weather conditions and <a href=http://www.ipcc-wg2.gov/SREX/images/uploads/SREX-SPMbrochure_FINAL.pdf>extreme events</a> throughout the globe. The ENSO index is just one of the many climate indices studied. However, there is currently significant room for improvement in predicting even this extremely well studied index with such high global impact. For example, most statistical and climatological models erred significantly in their predictions of the 2015 El Niño event; their predictions were off by several months. Better tools to predict such indices are critical for seasonal and regional climate prediction, and would thus address grand challenges in the study of climate change (<a href=http://wcrp-climate.org/grand-challenges>World Climate Research Programme: Grand Challenges, 2013)</a>.
#
# ### El Niño
#
# <a href="https://www.ncdc.noaa.gov/teleconnections/enso/indicators/sst.php">El Niño</a> (La Niña) is a phenomenon in the equatorial Pacific Ocean characterized by a five consecutive 3-month running mean of sea surface temperature (SST) anomalies in the <a href=http://www1.ncdc.noaa.gov/pub/data/cmb/teleconnections/nino-regions.gif>Niño 3.4 region</a> that is above (below) the threshold of $+0.5^\circ$C ($-0.5\circ$C). This standard of measure is known as the Oceanic Niño Index (ONI).
#
# <img src=http://www1.ncdc.noaa.gov/pub/data/cmb/teleconnections/nino-regions.gif>
#
# More information can be found <a href=https://www.ncdc.noaa.gov/teleconnections/enso/indicators/sst.php>here</a> on why it is an important region, and what is the history of the index.
#
# Here are the <a href = http://iri.columbia.edu/our-expertise/climate/forecasts/enso/current/>current ENSO predictions</a>, updated monthly.
#
#
# ### The CCSM4 simulator
#
# Our data is coming from the <a href=http://www.cesm.ucar.edu/models/ccsm4.0/>CCSM4.0</a> model (simulator). This allows us to access a full regular temperature map for a 500+ year period which makes the evaluation of the predictor more robust than if we used real measurements.
#
# ### The data
#
# The data is a time series of "images" $z_t$, consisting of temperature measurements (for a technical reason it is not SST that we will work with, rather air temperature) on a regular grid on the Earth, indexed by lon(gitude) and lat(itude) coordinates. The average temperatures are recorded every month for 501 years, giving 6012 time points. The goal is to predict the temperature in the El Nino region, <span style="color:red">6 months ahead</span>.
#
# ### The prediction task
#
# Similarly to the variable stars RAMP, the pipeline will consist of a feature extractor and a predictor. Since the task is regression, the predictor will be a regressor, and the score (to minimize) will be the <a href=http://en.wikipedia.org/wiki/Root-mean-square_deviation>root mean square error</a>. The feature extractor will have access to the whole data. It will construct a "classical" feature matrix where each row corresponds to a time point. You should collect all information into these features that you find relevant to the regressor. The feature extractor can take <span style="color:red">anything from the past</span>, that is, it will implement a function $x_t = f(z_1, \ldots, z_t)$. Since you will have access to the full data, in theory you can cheat (even inadvertently) by using information from the future. Please do your best to avoid this since it would make the results irrelevant.
#
# ### Domain-knowledge suggestions
#
# You are of course free to explore any regression technique to improve the prediction. Since the input dimension is relatively large (2000+ dimensions per time point even after subsampling) sparse regression techniques (eg. LASSO) may be the best way to go, but this is just an a priori suggestion. The following list provides you other hints to start with, based on domain knowledge.
# <ul>
# <li>There is a strong seasonal cycle that must be taken into account.
# <li>There is little scientific/observational evidence that regions outside the Pacific play a role in NINO3.4 variability, so it is probably best to focus on Pacific SST for predictions.
# <li>The relation between tropical and extra-tropical Pacific SST is very unclear, so please explore!
# <li>The NINO3.4 index has an oscillatory character (cold followed by warm followed by cold), but this pattern does not repeat exactly. It would be useful to be able to predict periods when the oscillation is “strong” and when it “breaks down.”
# <li>A common shortcoming of empirical predictions is that they under-predict the <i>amplitude</i> of warm and cold events. Can this be improved?
# <li>There is evidence that the predictability is low when forecasts start in, or cross over, March and April (the so-called “spring barrier”). Improving predictions through the spring barrier would be important.
# </ul>
# # Exploratory data analysis
# Packages to install:
#
# conda install basemap<BR>
# conda install -c https://conda.binstar.org/anaconda xray<BR>
# conda install netcdf4 h5py<BR>
# pip install pyresample<BR>
# In[68]:
#get_ipython().magic(u'matplotlib inline')
from mpl_toolkits.basemap import Basemap  # required by plot_map() below
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xray # should be installed with pip
import pyresample # should be installed with pip
from sklearn.cross_validation import cross_val_score
# Let's start by reading the data into an xray Dataset object. You can find all information on how to access and manipulate <code>Dataset</code> and <code>DataArray</code> objects at the <a href=http://xray.readthedocs.org/en/stable/>xray site</a>.
# In[2]:
temperatures_xray = xray.open_dataset(
'../resampled_tas_Amon_CCSM4_piControl_r1i1p1_080001-130012.nc', decode_times=False)
#temperatures_xray = xray.open_dataset(
# 'COLA_data/tas_Amon_CCSM4_piControl_r2i1p1_095301-110812.nc', decode_times=False)
#temperatures_xray = xray.open_dataset(
# 'COLA_data/tas_Amon_CCSM4_piControl_r3i1p1_000101-012012.nc', decode_times=False)
# there is no way to convert a date starting with the year 800 into pd array so we
# shift the starting date to 1700
temperatures_xray['time'] = pd.date_range('1/1/1700',
periods=temperatures_xray['time'].shape[0],
freq='M') - np.timedelta64(15, 'D')
# Printing it, you can see that it contains all the data, indices, and other metadata.
# In[3]:
temperatures_xray
# The main data is in the 'tas' ("temperature at surface") DataArray.
# In[4]:
temperatures_xray['tas']
# You can index it in the same way as a <code>pandas</code> or <code>numpy</code> array. The result is always a <coda>DataArray</code>
# In[5]:
t = 123
lat = 13
lon = 29
temperatures_xray['tas'][t]
temperatures_xray['tas'][t, lat]
temperatures_xray['tas'][t, lat, lon]
temperatures_xray['tas'][:, lat, lon]
temperatures_xray['tas'][t, :, lon]
temperatures_xray['tas'][:, :, lon]
# You can convert any of these objects into a <code>numpy</code> array.
# In[6]:
temperatures_xray['tas'].values
temperatures_xray['tas'][t].values
temperatures_xray['tas'][t, lat].values
temperatures_xray['tas'][t, lat, lon].values
# You can also use slices, and slice bounds don't even have to be in the index arrays. The following function computes the target at time $t$. The input is an xray DataArray (3D panel) that contains the temperatures. We select the El Nino 3.4 region, and take the mean temperatures, specifying that we are taking the mean over the spatial (lat and lon) coordinates. The output is a vector with the same length as the original time series.
# In[7]:
en_lat_bottom = -5
en_lat_top = 5
en_lon_left = 360 - 170
en_lon_right = 360 - 120
def get_area_mean(tas, lat_bottom, lat_top, lon_left, lon_right):
"""The array of mean temperatures in a region at all time points."""
return tas.loc[:, lat_bottom:lat_top, lon_left:lon_right].mean(dim=('lat','lon'))
def get_enso_mean(tas):
"""The array of mean temperatures in the El Nino 3.4 region at all time points."""
return get_area_mean(tas, en_lat_bottom, en_lat_top, en_lon_left, en_lon_right)
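# Illustrative sketch (not part of the original notebook): a minimal baseline
# predictor for the task described above. It builds a lag-window feature matrix
# from the El Nino 3.4 mean itself and fits a ridge regression to predict the
# index n_lookahead months ahead. The names (n_lookahead, n_lag) and the ridge
# baseline are assumptions for demonstration, not the reference solution.
def make_lag_baseline(tas, n_lookahead=6, n_lag=2):
    from sklearn.linear_model import Ridge
    enso = get_enso_mean(tas).values
    t_range = range(n_lag - 1, len(enso) - n_lookahead)
    X = np.array([enso[t - n_lag + 1:t + 1] for t in t_range])  # features: the last n_lag monthly means
    y = np.array([enso[t + n_lookahead] for t in t_range])      # target: the mean n_lookahead months later
    return Ridge().fit(X, y), X, y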
# The following function plots the temperatures at a given $t$ (time_index).
# In[8]:
el_nino_lats = [en_lat_bottom, en_lat_top, en_lat_top, en_lat_bottom]
el_nino_lons = [en_lon_right, en_lon_right, en_lon_left, en_lon_left]
from matplotlib.patches import Polygon
def plot_map(temperatures_xray, time_index):
def draw_screen_poly(lats, lons, m):
x, y = m(lons, lats)
xy = zip(x,y)
poly = Polygon(xy, edgecolor='black', fill=False)
plt.gca().add_patch(poly)
lons, lats = np.meshgrid(temperatures_xray['lon'], temperatures_xray['lat'])
fig = plt.figure()
ax = fig.add_axes([0.05, 0.05, 0.9,0.9])
map = Basemap(llcrnrlon=0, llcrnrlat=-89, urcrnrlon=360, urcrnrlat=89, projection='mill')
# draw coastlines, country boundaries, fill continents.
map.drawcoastlines(linewidth=0.25)
#map.drawcountries(linewidth=0.25)
#map.fillcontinents(color='coral',lake_color='aqua')
# draw the edge of the map projection region (the projection limb)
#map.drawmapboundary(fill_color='aqua')
im = map.pcolormesh(lons, lats, temperatures_xray[time_index] - 273.15,
shading='flat', cmap=plt.cm.jet, latlon=True)
cb = map.colorbar(im,"bottom", size="5%", pad="2%")
draw_screen_poly(el_nino_lats, el_nino_lons, map)
time_str = str(pd.to_datetime(str(temperatures_xray['time'].values[time_index])))[:7]
ax.set_title("Temperature map " + time_str)
#plt.savefig("test_plot.pdf")
plt.show()
# Let's plot the temperature at a given time point. Feel free to change the time, play with the season, discover visually the variability of the temperature map.
# In[9]:
t = 12
plot_map(temperatures_xray['tas'], t)
| gpl-2.0 |
wanggang3333/scikit-learn | sklearn/linear_model/setup.py | 169 | 1567 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
kvasukib/groupflow_simulator | groupflow_scripts/tree_aggregation_simulations/tree_aggregation_bidirectional.py | 2 | 31709 | from scipy.stats import truncnorm, tstd, poisson, expon
from numpy.random import randint, uniform
from datetime import datetime
from collections import defaultdict
from sets import Set
from heapq import heappop, heappush
from time import time
from scipy.cluster.hierarchy import *
from scipy.spatial.distance import pdist
import scipy.spatial.distance as ssd
import os
import sys
import signal
import numpy as np
import matplotlib.pyplot as plt
ONE_FORWARDING_ENTRY_PER_SWITCH = True
def get_cluster_group_aggregation(group_indexes, linkage_array, difference_threshold):
group_map = {}
for group_index in group_indexes:
group_map[group_index] = [group_index]
next_cluster_index = len(group_indexes)
for cluster in linkage_array:
if cluster[2] > difference_threshold:
break
new_cluster_list = []
for index in group_map[cluster[0]]:
new_cluster_list.append(index)
for index in group_map[cluster[1]]:
new_cluster_list.append(index)
del group_map[cluster[0]]
del group_map[cluster[1]]
group_map[next_cluster_index] = new_cluster_list
next_cluster_index += 1
#print 'Group Aggregations for Difference Threshold: ' + str(difference_threshold)
#for cluster_index in group_map:
# print 'Cluster Index: ' + str(cluster_index)
# for group_index in group_map[cluster_index]:
# print str(group_index) + ' ',
# print ' '
return group_map
def calc_best_rendevouz_point(topology, mcast_groups):
aggregated_group_nodes = []
for group in mcast_groups:
aggregated_group_nodes.append(group.src_node_id)
for receiver_id in group.receiver_ids:
aggregated_group_nodes.append(receiver_id)
aggregated_group_nodes = list(Set(aggregated_group_nodes))
min_sum_tree_length = sys.maxint
rv_node_id = None
sp_tree = None
for forwarding_element in topology.forwarding_elements:
no_rendevouz_path = False
potential_rv_node_id = forwarding_element.node_id
potential_tree = topology.get_shortest_path_tree(potential_rv_node_id, aggregated_group_nodes)
sum_tree_length = len(potential_tree)
if sum_tree_length <= min_sum_tree_length:
min_sum_tree_length = sum_tree_length
rv_node_id = potential_rv_node_id
sp_tree = potential_tree
return rv_node_id, aggregated_group_nodes, sp_tree
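# In other words, the rendezvous point chosen above is simply the forwarding element
# whose shortest-path tree spanning every source and receiver of the aggregated groups
# has the fewest edges; ties go to the later node examined because of the <= comparison.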
def aggregate_groups_via_tree_sim(topology, mcast_groups, bandwidth_overhead_threshold):
group_map = defaultdict(lambda : None)
next_agg_tree_index = 0
for group in mcast_groups:
if len(group_map) == 0:
            # The first group to be processed always uses its native multicast tree
group.rendevouz_point_node_id = group.src_node_id
group.rendevouz_point_shortest_path = []
group.aggregated_mcast_tree = topology.get_shortest_path_tree(group.src_node_id, list(group.receiver_ids))
group.aggregated_mcast_tree_index = next_agg_tree_index
group.aggregated_bandwidth_Mbps = len(group.aggregated_mcast_tree) * group.bandwidth_Mbps
group_map[next_agg_tree_index] = [group.group_index]
next_agg_tree_index += 1
continue
# If this is not the first group, iterate through all existing aggregated trees, and check if any can be extended
# to cover the group without exceeding the bandwidth overhead threshold
final_aggregated_groups = None
final_aggregated_tree_index = None
final_aggregated_mcast_tree = None
final_rv_node_id = None
final_aggregated_bandwidth_overhead = None
for agg_tree_index in group_map:
test_aggregated_groups = [group]
for group_index in group_map[agg_tree_index]:
test_aggregated_groups.append(mcast_groups[group_index])
rv_node_id, aggregated_group_nodes, aggregated_mcast_tree = calc_best_rendevouz_point(topology, test_aggregated_groups)
# Got a rendevouz node for this potential aggregation, now calculate the bandwidth overhead of this potential aggregation
native_bandwidth_Mbps = 0
aggregated_bandwidth_Mbps = 0
for test_group in test_aggregated_groups:
native_bandwidth_Mbps += test_group.native_bandwidth_Mbps
aggregated_bandwidth_Mbps += (len(aggregated_mcast_tree) * test_group.bandwidth_Mbps)
bandwidth_overhead_ratio = float(aggregated_bandwidth_Mbps) / native_bandwidth_Mbps;
if bandwidth_overhead_ratio > bandwidth_overhead_threshold:
continue # This aggregation causes the bandwidth overhead ratio to exceed the threshold
if final_aggregated_bandwidth_overhead is None or bandwidth_overhead_ratio < final_aggregated_bandwidth_overhead:
final_aggregated_bandwidth_overhead = bandwidth_overhead_ratio
final_aggregated_tree_index = agg_tree_index
final_aggregated_groups = test_aggregated_groups
final_aggregated_mcast_tree = aggregated_mcast_tree
final_rv_node_id = rv_node_id
# At this point, either a valid aggregation has been found (and stored in the "final" variables), or the group will
# be assigned to a new, native tree
if final_aggregated_tree_index is not None:
# A valid aggregation has been found
# print 'Assigning group #' + str(group.group_index) + ' to aggregated tree #' + str(final_aggregated_tree_index) + ' (BW Overhead: ' + str(final_aggregated_bandwidth_overhead) + ')'
group_map[final_aggregated_tree_index].append(group.group_index)
for agg_group in final_aggregated_groups:
agg_group.rendevouz_point_node_id = final_rv_node_id
agg_group.rendevouz_point_shortest_path = []
agg_group.aggregated_mcast_tree = final_aggregated_mcast_tree
agg_group.aggregated_mcast_tree_index = final_aggregated_tree_index
agg_group.aggregated_bandwidth_Mbps = (len(agg_group.aggregated_mcast_tree) * agg_group.bandwidth_Mbps)
else:
# Create a new aggregated tree index for the group
group.rendevouz_point_node_id = group.src_node_id
group.rendevouz_point_shortest_path = []
group.aggregated_mcast_tree = topology.get_shortest_path_tree(group.src_node_id, list(group.receiver_ids))
group.aggregated_mcast_tree_index = next_agg_tree_index
group.aggregated_bandwidth_Mbps = len(group.aggregated_mcast_tree) * group.bandwidth_Mbps
group_map[next_agg_tree_index] = [group.group_index]
next_agg_tree_index += 1
# print 'Tree similarity aggregation results:\n' + str(group_map)
return mcast_groups, group_map
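# Worked example of the acceptance test above (hypothetical numbers): if two candidate
# groups natively consume 30 Mbps and 20 Mbps of tree bandwidth (50 Mbps total) and the
# shared tree would carry them at a combined 60 Mbps, the bandwidth_overhead_ratio is
# 60 / 50 = 1.2, so the aggregation is accepted only when the threshold is >= 1.2.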
def generate_cluster_aggregated_mcast_trees(topology, mcast_groups, group_map):
for group_aggregation in group_map:
# print 'Cluster #' + str(group_aggregation) + ' - Groups: ' + (str(group_map[group_aggregation]))
cluster_groups = []
for mcast_group_id in group_map[group_aggregation]:
mcast_groups[mcast_group_id].aggregated_mcast_tree_index = group_aggregation
cluster_groups.append(mcast_groups[mcast_group_id])
min_sum_path_length = sys.maxint
rv_node_id, aggregated_group_nodes, aggregated_mcast_tree = calc_best_rendevouz_point(topology, cluster_groups)
for mcast_group_id in group_map[group_aggregation]:
src_node_id = mcast_groups[mcast_group_id].src_node_id
mcast_groups[mcast_group_id].rendevouz_point_node_id = rv_node_id
mcast_groups[mcast_group_id].rendevouz_point_shortest_path = []
mcast_groups[mcast_group_id].aggregated_mcast_tree = aggregated_mcast_tree
mcast_groups[mcast_group_id].aggregated_bandwidth_Mbps = len(mcast_groups[mcast_group_id].aggregated_mcast_tree) * mcast_groups[mcast_group_id].bandwidth_Mbps
return mcast_groups, group_map
def get_vertex_set(edge_list):
vertex_set = Set()
for edge in edge_list:
vertex_set.add(edge[0])
vertex_set.add(edge[1])
return vertex_set
def get_num_connected_edges(edge_list, node_id):
num_edges = 0
for edge in edge_list:
if edge[0] == node_id:
num_edges += 1
if edge[1] == node_id:
num_edges += 1
return num_edges
def get_terminal_vertices(edge_list):
tail_set = Set()
head_set = Set()
for edge in edge_list:
tail_set.add(edge[0])
head_set.add(edge[1])
return (tail_set.union(head_set)) - (head_set.intersection(tail_set))
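# Example (directed edges as (tail, head) tuples): for [(0, 1), (1, 2), (1, 3)] the tail
# set is {0, 1} and the head set is {1, 2, 3}, so the terminal vertices are {0, 2, 3} --
# the union minus the intersection, i.e. the root plus the leaves of the tree.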
def get_origin_vertices(edge_list, origin_candidates):
node_edge_count = defaultdict(lambda : None)
for edge in edge_list:
if node_edge_count[edge[0]] is None:
node_edge_count[edge[0]] = 1
else:
node_edge_count[edge[0]] = node_edge_count[edge[0]] + 1
if node_edge_count[edge[1]] is None:
node_edge_count[edge[1]] = -1
else:
node_edge_count[edge[1]] = node_edge_count[edge[1]] - 1
origin_set = Set()
for node_id in origin_candidates:
if node_edge_count[node_id] is not None and node_edge_count[node_id] > 0:
origin_set.add(node_id)
return origin_set
def get_intermediate_vertices(edge_list):
tail_set = Set()
head_set = Set()
for edge in edge_list:
tail_set.add(edge[0])
head_set.add(edge[1])
return tail_set.intersection(head_set)
def aggregate_groups_via_clustering(groups, linkage_method, similarity_threshold, plot_dendrogram = False):
src_dist_clustering = False
if '_srcdist' in linkage_method:
src_dist_clustering = True
linkage_method = linkage_method[:-len('_srcdist')]
# Generate the distance matrix used for clustering
receivers_list = []
for group in groups:
receivers_list.append(list(group.receiver_ids))
receivers_array = np.array(receivers_list)
distance_matrix = []
group_index = 0
group_indexes = []
for group1 in groups:
distance_matrix.append([])
for group2 in groups:
jaccard_distance = group1.jaccard_distance(group2)
if src_dist_clustering:
src_distance = len(topo.get_shortest_path_tree(group1.src_node_id, [group2.src_node_id]))
                src_distance_ratio = (float(src_distance)/topo.network_diameter) # Distance between source nodes as a fraction of the network diameter
distance_matrix[group_index].append(1 - ((1 - jaccard_distance) * (1 - src_distance_ratio)))
else:
distance_matrix[group_index].append(jaccard_distance)
group_indexes.append(group_index)
group_index += 1
comp_dist_array = ssd.squareform(distance_matrix)
# Perform clustering, and plot a dendrogram of the results if requested
z = linkage(comp_dist_array, method=linkage_method)
group_map = get_cluster_group_aggregation(group_indexes, z, similarity_threshold)
if plot_dendrogram:
plt.figure(1, figsize=(6, 5))
print 'Linkage Array:\n' + str(z)
print ' '
d = dendrogram(z, show_leaf_counts=True)
plt.title('Multicast Group Clustering')
plt.xlabel('Multicast Group Index')
plt.ylabel('Cluster Similarity')
plt.show()
# Generate aggregated multicast trees based on the generated clusters
generate_cluster_aggregated_mcast_trees(topo, groups, group_map)
return groups, group_map
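# Note on the '_srcdist' variant above (illustrative numbers): with a Jaccard distance of
# 0.5 between two groups whose sources are 4 hops apart in a network of diameter 16,
# src_distance_ratio = 0.25 and the combined distance is 1 - (1 - 0.5) * (1 - 0.25) = 0.625,
# so source separation can only push groups further apart, never closer together.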
def calc_network_performance_metrics(groups, group_map, debug_print = False):
native_network_flow_table_size = 0
aggregated_network_flow_table_size = 0
native_bandwidth_Mbps = 0
aggregated_bandwidth_Mbps = 0
seen_aggregated_tree_indexes = []
non_reducible_flow_table_entries = 0
for group in groups:
# print 'Calculating flow table size for group: ' + str(group.group_index)
non_reducible_flow_table_entries += len(group.receiver_ids) + 1
native_bandwidth_Mbps += group.native_bandwidth_Mbps
aggregated_bandwidth_Mbps += group.aggregated_bandwidth_Mbps
native_network_flow_table_size += len(group.native_mcast_tree) + 1
aggregated_network_flow_table_size += len(group.receiver_ids) + 1
#print 'Native flow table size: ' + str(len(group.native_mcast_tree) + 1)
#print 'Aggregated non-reducible state: ' + str(len(group.receiver_ids) + 1)
if group.aggregated_mcast_tree_index not in seen_aggregated_tree_indexes:
#print 'Calculating flow table size for aggregated tree: ' + str(group.aggregated_mcast_tree_index)
seen_aggregated_tree_indexes.append(group.aggregated_mcast_tree_index)
tree_terminal_vertices = get_terminal_vertices(group.aggregated_mcast_tree)
#print str(tree_terminal_vertices)
if ONE_FORWARDING_ENTRY_PER_SWITCH:
aggregated_network_flow_table_size += len(group.aggregated_mcast_tree) + 1 - len(get_terminal_vertices(group.aggregated_mcast_tree))
else:
non_terminal_vertices = get_vertex_set(group.aggregated_mcast_tree) - tree_terminal_vertices
for vertex in non_terminal_vertices:
aggregated_network_flow_table_size += get_num_connected_edges(group.aggregated_mcast_tree, vertex)
#print 'Aggregated reducible state: ' + str(len(group.aggregated_mcast_tree) + 1)
#if debug_print:
# print ' '
# group.debug_print()
reducible_native_network_flow_table_size = native_network_flow_table_size - non_reducible_flow_table_entries
reducible_aggregated_network_flow_table_size = aggregated_network_flow_table_size - non_reducible_flow_table_entries
bandwidth_overhead_ratio = float(aggregated_bandwidth_Mbps) / float(native_bandwidth_Mbps)
flow_table_reduction_ratio = 1 - float(aggregated_network_flow_table_size) / float(native_network_flow_table_size)
reducible_flow_table_reduction_ratio = 1 - float(reducible_aggregated_network_flow_table_size) / float(reducible_native_network_flow_table_size)
if debug_print:
print ' '
print 'Aggregated Network Bandwidth Utilization: ' + str(aggregated_bandwidth_Mbps) + ' Mbps'
print 'Native Network Bandwidth Utilization: ' + str(native_bandwidth_Mbps) + ' Mbps'
print 'Bandwidth Overhead Ratio: ' + str(bandwidth_overhead_ratio)
print ' '
print 'Native Network Flow Table Size: ' + str(native_network_flow_table_size)
print 'Aggregated Network Flow Table Size: ' + str(aggregated_network_flow_table_size)
print 'Flow Table Reduction Ratio: ' + str(flow_table_reduction_ratio)
print ' '
print 'Reducible Native Network Flow Table Size: ' + str(reducible_native_network_flow_table_size)
print 'Reducible Aggregated Network Flow Table Size: ' + str(reducible_aggregated_network_flow_table_size)
print 'Reducible Flow Table Reduction Ratio: ' + str(reducible_flow_table_reduction_ratio)
return bandwidth_overhead_ratio, flow_table_reduction_ratio, reducible_flow_table_reduction_ratio, len(group_map)
class ForwardingElement(object):
def __init__(self, node_id):
self.node_id = node_id
def __str__(self):
return 'Forwarding Element #' + str(self.node_id)
class Link(object):
def __init__(self, tail_node_id, head_node_id, cost):
self.tail_node_id = tail_node_id # Node ID from which the link originates
self.head_node_id = head_node_id # Node ID to which the link delivers traffic
self.cost = cost
def __str__(self):
return 'Link: ' + str(self.tail_node_id) + ' --> ' + str(self.head_node_id) + ' C:' + str(self.cost)
class SimTopo(object):
def __init__(self):
self.forwarding_elements = []
self.links = []
self.shortest_path_map = defaultdict(lambda : None)
self.network_diameter = 0
self.recalc_path_tree_map = True
def calc_shortest_path_tree(self):
self.shortest_path_map = defaultdict(lambda : None)
for source_forwarding_element in self.forwarding_elements:
src_node_id = source_forwarding_element.node_id
nodes = set(self.forwarding_elements)
edges = self.links
graph = defaultdict(list)
for link in edges:
graph[link.tail_node_id].append((link.cost, link.head_node_id))
src_path_tree_map = defaultdict(lambda : None)
queue, seen = [(0,src_node_id,())], set()
while queue:
(cost,node1,path) = heappop(queue)
if node1 not in seen:
seen.add(node1)
path = (node1, path)
src_path_tree_map[node1] = path
for next_cost, node2 in graph.get(node1, ()):
if node2 not in seen:
new_path_cost = cost + next_cost
heappush(queue, (new_path_cost, node2, path))
for dst_forwarding_element in self.forwarding_elements:
if self.shortest_path_map[src_node_id] is None:
self.shortest_path_map[src_node_id] = defaultdict(lambda : None)
dst_node_id = dst_forwarding_element.node_id
shortest_path_edges = []
if dst_node_id == src_node_id:
self.shortest_path_map[src_node_id][dst_node_id] = []
continue
receiver_path = src_path_tree_map[dst_node_id]
if receiver_path is None:
continue
while receiver_path[1]:
shortest_path_edges.append((receiver_path[1][0], receiver_path[0]))
receiver_path = receiver_path[1]
self.shortest_path_map[src_node_id][dst_node_id] = shortest_path_edges
self.recalc_path_tree_map = False
# Recalculate the network diameter
self.network_diameter = 0
for source_forwarding_element in self.forwarding_elements:
for dest_forwarding_element in self.forwarding_elements:
path = self.get_shortest_path_tree(source_forwarding_element.node_id, [dest_forwarding_element.node_id])
if path is not None and len(path) > self.network_diameter:
self.network_diameter = len(path)
# print 'Got network diameter: ' + str(self.network_diameter)
def get_shortest_path_tree(self, source_node_id, receiver_node_id_list):
if self.recalc_path_tree_map:
self.calc_shortest_path_tree()
if len(receiver_node_id_list) == 1:
return self.shortest_path_map[source_node_id][receiver_node_id_list[0]]
shortest_path_tree_edges = Set()
for receiver_node_id in receiver_node_id_list:
shortest_path = self.shortest_path_map[source_node_id][receiver_node_id]
if shortest_path is None:
print 'ERROR: No shortest path from node ' + str(source_node_id) + ' to ' + str(receiver_node_id)
return None
for edge in shortest_path:
shortest_path_tree_edges.add(edge)
# Return the set as a list of edges
shortest_path_tree_edges = list(shortest_path_tree_edges)
return shortest_path_tree_edges
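    # Minimal usage sketch (hypothetical node IDs): after load_from_edge_list() or
    # load_from_brite_topo() has populated the topology, get_shortest_path_tree(0, [3, 5])
    # returns the union of the edges on the shortest paths 0->3 and 0->5 as a list of
    # (tail, head) tuples, or [] when the only receiver is the source itself.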
def load_from_edge_list(self, edge_list):
self.forwarding_elements = []
self.links = []
seen_node_ids = []
for edge in edge_list:
self.links.append(Link(edge[0], edge[1], 1))
if edge[0] not in seen_node_ids:
self.forwarding_elements.append(ForwardingElement(edge[0]))
seen_node_ids.append(edge[0])
if edge[1] not in seen_node_ids:
self.forwarding_elements.append(ForwardingElement(edge[1]))
seen_node_ids.append(edge[1])
self.recalc_path_tree_map = True
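    # Note: each [tail, head] pair adds a single directed Link of cost 1, which is why the
    # hard-coded Abilene edge list in __main__ spells out both directions of every physical
    # link explicitly.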
def load_from_brite_topo(self, brite_filepath, debug_print = False):
self.forwarding_elements = []
self.links = []
print 'Parsing BRITE topology at filepath: ' + str(brite_filepath)
file = open(brite_filepath, 'r')
line = file.readline()
print 'BRITE ' + line
# Skip ahead until the nodes section is reached
in_node_section = False
while not in_node_section:
line = file.readline()
if 'Nodes:' in line:
in_node_section = True
break
# In the nodes section now, generate a forwarding element for each node
while in_node_section:
line = file.readline().strip()
if not line:
in_node_section = False
if debug_print:
print 'Finished parsing nodes'
break
line_split = line.split('\t')
node_id = int(line_split[0])
if debug_print:
print 'Generating forwarding element for ID: ' + str(node_id)
self.forwarding_elements.append(ForwardingElement(node_id))
# Skip ahead to the edges section
in_edge_section = False
while not in_edge_section:
line = file.readline()
if 'Edges:' in line:
in_edge_section = True
break
# In the edges section now, add all required links
# Note: This code assumes that all links are bidirectional with cost 1
while in_edge_section:
line = file.readline().strip()
if not line: # Empty string
in_edge_section = False
if debug_print:
print 'Finished parsing edges'
break
line_split = line.split('\t')
node_id_1 = int(line_split[1])
node_id_2 = int(line_split[2])
if debug_print:
print 'Adding bi-directional link between forwarding elements ' + str(node_id_1) + ' and ' + str(node_id_2)
self.links.append(Link(node_id_1, node_id_2, 1))
self.links.append(Link(node_id_2, node_id_1, 1))
file.close()
self.recalc_path_tree_map = True
def __str__(self):
return_str = '====================\nForwarding Elements:\n====================\n'
for forwarding_element in self.forwarding_elements:
return_str = return_str + str(forwarding_element) + '\n'
return_str = return_str + '======\nLinks:\n======\n'
for link in self.links:
return_str = return_str + str(link) + '\n'
return return_str
class McastGroup(object):
def __init__(self, topology, src_node_id, bandwidth_Mbps, mcast_group_index):
self.group_index = mcast_group_index
self.src_node_id = src_node_id
self.receiver_ids = Set()
self.topology = topology
self.bandwidth_Mbps = bandwidth_Mbps
self.native_mcast_tree = None
self.native_bandwidth_Mbps = None
self.aggregated_mcast_tree_index = None
self.aggregated_mcast_tree = None
self.rendevouz_point_node_id = None
self.rendevouz_point_shortest_path = None
self.aggregated_bandwidth_Mbps = None
def set_receiver_ids(self, receiver_ids):
self.receiver_ids = Set(receiver_ids)
self.native_mcast_tree = self.topology.get_shortest_path_tree(self.src_node_id, list(self.receiver_ids))
self.native_bandwidth_Mbps = len(self.native_mcast_tree) * self.bandwidth_Mbps
def generate_random_receiver_ids(self, num_receivers):
# KLUDGE: Receiver IDs will always be generated until there is at least one receiver which is not colocated with the source
# This prevents divide by 0 errors when calculating performance metrics
# TODO - AC: Find a better way to handle this situation
while len(self.receiver_ids) < num_receivers:
            new_node_id = randint(0, len(self.topology.forwarding_elements))
if new_node_id != self.src_node_id and new_node_id not in self.receiver_ids:
self.receiver_ids.add(new_node_id)
self.native_mcast_tree = self.topology.get_shortest_path_tree(self.src_node_id, list(self.receiver_ids))
self.native_bandwidth_Mbps = len(self.native_mcast_tree) * self.bandwidth_Mbps
def jaccard_distance(self, mcast_group):
return 1.0 - (float(len(self.receiver_ids.intersection(mcast_group.receiver_ids))) / float(len(self.receiver_ids.union(mcast_group.receiver_ids))))
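    # Worked example: receiver sets {1, 2, 3} and {2, 3, 4} share 2 of 4 distinct receivers,
    # so the Jaccard distance is 1.0 - 2.0/4.0 = 0.5 (0.0 for identical sets, 1.0 for
    # disjoint ones).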
def debug_print(self):
print 'Multicast Group #' + str(self.group_index) + '\nSrc Node ID: ' + str(self.src_node_id) + '\nReceivers: ',
for receiver_id in self.receiver_ids:
print str(receiver_id) + ', ',
print ' '
print 'Native Mcast Tree:\n' + str(self.native_mcast_tree)
print 'Aggregated Mcast Tree Index: ' + str(self.aggregated_mcast_tree_index)
print 'Aggregated Mcast Tree:\n' + str(self.aggregated_mcast_tree)
print 'Rendevouz Point: Node #' + str(self.rendevouz_point_node_id) + '\nRendevouz Path: ' + str(self.rendevouz_point_shortest_path)
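# Minimal usage sketch (hypothetical IDs, assuming a SimTopo instance `topo` already exists):
#   group = McastGroup(topo, src_node_id=0, bandwidth_Mbps=10, mcast_group_index=0)
#   group.set_receiver_ids([3, 5])      # or group.generate_random_receiver_ids(5)
#   group.debug_print()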
def run_multicast_aggregation_test(topo, num_groups, min_group_size, max_group_size, similarity_type, similarity_parameter, debug_print = False, plot_dendrogram = False):
# Generate random multicast groups
groups = []
for i in range(0, num_groups):
groups.append(McastGroup(topo, randint(0, len(topo.forwarding_elements)), 10, i))
groups[i].generate_random_receiver_ids(randint(min_group_size, max_group_size + 1))
#groups.append(McastGroup(topo, 0, 10, 0))
#groups[0].set_receiver_ids([6,7])
#groups.append(McastGroup(topo, 1, 10, 1))
#groups[1].set_receiver_ids([6,7])
#groups.append(McastGroup(topo, 8, 10, 2))
#groups[2].set_receiver_ids([6,7])
run_time_start = time()
if 'single' in similarity_type or 'complete' in similarity_type or 'average' in similarity_type:
groups, group_map = aggregate_groups_via_clustering(groups, similarity_type, similarity_parameter)
elif 'tree_sim' in similarity_type:
groups, group_map = aggregate_groups_via_tree_sim(topo, groups, similarity_parameter)
else:
print 'ERROR: Invalid similarity type - Supported options are "single", "average", "complete", or "tree_sim"'
sys.exit(1)
run_time = time() - run_time_start
# Calculate network performance metrics
bandwidth_overhead_ratio, flow_table_reduction_ratio, reducible_flow_table_reduction_ratio, num_trees = calc_network_performance_metrics(groups, group_map)
return bandwidth_overhead_ratio, flow_table_reduction_ratio, reducible_flow_table_reduction_ratio, num_trees, run_time
if __name__ == '__main__':
    if len(sys.argv) < 7:
        print 'Tree aggregation script requires the following 6 command line arguments:'
print '[1] Topology filepath (string)'
print '[2] Number of trials to run (integer)'
print '[3] Number of multicast groups (integer)'
print '[4] Group size range (string, in format "1-10"). If only a single number is specified, the minimum group size is set to 1'
print '[5] Similarity type (string): one of "single", "complete", "average", or "tree_sim"'
print '[6] Similarity parameter (float):'
print '\tFor the "single", "complete", and "average" similarity types this sets the similarity threshold to use for clustering'
print '\tFor the "tree_sim" similarity type this sets the bandwidth overhead threshold'
print
sys.exit(0)
# Import the topology from BRITE format
topo = SimTopo()
if 'abilene' in sys.argv[1]:
print 'Using hardcoded Abilene topology'
topo.load_from_edge_list([[0,1], [0,2], [1,0], [1,2], [1,3], [2,0], [2,1], [2,5], [3,1], [3,4], [4,3], [4,5], [4,10],
[5,2], [5,4], [5,6], [6,5], [6,10], [6,7], [7,6], [7,8], [8,7], [8,9], [9,8], [9,10], [10,6], [10,9], [10,4]])
else:
topo.load_from_brite_topo(sys.argv[1])
#topo.load_from_edge_list([[0,2],[1,2],[2,0],[2,1],[2,3],[3,2],[3,4],[4,3],[4,5],[5,6],[5,7], [8,0]])
#similarity_threshold = 0.5
#bandwidth_overhead_ratio, flow_table_reduction_ratio, num_clusters = run_multicast_aggregation_test(topo, similarity_threshold, 'single', True)
#sys.exit(0)
bandwidth_overhead_list = []
flow_table_reduction_list = []
reducible_flow_table_reduction_list = []
num_trees_list = []
run_time_list = []
min_group_size = 1
max_group_size = 10
group_range_split = sys.argv[4].split('-')
if len(group_range_split) == 1:
max_group_size = int(group_range_split[0])
else:
min_group_size = int(group_range_split[0])
max_group_size = int(group_range_split[1])
num_trials = int(sys.argv[2])
start_time = time()
print 'Simulations started at: ' + str(datetime.now())
for i in range(0, num_trials):
#if i % 20 == 0:
# print 'Running trial #' + str(i)
bandwidth_overhead_ratio, flow_table_reduction_ratio, reducible_flow_table_reduction_ratio, num_trees, run_time = \
run_multicast_aggregation_test(topo, int(sys.argv[3]), min_group_size, max_group_size, sys.argv[5], float(sys.argv[6]), False, False)
bandwidth_overhead_list.append(bandwidth_overhead_ratio)
flow_table_reduction_list.append(flow_table_reduction_ratio)
reducible_flow_table_reduction_list.append(reducible_flow_table_reduction_ratio)
num_trees_list.append(num_trees)
run_time_list.append(run_time)
end_time = time()
print ' '
print 'Similarity Type: ' + sys.argv[5]
print 'Similarity Threshold: ' + sys.argv[6]
print 'Average Bandwidth Overhead: ' + str(sum(bandwidth_overhead_list) / len(bandwidth_overhead_list))
print 'Average Flow Table Reduction: ' + str(sum(flow_table_reduction_list) / len(flow_table_reduction_list))
print 'Average Reducible Flow Table Reduction: ' + str(sum(reducible_flow_table_reduction_list) / len(reducible_flow_table_reduction_list))
print 'Average # Aggregated Trees: ' + str(float(sum(num_trees_list)) / len(num_trees_list))
print 'Average Tree Agg. Run-Time: ' + str(float(sum(run_time_list)) / len(run_time_list))
print 'Expected Sim Run-Time: ' + str((float(sum(run_time_list)) / len(run_time_list)) * num_trials)
print ' '
print 'Completed ' + str(num_trials) + ' trials in ' + str(end_time - start_time) + ' seconds (' + str(datetime.now()) + ')'
sys.exit() | apache-2.0 |
cwu2011/scikit-learn | sklearn/utils/tests/test_class_weight.py | 140 | 11909 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
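# Sanity check of the numbers above: "balanced" weights are n_samples / (n_classes * count),
# so with class counts [3, 1, 2] for classes [-2, -1, 0] and 6 samples this gives
# 6/(3*3), 6/(3*1), 6/(3*2) = [2/3, 2, 1], matching the assertion.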
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/api/joinstyle.py | 1 | 1625 | """
===========
Join styles
===========
Illustrate the three different join styles
"""
import numpy as np
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
def plot_angle(ax, x, y, angle, style):
phi = np.radians(angle)
xx = [x + .5, x, x + .5*np.cos(phi)]
yy = [y, y, y + .5*np.sin(phi)]
ax.plot(xx, yy, lw=8, color='blue', solid_joinstyle=style)
ax.plot(xx[1:], yy[1:], lw=1, color='black')
ax.plot(xx[1::-1], yy[1::-1], lw=1, color='black')
ax.plot(xx[1:2], yy[1:2], 'o', color='red', markersize=3)
ax.text(x, y + .2, '%.0f degrees' % angle)
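# For instance, plot_angle(ax, 0, 0, 90, 'miter') draws a right angle at the origin as a
# thick blue polyline using the 'miter' join style, overlaid with thin black reference
# segments and a red marker at the corner.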
fig, ax = plt.subplots()
ax.set_title('Join style')
for x, style in enumerate((('miter', 'round', 'bevel'))):
ax.text(x, 5, style)
for i in range(5):
plot_angle(ax, x, i, pow(2.0, 3 + i), style)
ax.set_xlim(-.5, 2.75)
ax.set_ylim(-.5, 5.5)
pltshow(plt)
| mit |
manipopopo/tensorflow | tensorflow/examples/tutorials/input_fn/boston.py | 76 | 2920 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def get_input_fn(data_set, num_epochs=None, shuffle=True):
return tf.estimator.inputs.pandas_input_fn(
x=pd.DataFrame({k: data_set[k].values for k in FEATURES}),
y=pd.Series(data_set[LABEL].values),
num_epochs=num_epochs,
shuffle=shuffle)
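# Sketch of how the helper above is used below: the defaults (num_epochs=None, shuffle=True)
# stream the training data indefinitely for train(), while num_epochs=1 and shuffle=False
# make evaluate() and predict() pass over the data exactly once, in order.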
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.estimator.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Train
regressor.train(input_fn=get_input_fn(training_set), steps=5000)
# Evaluate loss over one epoch of test_set.
ev = regressor.evaluate(
input_fn=get_input_fn(test_set, num_epochs=1, shuffle=False))
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions over a slice of prediction_set.
y = regressor.predict(
input_fn=get_input_fn(prediction_set, num_epochs=1, shuffle=False))
# .predict() returns an iterator of dicts; convert to a list and print
# predictions
predictions = list(p["predictions"] for p in itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
chrsrds/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 2 | 7777 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import pytest
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA, PCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
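# Shape sketch: with image_size=(8, 8) the returned Y is (n_samples, 64), built as
# U (n_samples, n_components) dot V (n_components, 64) plus Gaussian noise; each row of V
# is a flattened 8x8 image containing one active square patch.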
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert spca.components_.shape == (8, 10)
assert U.shape == (12, 8)
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert spca.components_.shape == (13, 10)
assert U.shape == (12, 13)
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert not np.all(spca_lars.components_ == 0)
assert_array_almost_equal(U1, U2)
def test_transform_nan():
    # Test that SparsePCA won't return NaN when a feature is zero in all samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert not np.any(np.isnan(estimator.fit_transform(Y)))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars', random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_allclose(model.components_,
V_init / np.linalg.norm(V_init, axis=1)[:, None])
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert pca.components_.shape == (8, 10)
assert U.shape == (12, 8)
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert pca.components_.shape == (13, 10)
assert U.shape == (12, 13)
# XXX: test always skipped
@pytest.mark.skipif(True, reason="skipping mini_batch_fit_transform.")
def test_mini_batch_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import joblib
_mp = joblib.parallel.multiprocessing
joblib.parallel.multiprocessing = None
try:
spca = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0)
U2 = spca.fit(Y).transform(Y)
finally:
joblib.parallel.multiprocessing = _mp
else: # we can efficiently use parallelism
spca = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0)
U2 = spca.fit(Y).transform(Y)
assert not np.all(spca_lars.components_ == 0)
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
def test_scaling_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng)
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=rng)
results_train = spca_lars.fit_transform(Y)
results_test = spca_lars.transform(Y[:10])
assert_allclose(results_train[0], results_test[0])
def test_pca_vs_spca():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng)
Z, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)
spca = SparsePCA(alpha=0, ridge_alpha=0, n_components=2)
pca = PCA(n_components=2)
pca.fit(Y)
spca.fit(Y)
results_test_pca = pca.transform(Z)
results_test_spca = spca.transform(Z)
assert_allclose(np.abs(spca.components_.dot(pca.components_.T)),
np.eye(2), atol=1e-5)
results_test_pca *= np.sign(results_test_pca[0, :])
results_test_spca *= np.sign(results_test_spca[0, :])
assert_allclose(results_test_pca, results_test_spca)
@pytest.mark.parametrize("spca", [SparsePCA, MiniBatchSparsePCA])
def test_spca_deprecation_warning(spca):
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)
warn_msg = "'normalize_components' has been deprecated in 0.22"
with pytest.warns(DeprecationWarning, match=warn_msg):
spca(normalize_components=True).fit(Y)
@pytest.mark.parametrize("spca", [SparsePCA, MiniBatchSparsePCA])
def test_spca_error_unormalized_components(spca):
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)
err_msg = "normalize_components=False is not supported starting "
with pytest.raises(NotImplementedError, match=err_msg):
spca(normalize_components=False).fit(Y)
| bsd-3-clause |
shujaatak/UAV_MissionPlanner | Lib/site-packages/numpy/lib/npyio.py | 53 | 59490 | __all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import itertools
import warnings
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
if sys.platform != 'cli':
from _compiled_base import packbits, unpackbits
else:
def packbits(*args, **kw):
raise NotImplementedError()
def unpackbits(*args, **kw):
raise NotImplementedError()
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
_string_like = _is_string_like
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError, "Illegal argument"
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if its already a gzip.GzipFile
g = GzipFile(fileobj=f.fileobj)
g.name = f.name
g.mode = f.mode
f = g
return f
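# Behavioural note on the wrapper above: gzip streams are only seekable forwards, so a seek
# to an earlier offset rewinds the member and the data is re-read (in 1024-byte chunks) up
# to the requested position, i.e. seeking backwards costs a fresh decompression pass.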
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
self._obj = obj
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError, key
def zipfile_factory(*args, **kwargs):
import zipfile
if sys.version_info >= (2, 5):
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ".npy" extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ".npy" extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ".npy" extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute lookup can be performed as an alternative
        to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.read(key)
if bytes.startswith(format.MAGIC_PREFIX):
value = BytesIO(bytes)
return format.read_array(value)
else:
return bytes
else:
raise KeyError, "%s is not a file in the archive" % key
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load a pickled, ``.npy``, or ``.npz`` binary file.
Parameters
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap`). The mode has no effect for pickled or
zipped files.
A memory-mapped array is stored on disk, and not directly loaded
into memory. However, it can be accessed and sliced like any
ndarray. Memory mapping is especially useful for accessing
small fragments of large files without reading the entire file
into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever is stored in the
pickle is returned.
- If the file is a ``.npy`` file, then an array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
own_fid = True
else:
fid = file
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz)
own_fid = False
return NpzFile(fid, own_fid=True)
elif magic == format.MAGIC_PREFIX: # .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else: # Try a pickle
try:
return _cload(fid)
except:
raise IOError, \
"Failed to interpret file %s as a pickle" % repr(file)
finally:
if own_fid:
fid.close()
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
are given, the corresponding variable names, in the ``.npz`` file will
match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
*args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
**kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with *args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with **kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : string
File name of .npz file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed .npz file format
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError, "Cannot use un-named variables and keyword %s" % key
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zip = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.iteritems():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
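# --- Editorial sketch (not part of the original module) ---
# `_savez` stages each array in a temporary .npy file and then copies that
# file into the archive under '<name>.npy'.  The helper below shows the same
# stage-then-zip pattern with only the standard library; the function name
# and the `payloads` mapping are illustrative.
def _demo_staged_zip(zip_path, payloads):
    """Write a ``{member_name: bytes}`` mapping into the zip at `zip_path`."""
    import os
    import tempfile
    import zipfile
    fd, tmpname = tempfile.mkstemp(suffix='-staging')
    os.close(fd)
    try:
        zf = zipfile.ZipFile(zip_path, mode='w')
        try:
            for name, data in payloads.items():
                with open(tmpname, 'wb') as fh:
                    fh.write(data)
                zf.write(tmpname, arcname=name)
        finally:
            zf.close()
    finally:
        os.remove(tmpname)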
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
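# --- Editorial sketch (not part of the original module) ---
# A quick illustration of the converter table built by `_getconv`: integer
# columns are parsed through float first (so '3.0' still yields 3) and
# booleans go through int.  The function name is illustrative.
def _demo_getconv():
    int_conv = _getconv(np.dtype(int))
    bool_conv = _getconv(np.dtype(bool))
    return int_conv('3.0'), bool_conv('1')   # -> (3, True)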
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is ``.gz`` or
``.bz2``, the file is first decompressed.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a record
data-type, the resulting array will be 1-dimensional, and each row
will be interpreted as an element of the array. In this case, the
number of columns used must match the number of fields in the
data-type.
comments : str, optional
The character used to indicate the start of a comment; default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data:
``converters = {3: lambda s: float(s or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. The default is False.
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
if delimiter is not None:
delimiter = asbytes(delimiter)
user_converters = converters
if usecols is not None:
usecols = list(usecols)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
fh = seek_gzip_factory(fname)
elif fname.endswith('.bz2'):
import bz2
fh = bz2.BZ2File(fname)
else:
fh = open(fname, 'U')
elif hasattr(fname, 'readline'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
return [dt.base] * int(np.prod(dt.shape))
else:
types = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt = flatten_dtype(tp)
types.extend(flat_dt)
return types
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip()
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in xrange(skiprows):
fh.readline()
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
while not first_vals:
first_line = fh.readline()
if not first_line: # EOF reached
raise IOError('End-of-file reached before encountering data.')
first_vals = split_line(first_line)
N = len(usecols or first_vals)
dtype_types = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in xrange(N)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).iteritems():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
X.append(tuple([conv(val) for (conv, val) in zip(converters, vals)]))
finally:
if own_fh:
fh.close()
if len(dtype_types) > 1:
# We're dealing with a structured array, with a dtype such as
# [('x', int), ('y', [('s', int), ('t', float)])]
#
# First, create the array using a flattened dtype:
# [('x', int), ('s', int), ('t', float)]
#
# Then, view the array using the specified dtype.
try:
X = np.array(X, dtype=np.dtype([('', t) for t in dtype_types]))
X = X.view(dtype)
except TypeError:
# In the case we have an object dtype
X = np.array(X, dtype=dtype)
else:
X = np.array(X, dtype)
X = np.squeeze(X)
if unpack:
return X.T
else:
return X
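# --- Editorial sketch (not part of the original module) ---
# For structured dtypes, `loadtxt` first builds the rows with a flattened,
# unnamed dtype and then reinterprets the buffer with ``.view(dtype)`` (see
# the block just above).  A minimal standalone version of that trick:
def _demo_flat_then_view():
    nested = np.dtype([('x', int), ('y', [('s', int), ('t', float)])])
    flat_types = [int, int, float]               # flattened field types
    rows = [(1, 2, 3.0), (4, 5, 6.0)]
    flat = np.array(rows, dtype=[('', t) for t in flat_types])
    return flat.view(nested)                     # nested structure restored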
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n'):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str
Character separating columns.
newline : str
        Character separating lines.
        .. versionadded:: 1.5.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
        ``+`` : Forces the result to be preceded with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
    This explanation of ``fmt`` is not complete; for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
# `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif type(fmt) is str:
if fmt.count('%') == 1:
fmt = [fmt, ]*ncol
format = delimiter.join(fmt)
elif fmt.count('%') != ncol:
raise AttributeError('fmt has wrong number of %% formats. %s'
% fmt)
else:
format = fmt
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
finally:
if own_fh:
fh.close()
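# --- Editorial sketch (not part of the original module) ---
# `savetxt` expands a single '%' format to one copy per column and joins the
# copies with the delimiter before writing each row.  A compressed
# restatement of that step (all names illustrative):
def _demo_row_format(fmt='%.3f', delimiter=',', ncol=3):
    row_format = delimiter.join([fmt] * ncol)
    return row_format % (1.0, 2.5, 3.25)         # -> '1.000,2.500,3.250'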
import re
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
            file.close()
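# --- Editorial sketch (not part of the original module) ---
# When the pattern has a single group, `fromregex` builds a plain array and
# then relabels it as a one-field structured array (see the branch above).
# The same reinterpretation in isolation; names are illustrative.
def _demo_single_field_view():
    dtype = np.dtype([('num', np.int64)])
    values = np.array([1312, 1534, 444], dtype=dtype['num'])
    values.dtype = dtype                         # relabel in place, no copy
    return values['num']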
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skiprows` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is `.gz` or
`.bz2`, the file is first decompressed.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skip_header : int, optional
        The number of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The number of lines to skip at the end of the file.
converters : variable or None, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable or None, optional
The set of strings corresponding to missing data.
filling_values : variable or None, optional
The set of values to be used as default when the data are missing.
usecols : sequence or None, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skiprows` lines.
        If `names` is a sequence or a single string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. An underscore is appended to excluded names:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
        Character(s) used in replacement of white spaces in the variable names.
By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
Examples
    --------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
        errmsg = "The input argument 'converters' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
if isinstance(fname, basestring):
fhd = np.lib._datasource.open(fname, 'U')
own_fhd = True
elif not hasattr(fname, 'read'):
raise TypeError("The input should be a string or a filehandle. "\
"(got %s instead)" % type(fname))
else:
fhd = fname
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn("The use of `skiprows` is deprecated.\n"\
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
fhd.readline()
# Keep on until we find the first valid values
first_values = None
while not first_values:
first_line = fhd.readline()
if not first_line:
raise IOError('End-of-file reached before encountering data.')
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = dtype.names
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn("The use of `missing` is deprecated.\n"\
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = map(itemgetter(i), rows)
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = itertools.imap(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
else:
rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = zip(names, column_types)
mdtype = zip(names, [np.bool] * len(column_types))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
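# --- Editorial sketch (not part of the original module) ---
# A compact usage example for the missing/filling machinery above.  The data
# are made up; empty fields count as missing and are replaced by -1.
def _demo_genfromtxt_missing():
    from io import BytesIO
    data = BytesIO(b"1,,3\n4,5,")
    return genfromtxt(data, delimiter=',', filling_values=-1)
    # expected: array([[ 1., -1.,  3.],
    #                  [ 4.,  5., -1.]])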
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
    kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
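# --- Editorial sketch (not part of the original module) ---
# Minimal use of `recfromcsv`: the first row supplies the (lower-cased)
# field names and each column's dtype is inferred.  The data are made up.
def _demo_recfromcsv():
    from io import BytesIO
    data = BytesIO(b"Name,Age\nalice,31\nbob,27")
    rec = recfromcsv(data)
    return rec.age                               # e.g. array([31, 27])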
| gpl-2.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/pyplots/fig_axes_labels_simple.py | 1 | 1396 | """
======================
Fig Axes Labels Simple
======================
"""
import numpy as np
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
fig = plt.figure()
fig.subplots_adjust(top=0.8)
ax1 = fig.add_subplot(211)
ax1.set_ylabel('volts')
ax1.set_title('a sine wave')
t = np.arange(0.0, 1.0, 0.01)
s = np.sin(2*np.pi*t)
line, = ax1.plot(t, s, color='blue', lw=2)
# Fixing random state for reproducibility
np.random.seed(19680801)
ax2 = fig.add_axes([0.15, 0.1, 0.7, 0.3])
n, bins, patches = ax2.hist(np.random.randn(1000), 50,
facecolor='yellow', edgecolor='yellow')
ax2.set_xlabel('time (s)')
pltshow(plt)
| mit |
sanketloke/scikit-learn | sklearn/feature_selection/variance_threshold.py | 123 | 2572 | # Author: Lars Buitinck
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
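# --- Editorial sketch (not part of scikit-learn) ---
# Typical use of the selector defined above: drop (near-)constant columns
# before fitting a downstream estimator.  The data and threshold are made up.
def _demo_variance_threshold():
    X = np.array([[0.0, 2.0, 0.1],
                  [0.0, 1.0, 4.2],
                  [0.0, 1.0, 1.3]])
    selector = VarianceThreshold(threshold=1e-3)
    X_reduced = selector.fit_transform(X)        # constant first column is dropped
    return selector.variances_, X_reduced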
| bsd-3-clause |
hdmetor/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
justincassidy/scikit-learn | sklearn/tree/tests/test_export.py | 130 | 9950 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
for finding in finditer("\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
| bsd-3-clause |
slabanja/ase | ase/calculators/jacapo/utils/bandstructure.py | 4 | 7089 | import os
import numpy as np
import matplotlib.pyplot as plt
from ase.calculators.jacapo import *
from ase.dft.dos import DOS
class BandStructure:
'''outline of class to facilitate band structure calculations
'''
def __init__(self,
atoms,
BZpath=[],
npoints=10,
outnc='harris.nc'):
"""Headline here ... XXX.
atoms is an ase.Atoms object with calculator
attached. Presumably the self-consistent charge density has
already been calculated, otherwise, it will be.
BZpath is a list of tuples describing the path through the
Brillouin zone. The tuples have the form (label, kpt), e.g. ::
[('$\Gamma$',[0.0, 0.0, 0.0]),
('X',[0.0, 0.5, 0.5]),
('L',[0.5, 0.0, 0.0]),
('$\Gamma$',[0.0, 0.0, 0.0])]
the label is used in the figure and can include latex markup.
npoints is the number of points on each segment. It can either
be a constant, which is used for every segment, or a list of
integers that is an integer for each segment.
"""
self.atoms = atoms
self.calc = atoms.get_calculator()
#first, we make sure the charge density is up to date.
self.calc.get_charge_density()
self.ef = self.calc.get_ef() #self-consistent fermi level
self.labels = [x[0] for x in BZpath]
self.kpt_path = [np.array(x[1],dtype=np.float) for x in BZpath]
self.npoints = npoints
#first, setup the kpt path
kpts = []
#start at second kpt and go to second to last segment
nsegments = len(self.kpt_path) - 1
for i in range(nsegments-1):
#get number of points on path. this counts the first point
try:
i_npt = npoints[i]
except TypeError:
i_npt = npoints
#this is the vector connecting the two endpoint kpts of a segment
kdiff = self.kpt_path[i+1] - self.kpt_path[i]
#make a vector of evenly spaced intervals, one longer than needed
#because we chop off the last entry.
for j in np.linspace(0,1,i_npt+1)[0:-1]:
k = self.kpt_path[i] + j*kdiff
#shift by small random amount to break symmetry and
#prevent time-inversion reduction
krand = (1. + np.random.random(3))/1.e4
k += krand
kpts.append(k)
#now fill in the last segment, and end on the last point
try:
i_npt = npoints[-1]
except TypeError:
i_npt = npoints
kdiff = self.kpt_path[-1] - self.kpt_path[-2]
for j in np.linspace(0,1,i_npt+1)[1:]:
k = self.kpt_path[-2] + j*kdiff
#shift by small random amount to break symmetry and
#prevent time-inversion reduction
krand = (1. + np.random.random(3))/1.e4
k += krand
kpts.append(k)
#these are now the points needed for the Harris calculation.
self.kpts = kpts
self.dos = DOS(self.calc)
self.dos_energies = self.dos.get_energies()
self.dos_dos = self.dos.get_dos()
#try to avoid rerunning the calculation if it is already done!
if os.path.exists(outnc):
self.calc = Jacapo(outnc)
else:
print 'calculation of harris required'
self.calc.set_nc(outnc)
#self.calc.debug=10
#save some time by not calculating stress
self.calc.set_stress(False)
#this seems to be necessary sometimes
self.calc.delete_ncattdimvar(outnc,
ncdims=['number_plane_waves'])
#this has to come after removing number_of_planewaves
self.calc.set_kpts(self.kpts)
#freeze charge density
self.calc.set_charge_mixing(updatecharge='No')
#and, run calculation
self.calc.calculate()
def plot(self):
'''
Make an interactive band-structure plot.
clicking on a band will make it thicker and print which band was selected.
'''
kpoints = self.calc.get_ibz_kpoints()
eigenvalues = self.calc.get_all_eigenvalues() - self.ef
#eigenvalues = np.array([self.calc.get_eigenvalues(kpt=i)-self.ef
# for i in range(len(kpoints))])
self.handles = [] #used to get band indexes from plot
fig = plt.figure()
#plot DOS in figure
ax = fig.add_subplot(122)
ax.plot(self.dos_dos,self.dos_energies)
plt.title('self-consistent Total DOS')
ax.set_xticks([])
ax.set_yticks([])
ax.set_ylim([-20,20])
ax = fig.add_subplot(121)
ax.set_title('Band structure')
def onpick(event):
'make picked line bolder, set oldline back to regular thickness'
self.lastartist.set_linewidth(1)
self.lastartist = thisline = event.artist
thisline.set_linewidth(5)
plt.draw() #needed to update linewidth
print 'Band %i selected' % self.handles.index(thisline)
#you could insert code here to plot wavefunction, etc...
fig.canvas.mpl_connect('pick_event',onpick)
#we use indices for x. the tick labels are not shown and the distance
#appears unimportant
xdata = range(len(eigenvalues))
nkpts, nbands = eigenvalues.shape
for i in range(nbands):
#eigenvalues has shape(nkpts,nbands)
#note the comma after line_handle
line_handle, = ax.plot(xdata,eigenvalues[:,i],'.-',ms=1,picker=2)
self.handles.append(line_handle)
self.lastartist = self.handles[-1]
#plot Fermi level
ax.plot([0,len(self.kpts)],[0,0],'k--',label='$E_f$')
plt.xlabel('|k|')
plt.ylabel('$E-E_f$ (eV)')
#set xtick locations and labels
xtick_locs = np.zeros(len(self.kpt_path))
try:
#this means the npoints is a list
i_npt = self.npoints[0]
            for j, npt in enumerate(self.npoints, 1):
xtick_locs[j] = xtick_locs[j-1] + npt
except TypeError:
#npoints is a single number
for j in range(1,len(self.labels)):
xtick_locs[j] = xtick_locs[j-1] + self.npoints
#the last location is off by one, so we fix it.
xtick_locs[-1] -= 1
ax.set_xlim([xtick_locs[0],xtick_locs[-1]])
ax.set_xticks(xtick_locs)
ax.set_xticklabels(self.labels)
#this seems reasonable to avoid very deep energy states and high energy states
ax.set_ylim([-20,20])
plt.show()
return fig
| gpl-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/scipy/optimize/_lsq/least_squares.py | 27 | 37725 | """Generic interface for least-square minimization."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm
from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator
from scipy.optimize import _minpack, OptimizeResult
from scipy.optimize._numdiff import approx_derivative, group_columns
from scipy._lib.six import string_types
from .trf import trf
from .dogbox import dogbox
from .common import EPS, in_bounds, make_strictly_feasible
TERMINATION_MESSAGES = {
-1: "Improper input parameters status returned from `leastsq`",
0: "The maximum number of function evaluations is exceeded.",
1: "`gtol` termination condition is satisfied.",
2: "`ftol` termination condition is satisfied.",
3: "`xtol` termination condition is satisfied.",
4: "Both `ftol` and `xtol` termination conditions are satisfied."
}
FROM_MINPACK_TO_COMMON = {
0: -1, # Improper input parameters from MINPACK.
1: 2,
2: 3,
3: 4,
4: 1,
5: 0
# There are 6, 7, 8 for too small tolerance parameters,
# but we guard against it by checking ftol, xtol, gtol beforehand.
}
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
n = x0.size
if diff_step is None:
epsfcn = EPS
else:
epsfcn = diff_step**2
# Compute MINPACK's `diag`, which is inverse of our `x_scale` and
# ``x_scale='jac'`` corresponds to ``diag=None``.
if isinstance(x_scale, string_types) and x_scale == 'jac':
diag = None
else:
diag = 1 / x_scale
full_output = True
col_deriv = False
factor = 100.0
if jac is None:
if max_nfev is None:
# n squared to account for Jacobian evaluations.
max_nfev = 100 * n * (n + 1)
x, info, status = _minpack._lmdif(
fun, x0, (), full_output, ftol, xtol, gtol,
max_nfev, epsfcn, factor, diag)
else:
if max_nfev is None:
max_nfev = 100 * n
x, info, status = _minpack._lmder(
fun, jac, x0, (), full_output, col_deriv,
ftol, xtol, gtol, max_nfev, factor, diag)
f = info['fvec']
if callable(jac):
J = jac(x)
else:
J = np.atleast_2d(approx_derivative(fun, x))
cost = 0.5 * np.dot(f, f)
g = J.T.dot(f)
g_norm = norm(g, ord=np.inf)
nfev = info['nfev']
njev = info.get('njev', None)
status = FROM_MINPACK_TO_COMMON[status]
active_mask = np.zeros_like(x0, dtype=int)
return OptimizeResult(
x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev, status=status)
def prepare_bounds(bounds, n):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, n)
if ub.ndim == 0:
ub = np.resize(ub, n)
return lb, ub
def check_tolerance(ftol, xtol, gtol):
message = "{} is too low, setting to machine epsilon {}."
if ftol < EPS:
warn(message.format("`ftol`", EPS))
ftol = EPS
if xtol < EPS:
warn(message.format("`xtol`", EPS))
xtol = EPS
if gtol < EPS:
warn(message.format("`gtol`", EPS))
gtol = EPS
return ftol, xtol, gtol
def check_x_scale(x_scale, x0):
if isinstance(x_scale, string_types) and x_scale == 'jac':
return x_scale
try:
x_scale = np.asarray(x_scale, dtype=float)
valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
except (ValueError, TypeError):
valid = False
if not valid:
raise ValueError("`x_scale` must be 'jac' or array_like with "
"positive numbers.")
if x_scale.ndim == 0:
x_scale = np.resize(x_scale, x0.shape)
if x_scale.shape != x0.shape:
raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
return x_scale
def check_jac_sparsity(jac_sparsity, m, n):
if jac_sparsity is None:
return None
if not issparse(jac_sparsity):
jac_sparsity = np.atleast_2d(jac_sparsity)
if jac_sparsity.shape != (m, n):
raise ValueError("`jac_sparsity` has wrong shape.")
return jac_sparsity, group_columns(jac_sparsity)
# Loss functions.
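# Each of the loss helpers below fills a preallocated (3, m) array ``rho`` in
# place: row 0 holds rho(z), row 1 rho'(z) and row 2 rho''(z), evaluated
# elementwise at z = (f / f_scale)**2 (see ``construct_loss_function``).
# With ``cost_only=True`` only row 0 is needed.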
def huber(z, rho, cost_only):
mask = z <= 1
rho[0, mask] = z[mask]
rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
if cost_only:
return
rho[1, mask] = 1
rho[1, ~mask] = z[~mask]**-0.5
rho[2, mask] = 0
rho[2, ~mask] = -0.5 * z[~mask]**-1.5
def soft_l1(z, rho, cost_only):
t = 1 + z
rho[0] = 2 * (t**0.5 - 1)
if cost_only:
return
rho[1] = t**-0.5
rho[2] = -0.5 * t**-1.5
def cauchy(z, rho, cost_only):
rho[0] = np.log1p(z)
if cost_only:
return
t = 1 + z
rho[1] = 1 / t
rho[2] = -1 / t**2
def arctan(z, rho, cost_only):
rho[0] = np.arctan(z)
if cost_only:
return
t = 1 + z**2
rho[1] = 1 / t
rho[2] = -2 * z / t**2
IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
cauchy=cauchy, arctan=arctan)
def construct_loss_function(m, loss, f_scale):
if loss == 'linear':
return None
if not callable(loss):
loss = IMPLEMENTED_LOSSES[loss]
rho = np.empty((3, m))
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
loss(z, rho, cost_only=cost_only)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
else:
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
rho = loss(z)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
return loss_function
def least_squares(
fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
"""Solve a nonlinear least-squares problem with bounds on the variables.
Given the residuals f(x) (an m-dimensional real function of n real
variables) and the loss function rho(s) (a scalar function), `least_squares`
finds a local minimum of the cost function F(x)::
minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
subject to lb <= x <= ub
The purpose of the loss function rho(s) is to reduce the influence of
outliers on the solution.
Parameters
----------
fun : callable
Function which computes the vector of residuals, with the signature
``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
respect to its first argument. The argument ``x`` passed to this
function is an ndarray of shape (n,) (never a scalar, even for n=1).
It must return a 1-d array_like of shape (m,) or a scalar. If the
argument ``x`` is complex or the function ``fun`` returns complex
residuals, it must be wrapped in a real function of real arguments,
as shown at the end of the Examples section.
x0 : array_like with shape (n,) or float
Initial guess on independent variables. If float, it will be treated
as a 1-d array with one element.
jac : {'2-point', '3-point', 'cs', callable}, optional
Method of computing the Jacobian matrix (an m-by-n matrix, where
element (i, j) is the partial derivative of f[i] with respect to
x[j]). The keywords select a finite difference scheme for numerical
estimation. The scheme '3-point' is more accurate, but requires
        twice as many operations as '2-point' (default). The
scheme 'cs' uses complex steps, and while potentially the most
accurate, it is applicable only when `fun` correctly handles
complex inputs and can be analytically continued to the complex
plane. Method 'lm' always uses the '2-point' scheme. If callable,
it is used as ``jac(x, *args, **kwargs)`` and should return a
good approximation (or the exact value) for the Jacobian as an
array_like (np.atleast_2d is applied), a sparse matrix or a
`scipy.sparse.linalg.LinearOperator`.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each array must match the size of `x0` or be a scalar, in the latter
case a bound will be the same for all variables. Use ``np.inf`` with
an appropriate sign to disable bounds on all or some variables.
method : {'trf', 'dogbox', 'lm'}, optional
Algorithm to perform minimization.
* 'trf' : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
* 'dogbox' : dogleg algorithm with rectangular trust regions,
typical use case is small problems with bounds. Not recommended
for problems with rank-deficient Jacobian.
* 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn't handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
Default is 'trf'. See Notes for more information.
ftol : float, optional
Tolerance for termination by the change of the cost function. Default
is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
        and there is adequate agreement between a local quadratic model and
the true model in the last step.
xtol : float, optional
Tolerance for termination by the change of the independent variables.
Default is 1e-8. The exact condition depends on the `method` used:
* For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``
* For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
a trust-region radius and ``xs`` is the value of ``x``
scaled according to `x_scale` parameter (see below).
gtol : float, optional
Tolerance for termination by the norm of the gradient. Default is 1e-8.
        The exact condition depends on the `method` used:
* For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
``g_scaled`` is the value of the gradient scaled to account for
the presence of the bounds [STIR]_.
* For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
``g_free`` is the gradient with respect to the variables which
are not in the optimal state on the boundary.
* For 'lm' : the maximum absolute value of the cosine of angles
between columns of the Jacobian and the residual vector is less
than `gtol`, or the residual vector is zero.
x_scale : array_like or 'jac', optional
Characteristic scale of each variable. Setting `x_scale` is equivalent
to reformulating the problem in scaled variables ``xs = x / x_scale``.
An alternative view is that the size of a trust region along j-th
dimension is proportional to ``x_scale[j]``. Improved convergence may
be achieved by setting `x_scale` such that a step of a given size
along any of the scaled variables has a similar effect on the cost
function. If set to 'jac', the scale is iteratively updated using the
inverse norms of the columns of the Jacobian matrix (as described in
[JJMore]_).
loss : str or callable, optional
Determines the loss function. The following keyword values are allowed:
* 'linear' (default) : ``rho(z) = z``. Gives a standard
least-squares problem.
* 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
approximation of l1 (absolute value) loss. Usually a good
choice for robust least squares.
* 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
similarly to 'soft_l1'.
* 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers
          influence, but may cause difficulties in the optimization process.
* 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
a single residual, has properties similar to 'cauchy'.
If callable, it must take a 1-d ndarray ``z=f**2`` and return an
array_like with shape (3, m) where row 0 contains function values,
row 1 contains first derivatives and row 2 contains second
derivatives. Method 'lm' supports only 'linear' loss.
f_scale : float, optional
Value of soft margin between inlier and outlier residuals, default
is 1.0. The loss function is evaluated as follows
``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
and ``rho`` is determined by `loss` parameter. This parameter has
no effect with ``loss='linear'``, but for other `loss` values it is
of crucial importance.
max_nfev : None or int, optional
Maximum number of function evaluations before the termination.
If None (default), the value is chosen automatically:
* For 'trf' and 'dogbox' : 100 * n.
* For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
otherwise (because 'lm' counts function calls in Jacobian
estimation).
diff_step : None or array_like, optional
Determines the relative step size for the finite difference
approximation of the Jacobian. The actual step is computed as
``x * diff_step``. If None (default), then `diff_step` is taken to be
a conventional "optimal" power of machine epsilon for the finite
difference scheme used [NR]_.
tr_solver : {None, 'exact', 'lsmr'}, optional
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
* 'exact' is suitable for not very large problems with dense
Jacobian matrices. The computational complexity per iteration is
comparable to a singular value decomposition of the Jacobian
matrix.
* 'lsmr' is suitable for problems with sparse and large Jacobian
matrices. It uses the iterative procedure
`scipy.sparse.linalg.lsmr` for finding a solution of a linear
least-squares problem and only requires matrix-vector product
evaluations.
If None (default) the solver is chosen based on the type of Jacobian
returned on the first iteration.
tr_options : dict, optional
Keyword options passed to trust-region solver.
* ``tr_solver='exact'``: `tr_options` are ignored.
* ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
Additionally ``method='trf'`` supports 'regularize' option
(bool, default is True) which adds a regularization term to the
normal equation, which improves convergence if the Jacobian is
rank-deficient [Byrd]_ (eq. 3.4).
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines the sparsity structure of the Jacobian matrix for finite
difference estimation, its shape must be (m, n). If the Jacobian has
only few non-zero elements in *each* row, providing the sparsity
structure will greatly speed up the computations [Curtis]_. A zero
entry means that a corresponding element in the Jacobian is identically
zero. If provided, forces the use of 'lsmr' trust-region solver.
If None (default) then dense differencing will be used. Has no effect
for 'lm' method.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations (not supported by 'lm'
method).
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same for
`jac`.
Returns
-------
`OptimizeResult` with the following fields defined:
x : ndarray, shape (n,)
Solution found.
cost : float
Value of the cost function at the solution.
fun : ndarray, shape (m,)
Vector of residuals at the solution.
jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
Modified Jacobian matrix at the solution, in the sense that J^T J
is a Gauss-Newton approximation of the Hessian of the cost function.
The type is the same as the one used by the algorithm.
    grad : ndarray, shape (n,)
Gradient of the cost function at the solution.
optimality : float
First-order optimality measure. In unconstrained problems, it is always
the uniform norm of the gradient. In constrained problems, it is the
quantity which was compared with `gtol` during iterations.
active_mask : ndarray of int, shape (n,)
Each component shows whether a corresponding constraint is active
(that is, whether a variable is at the bound):
* 0 : a constraint is not active.
* -1 : a lower bound is active.
* 1 : an upper bound is active.
Might be somewhat arbitrary for 'trf' method as it generates a sequence
of strictly feasible iterates and `active_mask` is determined within a
tolerance threshold.
nfev : int
Number of function evaluations done. Methods 'trf' and 'dogbox' do not
count function calls for numerical Jacobian approximation, as opposed
to 'lm' method.
njev : int or None
Number of Jacobian evaluations done. If numerical Jacobian
approximation is used in 'lm' method, it is set to None.
status : int
The reason for algorithm termination:
* -1 : improper input parameters status returned from MINPACK.
* 0 : the maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `ftol` termination condition is satisfied.
* 3 : `xtol` termination condition is satisfied.
* 4 : Both `ftol` and `xtol` termination conditions are satisfied.
message : str
Verbal description of the termination reason.
success : bool
True if one of the convergence criteria is satisfied (`status` > 0).
See Also
--------
leastsq : A legacy wrapper for the MINPACK implementation of the
Levenberg-Marquadt algorithm.
curve_fit : Least-squares minimization applied to a curve fitting problem.
Notes
-----
Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
algorithms implemented in MINPACK (lmder, lmdif). It runs the
Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
    The implementation is based on the paper [JJMore]_; it is very robust and
efficient with a lot of smart tricks. It should be your first choice
for unconstrained problems. Note that it doesn't support bounds. Also
it doesn't work when m < n.
Method 'trf' (Trust Region Reflective) is motivated by the process of
solving a system of equations, which constitute the first-order optimality
condition for a bound-constrained minimization problem as formulated in
[STIR]_. The algorithm iteratively solves trust-region subproblems
augmented by a special diagonal quadratic term and with trust-region shape
determined by the distance from the bounds and the direction of the
    gradient. These enhancements help to avoid making steps directly into bounds
and efficiently explore the whole space of variables. To further improve
convergence, the algorithm considers search directions reflected from the
bounds. To obey theoretical requirements, the algorithm keeps iterates
strictly feasible. With dense Jacobians trust-region subproblems are
solved by an exact method very similar to the one described in [JJMore]_
(and implemented in MINPACK). The difference from the MINPACK
implementation is that a singular value decomposition of a Jacobian
matrix is done once per iteration, instead of a QR decomposition and series
of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace
approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
The subspace is spanned by a scaled gradient and an approximate
Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
constraints are imposed the algorithm is very similar to MINPACK and has
    generally comparable performance. The algorithm works robustly in
    unbounded and bounded problems, which is why it is chosen as the default
    algorithm.
Method 'dogbox' operates in a trust-region framework, but considers
rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
The intersection of a current trust region and initial bounds is again
rectangular, so on each iteration a quadratic minimization problem subject
to bound constraints is solved approximately by Powell's dogleg method
[NumOpt]_. The required Gauss-Newton step can be computed exactly for
dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
sparse Jacobians. The algorithm is likely to exhibit slow convergence when
the rank of Jacobian is less than the number of variables. The algorithm
often outperforms 'trf' in bounded problems with a small number of
variables.
Robust loss functions are implemented as described in [BA]_. The idea
is to modify a residual vector and a Jacobian matrix on each iteration
such that computed gradient and Gauss-Newton Hessian approximation match
the true gradient and Hessian approximation of the cost function. Then
the algorithm proceeds in a normal way, i.e. robust loss functions are
implemented as a simple wrapper over standard least-squares algorithms.
.. versionadded:: 0.17.0
References
----------
.. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", Sec. 5.7.
.. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
solution of the trust region problem by minimization over
two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
1988.
.. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of
Mathematics and its Applications, 13, pp. 117-120, 1974.
.. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
Dogleg Approach for Unconstrained and Bound Constrained
Nonlinear Optimization", WSEAS International Conference on
Applied Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
2nd edition", Chapter 4.
.. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
Proceedings of the International Workshop on Vision Algorithms:
Theory and Practice, pp. 298-372, 1999.
Examples
--------
In this example we find a minimum of the Rosenbrock function without bounds
    on independent variables.
>>> def fun_rosenbrock(x):
... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
Notice that we only provide the vector of the residuals. The algorithm
constructs the cost function as a sum of squares of the residuals, which
gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
>>> from scipy.optimize import least_squares
>>> x0_rosenbrock = np.array([2, 2])
>>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
>>> res_1.x
array([ 1., 1.])
>>> res_1.cost
9.8669242910846867e-30
>>> res_1.optimality
8.8928864934219529e-14
We now constrain the variables, in such a way that the previous solution
becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and
    leave ``x[0]`` unconstrained. To this end, we specify the `bounds` parameter
to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
We also provide the analytic Jacobian:
>>> def jac_rosenbrock(x):
... return np.array([
... [-20 * x[0], 10],
... [-1, 0]])
Putting this all together, we see that the new solution lies on the bound:
>>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
... bounds=([-np.inf, 1.5], np.inf))
>>> res_2.x
array([ 1.22437075, 1.5 ])
>>> res_2.cost
0.025213093946805685
>>> res_2.optimality
1.5885401433157753e-07
Now we solve a system of equations (i.e., the cost function should be zero
at a minimum) for a Broyden tridiagonal vector-valued function of 100000
variables:
>>> def fun_broyden(x):
... f = (3 - x) * x + 1
... f[1:] -= x[:-1]
... f[:-1] -= 2 * x[1:]
... return f
The corresponding Jacobian matrix is sparse. We tell the algorithm to
estimate it by finite differences and provide the sparsity structure of
Jacobian to significantly speed up this process.
>>> from scipy.sparse import lil_matrix
>>> def sparsity_broyden(n):
... sparsity = lil_matrix((n, n), dtype=int)
... i = np.arange(n)
... sparsity[i, i] = 1
... i = np.arange(1, n)
... sparsity[i, i - 1] = 1
... i = np.arange(n - 1)
... sparsity[i, i + 1] = 1
... return sparsity
...
>>> n = 100000
>>> x0_broyden = -np.ones(n)
...
>>> res_3 = least_squares(fun_broyden, x0_broyden,
... jac_sparsity=sparsity_broyden(n))
>>> res_3.cost
4.5687069299604613e-23
>>> res_3.optimality
1.1650454296851518e-11
Let's also solve a curve fitting problem using robust loss function to
take care of outliers in the data. Define the model function as
``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
observation and a, b, c are parameters to estimate.
First, define the function which generates the data with noise and
outliers, define the model parameters, and generate data:
>>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):
... y = a + b * np.exp(t * c)
...
... rnd = np.random.RandomState(random_state)
... error = noise * rnd.randn(t.size)
... outliers = rnd.randint(0, t.size, n_outliers)
... error[outliers] *= 10
...
... return y + error
...
>>> a = 0.5
>>> b = 2.0
>>> c = -1
>>> t_min = 0
>>> t_max = 10
>>> n_points = 15
...
>>> t_train = np.linspace(t_min, t_max, n_points)
>>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
Define function for computing residuals and initial estimate of
parameters.
>>> def fun(x, t, y):
... return x[0] + x[1] * np.exp(x[2] * t) - y
...
>>> x0 = np.array([1.0, 1.0, 0.0])
Compute a standard least-squares solution:
>>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
Now compute two solutions with two different robust loss functions. The
parameter `f_scale` is set to 0.1, meaning that inlier residuals should
not significantly exceed 0.1 (the noise level used).
>>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
... args=(t_train, y_train))
>>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
... args=(t_train, y_train))
And finally plot all the curves. We see that by selecting an appropriate
`loss` we can get estimates close to optimal even in the presence of
strong outliers. But keep in mind that generally it is recommended to try
'soft_l1' or 'huber' losses first (if at all necessary) as the other two
    options may cause difficulties in the optimization process.
>>> t_test = np.linspace(t_min, t_max, n_points * 10)
>>> y_true = gen_data(t_test, a, b, c)
>>> y_lsq = gen_data(t_test, *res_lsq.x)
>>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
>>> y_log = gen_data(t_test, *res_log.x)
...
>>> import matplotlib.pyplot as plt
>>> plt.plot(t_train, y_train, 'o')
>>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
>>> plt.plot(t_test, y_lsq, label='linear loss')
>>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
>>> plt.plot(t_test, y_log, label='cauchy loss')
>>> plt.xlabel("t")
>>> plt.ylabel("y")
>>> plt.legend()
>>> plt.show()
In the next example, we show how complex-valued residual functions of
complex variables can be optimized with ``least_squares()``. Consider the
following function:
>>> def f(z):
... return z - (0.5 + 0.5j)
We wrap it into a function of real variables that returns real residuals
by simply handling the real and imaginary parts as independent variables:
>>> def f_wrap(x):
... fx = f(x[0] + 1j*x[1])
... return np.array([fx.real, fx.imag])
Thus, instead of the original m-dimensional complex function of n complex
variables we optimize a 2m-dimensional real function of 2n real variables:
>>> from scipy.optimize import least_squares
>>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))
>>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j
>>> z
(0.49999999999925893+0.49999999999925893j)
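    Finally, `loss` may also be a callable returning the values and first two
    derivatives of ``rho`` evaluated at ``z = f**2``. As a sketch, here is a
    hypothetical re-implementation of the built-in 'cauchy' loss, reusing the
    curve-fitting setup above:
    >>> def cauchy_loss(z):
    ...     rho = np.empty((3, z.size))
    ...     rho[0] = np.log1p(z)
    ...     rho[1] = 1 / (1 + z)
    ...     rho[2] = -1 / (1 + z)**2
    ...     return rho
    ...
    >>> res_custom = least_squares(fun, x0, loss=cauchy_loss, f_scale=0.1,
    ...                            args=(t_train, y_train))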
"""
if method not in ['trf', 'dogbox', 'lm']:
raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
"callable.")
if tr_solver not in [None, 'exact', 'lsmr']:
raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
if loss not in IMPLEMENTED_LOSSES and not callable(loss):
raise ValueError("`loss` must be one of {0} or a callable."
.format(IMPLEMENTED_LOSSES.keys()))
if method == 'lm' and loss != 'linear':
raise ValueError("method='lm' supports only 'linear' loss function.")
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
if len(bounds) != 2:
raise ValueError("`bounds` must contain 2 elements.")
if max_nfev is not None and max_nfev <= 0:
raise ValueError("`max_nfev` must be None or positive integer.")
if np.iscomplexobj(x0):
raise ValueError("`x0` must be real.")
x0 = np.atleast_1d(x0).astype(float)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = prepare_bounds(bounds, x0.shape[0])
if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
raise ValueError("Method 'lm' doesn't support bounds.")
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if np.any(lb >= ub):
raise ValueError("Each lower bound must be strictly less than each "
"upper bound.")
if not in_bounds(x0, lb, ub):
raise ValueError("`x0` is infeasible.")
x_scale = check_x_scale(x_scale, x0)
ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol)
def fun_wrapped(x):
return np.atleast_1d(fun(x, *args, **kwargs))
if method == 'trf':
x0 = make_strictly_feasible(x0, lb, ub)
f0 = fun_wrapped(x0)
if f0.ndim != 1:
raise ValueError("`fun` must return at most 1-d array_like.")
if not np.all(np.isfinite(f0)):
raise ValueError("Residuals are not finite in the initial point.")
n = x0.size
m = f0.size
if method == 'lm' and m < n:
raise ValueError("Method 'lm' doesn't work when the number of "
"residuals is less than the number of variables.")
loss_function = construct_loss_function(m, loss, f_scale)
if callable(loss):
rho = loss_function(f0)
if rho.shape != (3, m):
raise ValueError("The return value of `loss` callable has wrong "
"shape.")
initial_cost = 0.5 * np.sum(rho[0])
elif loss_function is not None:
initial_cost = loss_function(f0, cost_only=True)
else:
initial_cost = 0.5 * np.dot(f0, f0)
if callable(jac):
J0 = jac(x0, *args, **kwargs)
if issparse(J0):
J0 = csr_matrix(J0)
def jac_wrapped(x, _=None):
return csr_matrix(jac(x, *args, **kwargs))
elif isinstance(J0, LinearOperator):
def jac_wrapped(x, _=None):
return jac(x, *args, **kwargs)
else:
J0 = np.atleast_2d(J0)
def jac_wrapped(x, _=None):
return np.atleast_2d(jac(x, *args, **kwargs))
else: # Estimate Jacobian by finite differences.
if method == 'lm':
if jac_sparsity is not None:
raise ValueError("method='lm' does not support "
"`jac_sparsity`.")
if jac != '2-point':
warn("jac='{0}' works equivalently to '2-point' "
"for method='lm'.".format(jac))
J0 = jac_wrapped = None
else:
if jac_sparsity is not None and tr_solver == 'exact':
raise ValueError("tr_solver='exact' is incompatible "
"with `jac_sparsity`.")
jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
def jac_wrapped(x, f):
J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
f0=f, bounds=bounds, args=args,
kwargs=kwargs, sparsity=jac_sparsity)
if J.ndim != 2: # J is guaranteed not sparse.
J = np.atleast_2d(J)
return J
J0 = jac_wrapped(x0, f0)
if J0 is not None:
if J0.shape != (m, n):
raise ValueError(
"The return value of `jac` has wrong shape: expected {0}, "
"actual {1}.".format((m, n), J0.shape))
if not isinstance(J0, np.ndarray):
if method == 'lm':
raise ValueError("method='lm' works only with dense "
"Jacobian matrices.")
if tr_solver == 'exact':
raise ValueError(
"tr_solver='exact' works only with dense "
"Jacobian matrices.")
jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
if isinstance(J0, LinearOperator) and jac_scale:
raise ValueError("x_scale='jac' can't be used when `jac` "
"returns LinearOperator.")
if tr_solver is None:
if isinstance(J0, np.ndarray):
tr_solver = 'exact'
else:
tr_solver = 'lsmr'
if method == 'lm':
result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
max_nfev, x_scale, diff_step)
elif method == 'trf':
result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
gtol, max_nfev, x_scale, loss_function, tr_solver,
tr_options.copy(), verbose)
elif method == 'dogbox':
if tr_solver == 'lsmr' and 'regularize' in tr_options:
warn("The keyword 'regularize' in `tr_options` is not relevant "
"for 'dogbox' method.")
tr_options = tr_options.copy()
del tr_options['regularize']
result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
xtol, gtol, max_nfev, x_scale, loss_function,
tr_solver, tr_options, verbose)
result.message = TERMINATION_MESSAGES[result.status]
result.success = result.status > 0
if verbose >= 1:
print(result.message)
print("Function evaluations {0}, initial cost {1:.4e}, final cost "
"{2:.4e}, first-order optimality {3:.2e}."
.format(result.nfev, initial_cost, result.cost,
result.optimality))
return result
| mit |
cainiaocome/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
    result[i, j] is the similarity, computed with the given ``similarity``
    function, between a's bicluster i and b's bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
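    Examples
    --------
    A minimal illustration: the same two biclusters listed in opposite
    order match perfectly, so the score is maximal.
    >>> from sklearn.metrics import consensus_score
    >>> a = ([[True, False], [False, True]], [[False, True], [True, False]])
    >>> b = ([[False, True], [True, False]], [[True, False], [False, True]])
    >>> consensus_score(a, b)
    1.0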
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
| bsd-3-clause |
fenglu-g/incubator-airflow | tests/contrib/operators/test_hive_to_dynamodb_operator.py | 7 | 5053 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
import datetime
import mock
import pandas as pd
from airflow import configuration, DAG
from airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook
import airflow.contrib.operators.hive_to_dynamodb
configuration.load_test_config()
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
try:
from moto import mock_dynamodb2
except ImportError:
mock_dynamodb2 = None
class HiveToDynamoDBTransferOperatorTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG('test_dag_id', default_args=args)
self.dag = dag
self.sql = 'SELECT 1'
self.hook = AwsDynamoDBHook(
aws_conn_id='aws_default', region_name='us-east-1')
@staticmethod
def process_data(data, *args, **kwargs):
return json.loads(data.to_json(orient='records'))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_get_conn_returns_a_boto3_connection(self):
hook = AwsDynamoDBHook(aws_conn_id='aws_default')
self.assertIsNotNone(hook.get_conn())
@mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_pandas_df',
return_value=pd.DataFrame(data=[('1', 'sid')], columns=['id', 'name']))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_get_records_with_schema(self, get_results_mock):
# this table needs to be created in production
self.hook.get_conn().create_table(
TableName='test_airflow',
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'name',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
operator = airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
sql=self.sql,
table_name="test_airflow",
task_id='hive_to_dynamodb_check',
table_keys=['id'],
dag=self.dag)
operator.execute(None)
table = self.hook.get_conn().Table('test_airflow')
table.meta.client.get_waiter(
'table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 1)
@mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_pandas_df',
return_value=pd.DataFrame(data=[('1', 'sid'), ('1', 'gupta')], columns=['id', 'name']))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_pre_process_records_with_schema(self, get_results_mock):
# this table needs to be created in production
self.hook.get_conn().create_table(
TableName='test_airflow',
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'name',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
operator = airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
sql=self.sql,
table_name='test_airflow',
task_id='hive_to_dynamodb_check',
table_keys=['id'],
pre_process=self.process_data,
dag=self.dag)
operator.execute(None)
table = self.hook.get_conn().Table('test_airflow')
table.meta.client.get_waiter('table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 1)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
molpopgen/fwdpy11 | examples/tskit/precapitate.py | 1 | 6357 | #
# Copyright (C) 2020 Kevin Thornton <[email protected]>
#
# This file is part of fwdpy11.
#
# fwdpy11 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fwdpy11 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fwdpy11. If not, see <http://www.gnu.org/licenses/>.
#
import argparse
import concurrent.futures
import sys
from collections import namedtuple
import msprime
import numpy as np
import pandas as pd
import fwdpy11
SimOutput = namedtuple("SimOutput", ["S2N", "Pi2N", "Sn", "Pin"])
def make_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("-N", type=int, help="Diploid population size")
parser.add_argument("--rho", type=float, help="4Nr")
parser.add_argument("--theta", type=float, help="4Nu")
parser.add_argument(
"--simlen", type=int, help="Generations to run the forward simulation"
)
parser.add_argument("--seed", type=int, help="Random number seed")
parser.add_argument("--nreps", type=int, help="Number of simulation replicates")
parser.add_argument("--nsam", type=int, help="Sample size to analyze")
optional = parser.add_argument_group("Optional arguments")
optional.add_argument("--model", type=str, help="msprime model", default="dtwf")
return parser
def run_msprime(Ne, rho, theta, model, seed):
ts = msprime.simulate(
2 * Ne,
Ne=Ne,
model=model,
random_seed=seed,
recombination_rate=rho / Ne / 4,
mutation_rate=theta / Ne / 4,
)
return ts
def run_msprime_get_stats(Ne, nsam, rho, theta, model, seed):
ts = run_msprime(Ne, rho, theta, model, seed)
samples = [i for i in ts.samples()]
S1 = len(ts.tables.sites)
pi1 = ts.diversity([samples])[0]
rsample = np.random.choice(samples, nsam, replace=False)
S2 = ts.segregating_sites([rsample])[0]
pi2 = ts.diversity([rsample])[0]
return SimOutput(S1, pi1, S2, pi2)
def pi_from_fs(fs):
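    # Nucleotide diversity (pi) from the site-frequency spectrum: each
    # polymorphic bin i (1 <= i <= nsam) contributes
    # fs[i] * 2 * (i / nsam) * ((nsam - i) / (nsam - 1)),
    # the unbiased per-site heterozygosity for that allele count.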
nsam = len(fs) - 2
i = np.arange(nsam) + 1
pi = fs[1:-1] * 2 * (i / nsam) * ((nsam - i) / (nsam - 1))
return pi.sum()
def process_sim(pop, sample, check_fixations):
"""
Get number of segregating sites and
    heterozygosity from the frequency spectrum
"""
fs = pop.tables.fs([sample])
if check_fixations:
assert fs.data[-1] == 0, "hmmm...shouldn't be any fixations!"
S = fs.sum()
pi = pi_from_fs(fs)
return S, pi
def run_sim(N, rho, theta, simlen, model, nsam, fwdpy11_seed, msprime_seed):
np.random.seed(msprime_seed)
ts = run_msprime(N, rho, 0.0, model, msprime_seed)
pop = fwdpy11.DiploidPopulation.create_from_tskit(ts)
del ts # No longer needed
pdict = {
"nregions": [],
"sregions": [],
"recregions": [
fwdpy11.PoissonInterval(0, pop.tables.genome_length, rho / pop.N / 4)
],
"rates": (0, 0, None),
"gvalue": fwdpy11.Multiplicative(1),
"demography": fwdpy11.DiscreteDemography(),
"simlen": simlen,
}
params = fwdpy11.ModelParams(**pdict)
rng = fwdpy11.GSLrng(fwdpy11_seed)
fwdpy11.evolvets(rng, pop, params, 100)
fwdpy11.infinite_sites(rng, pop, theta / pop.N / 4)
an = pop.alive_nodes
S2N, pi2N = process_sim(pop, an, check_fixations=True)
rn = np.random.choice(an, size=nsam, replace=False)
Sn, pin = process_sim(pop, rn, check_fixations=False)
return SimOutput(S2N, pi2N, Sn, pin)
if __name__ == "__main__":
parser = make_parser()
args = parser.parse_args(sys.argv[1:])
np.random.seed(args.seed)
msprime_seeds = []
fwdpy11_seeds = []
for i in range(args.nreps):
candidate = np.random.randint(0, np.iinfo(np.uint32).max)
while candidate in msprime_seeds:
candidate = np.random.randint(0, np.iinfo(np.uint32).max)
msprime_seeds.append(candidate)
candidate = np.random.randint(0, np.iinfo(np.uint32).max)
while candidate in fwdpy11_seeds:
candidate = np.random.randint(0, np.iinfo(np.uint32).max)
fwdpy11_seeds.append(candidate)
results = []
with concurrent.futures.ProcessPoolExecutor() as executor:
futures = {
executor.submit(
run_sim,
args.N,
args.rho,
args.theta,
args.simlen,
args.model,
args.nsam,
i,
j,
)
for i, j in zip(msprime_seeds, fwdpy11_seeds)
}
for future in concurrent.futures.as_completed(futures):
results.append(future.result())
results = pd.DataFrame(results, columns=SimOutput._fields)
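    # Watterson's estimator of theta: segregating sites divided by the
    # harmonic number a_n = sum_{i=1}^{nsam-1} 1/i.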
results["watterson_n"] = results.Sn / (1.0 / (np.arange(args.nsam - 1) + 1)).sum()
print(f"Means from fwdpy11:\n{results.mean()}")
msprime_seeds = []
for i in range(args.nreps):
candidate = np.random.randint(0, np.iinfo(np.uint32).max)
while candidate in msprime_seeds:
candidate = np.random.randint(0, np.iinfo(np.uint32).max)
msprime_seeds.append(candidate)
msprime_results = []
with concurrent.futures.ProcessPoolExecutor() as executor:
futures = {
executor.submit(
run_msprime_get_stats,
args.N,
args.nsam,
args.rho,
args.theta,
args.model,
i,
)
for i in msprime_seeds
}
for future in concurrent.futures.as_completed(futures):
msprime_results.append(future.result())
msprime_results = pd.DataFrame(msprime_results, columns=SimOutput._fields)
msprime_results["watterson_n"] = (
msprime_results.Sn / (1.0 / (np.arange(args.nsam - 1) + 1)).sum()
)
print(f"Means from msprime:\n{msprime_results.mean()}")
| gpl-3.0 |
moinulkuet/machine-learning | Part 3 - Classification/Section 16 - Support Vector Machine (SVM)/classification_template.py | 37 | 2538 | # Classification template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting classifier to the Training set
# Create your classifier here
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Visualising the Training set results
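# The decision-region plots below build a dense grid over the two scaled
# features, colour each grid point by the class the classifier predicts,
# and overlay the actual observations on top.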
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show() | gpl-3.0 |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/matplotlib/tests/test_legend.py | 9 | 9232 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
try:
# mock in python 3.3+
from unittest import mock
except ImportError:
import mock
from nose.tools import assert_equal
import numpy as np
from matplotlib.testing.decorators import image_comparison, cleanup
from matplotlib.cbook import MatplotlibDeprecationWarning
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.patches as mpatches
@image_comparison(baseline_images=['legend_auto1'], remove_text=True)
def test_legend_auto1():
'Test automatic legend placement'
fig = plt.figure()
ax = fig.add_subplot(111)
x = np.arange(100)
ax.plot(x, 50 - x, 'o', label='y=1')
ax.plot(x, x - 50, 'o', label='y=-1')
ax.legend(loc=0)
@image_comparison(baseline_images=['legend_auto2'], remove_text=True)
def test_legend_auto2():
'Test automatic legend placement'
fig = plt.figure()
ax = fig.add_subplot(111)
x = np.arange(100)
b1 = ax.bar(x, x, color='m')
b2 = ax.bar(x, x[::-1], color='g')
ax.legend([b1[0], b2[0]], ['up', 'down'], loc=0)
@image_comparison(baseline_images=['legend_auto3'])
def test_legend_auto3():
'Test automatic legend placement'
fig = plt.figure()
ax = fig.add_subplot(111)
x = [0.9, 0.1, 0.1, 0.9, 0.9, 0.5]
y = [0.95, 0.95, 0.05, 0.05, 0.5, 0.5]
ax.plot(x, y, 'o-', label='line')
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.0, 1.0)
ax.legend(loc=0)
@image_comparison(baseline_images=['legend_various_labels'], remove_text=True)
def test_various_labels():
# tests all sorts of label types
fig = plt.figure()
ax = fig.add_subplot(121)
ax.plot(list(xrange(4)), 'o', label=1)
ax.plot(np.linspace(4, 4.1), 'o', label='D\xe9velopp\xe9s')
ax.plot(list(xrange(4, 1, -1)), 'o', label='__nolegend__')
ax.legend(numpoints=1, loc=0)
@image_comparison(baseline_images=['rgba_alpha'],
extensions=['png'], remove_text=True)
def test_alpha_rgba():
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
ax.plot(range(10), lw=5)
leg = plt.legend(['Longlabel that will go away'], loc=10)
leg.legendPatch.set_facecolor([1, 0, 0, 0.5])
@image_comparison(baseline_images=['rcparam_alpha'],
extensions=['png'], remove_text=True)
def test_alpha_rcparam():
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
ax.plot(range(10), lw=5)
with mpl.rc_context(rc={'legend.framealpha': .75}):
leg = plt.legend(['Longlabel that will go away'], loc=10)
        # this alpha is going to be over-ridden by the rcparam which
# sets the alpha of the patch to be non-None which causes the alpha
# value of the face color to be discarded. This behavior may not be
# ideal, but it is what it is and we should keep track of it changing
leg.legendPatch.set_facecolor([1, 0, 0, 0.5])
@image_comparison(baseline_images=['fancy'], remove_text=True)
def test_fancy():
# using subplot triggers some offsetbox functionality untested elsewhere
plt.subplot(121)
plt.scatter(list(xrange(10)), list(xrange(10, 0, -1)), label='XX\nXX')
plt.plot([5] * 10, 'o--', label='XX')
plt.errorbar(list(xrange(10)), list(xrange(10)), xerr=0.5, yerr=0.5, label='XX')
plt.legend(loc="center left", bbox_to_anchor=[1.0, 0.5],
ncol=2, shadow=True, title="My legend", numpoints=1)
@image_comparison(baseline_images=['framealpha'], remove_text=True)
def test_framealpha():
x = np.linspace(1, 100, 100)
y = x
plt.plot(x, y, label='mylabel', lw=10)
plt.legend(framealpha=0.5)
@image_comparison(baseline_images=['scatter_rc3', 'scatter_rc1'], remove_text=True)
def test_rc():
# using subplot triggers some offsetbox functionality untested elsewhere
fig = plt.figure()
ax = plt.subplot(121)
ax.scatter(list(xrange(10)), list(xrange(10, 0, -1)), label='three')
ax.legend(loc="center left", bbox_to_anchor=[1.0, 0.5],
title="My legend")
mpl.rcParams['legend.scatterpoints'] = 1
fig = plt.figure()
ax = plt.subplot(121)
ax.scatter(list(xrange(10)), list(xrange(10, 0, -1)), label='one')
ax.legend(loc="center left", bbox_to_anchor=[1.0, 0.5],
title="My legend")
@image_comparison(baseline_images=['legend_expand'], remove_text=True)
def test_legend_expand():
'Test expand mode'
legend_modes = [None, "expand"]
fig, axes_list = plt.subplots(len(legend_modes), 1)
x = np.arange(100)
for ax, mode in zip(axes_list, legend_modes):
ax.plot(x, 50 - x, 'o', label='y=1')
l1 = ax.legend(loc=2, mode=mode)
ax.add_artist(l1)
ax.plot(x, x - 50, 'o', label='y=-1')
l2 = ax.legend(loc=5, mode=mode)
ax.add_artist(l2)
ax.legend(loc=3, mode=mode, ncol=2)
@cleanup
def test_legend_remove():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
lines = ax.plot(range(10))
leg = fig.legend(lines, "test")
leg.remove()
assert_equal(fig.legends, [])
leg = ax.legend("test")
leg.remove()
assert ax.get_legend() is None
class TestLegendFunction(object):
# Tests the legend function on the Axes and pyplot.
deprecation_message = ('The "loc" positional argument '
'to legend is deprecated. Please use '
'the "loc" keyword instead.')
@cleanup
def test_legend_label_loc_args(self):
# Check the deprecated warning is created and that the appropriate
# call to Legend is made. This wouldn't actually create a valid
# legend as there is no artist to legendify, but that doesn't matter.
with mock.patch('matplotlib.cbook.warn_deprecated') as deprecation:
with mock.patch('matplotlib.legend.Legend') as Legend:
plt.legend(['hello world'], 1)
deprecation.assert_called_with('1.4', self.deprecation_message)
Legend.assert_called_with(plt.gca(), [], ['hello world'], loc=1)
@cleanup
def test_old_legend_handler_interface(self):
# Check the deprecated warning is created and that the appropriate
# call to the legend handler is made.
class AnyObject(object):
pass
class AnyObjectHandler(object):
def __call__(self, legend, orig_handle, fontsize, handlebox):
x0, y0 = handlebox.xdescent, handlebox.ydescent
width, height = handlebox.width, handlebox.height
patch = mpatches.Rectangle([x0, y0], width, height, facecolor='red',
edgecolor='black', hatch='xx', lw=3,
transform=handlebox.get_transform())
handlebox.add_artist(patch)
return patch
with mock.patch('warnings.warn') as warn:
plt.legend([None], ['My first handler'],
handler_map={None: AnyObjectHandler()})
warn.assert_called_with('Legend handers must now implement a '
'"legend_artist" method rather than '
'being a callable.',
MatplotlibDeprecationWarning,
stacklevel=1)
@cleanup
def test_legend_handle_label_loc_args(self):
# Check the deprecated warning is created and that the appropriate
# call to Legend is made.
lines = plt.plot(range(10))
with mock.patch('matplotlib.cbook.warn_deprecated') as deprecation:
with mock.patch('matplotlib.legend.Legend') as Legend:
plt.legend(lines, ['hello world'], 1)
deprecation.assert_called_with('1.4', self.deprecation_message)
Legend.assert_called_with(plt.gca(), lines, ['hello world'], loc=1)
@cleanup
def test_legend_handle_label(self):
lines = plt.plot(range(10))
with mock.patch('matplotlib.legend.Legend') as Legend:
plt.legend(lines, ['hello world'])
Legend.assert_called_with(plt.gca(), lines, ['hello world'])
@cleanup
def test_legend_no_args(self):
lines = plt.plot(range(10), label='hello world')
with mock.patch('matplotlib.legend.Legend') as Legend:
plt.legend()
Legend.assert_called_with(plt.gca(), lines, ['hello world'])
@cleanup
def test_legend_label_args(self):
lines = plt.plot(range(10), label='hello world')
with mock.patch('matplotlib.legend.Legend') as Legend:
plt.legend(['foobar'])
Legend.assert_called_with(plt.gca(), lines, ['foobar'])
@cleanup
def test_legend_handler_map(self):
lines = plt.plot(range(10), label='hello world')
with mock.patch('matplotlib.axes.Axes.'
'get_legend_handles_labels') as handles_labels:
handles_labels.return_value = lines, ['hello world']
plt.legend(handler_map={'1': 2})
handles_labels.assert_called_with({'1': 2})
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
brorfred/palettable | palettable/cubehelix/cubehelix.py | 4 | 13383 | # coding: utf-8
"""
Cubehelix color maps and palettes.
The Cubehelix algorithm makes color scales with monotonic changes in perceived
brightness. This means that a cubehelix color map gracefully degrades into
a monotonic grayscale color map when rendered without color.
Cubehelix maps are generated algorithmically, giving the user flexibility
in designing a color map that is also safe for grayscale printers. This
module provides several cubehelix realizations, while also exposing the
algorithm directly through the :class:`Cubehelix` class.
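A minimal usage sketch (assuming the ``Cubehelix.make`` constructor and the
standard ``mpl_colors``/``mpl_colormap`` palette attributes; the parameter
values follow the ``classic_16`` recipe noted below)::
    from palettable.cubehelix import Cubehelix
    helix = Cubehelix.make(start=0.5, rotation=-1.5, gamma=1.0, sat=1.2, n=16)
    colors = helix.mpl_colors      # 16 RGB triples for manual styling
    cmap = helix.mpl_colormap      # matplotlib colormap built from the helix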
Cubehelix was developed by `D.A Green, 2011, BASI, 39, 289
<http://adsabs.harvard.edu/abs/2011arXiv1108.5083G>`_. The original Python
port was done by James R. A. Davenport (see License).
Original License
----------------
Copyright (c) 2014, James R. A. Davenport and contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import absolute_import, print_function
try:
import numpy as np
except ImportError: # pragma: no cover
HAVE_NPY = False
else:
HAVE_NPY = True
from ..palette import Palette
url = 'http://adsabs.harvard.edu/abs/2011arXiv1108.5083G'
palette_type = 'sequential'
palette_names = [
'classic_16',
'perceptual_rainbow_16',
'purple_16',
'jim_special_16',
'red_16',
'cubehelix1_16',
'cubehelix2_16',
'cubehelix3_16'
]
palette_rgb = dict((
('classic_16',
# dict(start=0.5, rotation=-1.5, gamma=1.0, sat=1.2,
# min_light=0., max_light=1., n=16)
[[0, 0, 0],
[22, 10, 34],
[24, 32, 68],
[16, 62, 83],
[14, 94, 74],
[35, 116, 51],
[80, 125, 35],
[138, 122, 45],
[190, 117, 85],
[218, 121, 145],
[219, 138, 203],
[204, 167, 240],
[191, 201, 251],
[195, 229, 244],
[220, 246, 239],
[255, 255, 255]]),
('perceptual_rainbow_16',
# Similar to Matteo Niccoli's Perceptual Rainbow:
# http://mycarta.wordpress.com/2013/02/21/perceptual-rainbow-palette-the-method/
# https://github.com/jradavenport/cubehelix
# dict(start_hue=240., end_hue=-300., min_sat=1., max_sat=2.5,
# min_light=0.3, max_light=0.8, gamma=.9, n=16)
[[135, 59, 97],
[143, 64, 127],
[143, 72, 157],
[135, 85, 185],
[121, 102, 207],
[103, 123, 220],
[84, 146, 223],
[69, 170, 215],
[59, 192, 197],
[60, 210, 172],
[71, 223, 145],
[93, 229, 120],
[124, 231, 103],
[161, 227, 95],
[198, 220, 100],
[233, 213, 117]]),
('purple_16',
# dict(start=0., rotation=0.0, n=16)
[[0, 0, 0],
[15, 14, 35],
[31, 28, 68],
[47, 43, 99],
[63, 59, 127],
[79, 75, 152],
[96, 91, 174],
[113, 107, 194],
[130, 124, 211],
[147, 142, 225],
[164, 160, 237],
[182, 178, 246],
[200, 196, 252],
[218, 215, 255],
[236, 235, 255],
[255, 255, 255]]),
('jim_special_16',
# http://www.ifweassume.com/2014/04/cubehelix-colormap-for-python.html
# dict(start=0.3, rotation=-0.5, n=16)
[[0, 0, 0],
[22, 10, 34],
[37, 25, 68],
[47, 43, 99],
[52, 65, 125],
[55, 88, 146],
[59, 112, 160],
[64, 137, 169],
[74, 160, 173],
[89, 181, 175],
[109, 199, 177],
[134, 214, 180],
[163, 227, 189],
[195, 237, 203],
[226, 246, 225],
[255, 255, 255]]),
('red_16',
# http://www.ifweassume.com/2014/04/cubehelix-colormap-for-python.html
# dict(start=0., rotation=0.5, n=16)
[[0, 0, 0],
[19, 12, 35],
[44, 22, 65],
[73, 32, 90],
[104, 41, 107],
[134, 53, 118],
[162, 67, 124],
[185, 83, 126],
[204, 102, 128],
[216, 124, 130],
[225, 148, 136],
[229, 172, 147],
[232, 196, 164],
[236, 219, 189],
[242, 238, 219],
[255, 255, 255]]),
('cubehelix1_16',
# http://nbviewer.ipython.org/gist/anonymous/a4fa0adb08f9e9ea4f94
# dict(gamma=1.0, start=1.5, rotation=-1.0, sat=1.5, n=16)
[[0, 0, 0],
[27, 15, 0],
[65, 23, 4],
[104, 27, 32],
[133, 33, 75],
[147, 45, 126],
[144, 66, 175],
[129, 96, 210],
[111, 131, 227],
[99, 166, 226],
[101, 197, 211],
[120, 219, 194],
[153, 233, 185],
[193, 240, 191],
[230, 245, 216],
[255, 255, 255]]),
('cubehelix2_16',
# http://nbviewer.ipython.org/gist/anonymous/a4fa0adb08f9e9ea4f94
# dict(gamma=1.0, start=2.0, rotation=1.0, sat=1.5, n=16)
[[0, 0, 0],
[0, 28, 14],
[0, 51, 47],
[7, 65, 91],
[35, 71, 135],
[78, 72, 168],
[129, 72, 184],
[177, 77, 181],
[214, 90, 165],
[235, 113, 143],
[238, 142, 128],
[230, 175, 127],
[219, 206, 144],
[216, 231, 178],
[226, 247, 219],
[255, 255, 255]]),
('cubehelix3_16',
# http://nbviewer.ipython.org/gist/anonymous/a4fa0adb08f9e9ea4f94
# dict(gamma=1.0, start=2.0, rotation=1.0, sat=3, n=16)
[[0, 0, 0],
[0, 39, 12],
[0, 68, 60],
[0, 80, 131],
[3, 75, 202],
[72, 60, 252],
[156, 43, 255],
[235, 36, 244],
[255, 45, 194],
[255, 73, 134],
[255, 115, 86],
[255, 164, 67],
[235, 209, 85],
[211, 241, 135],
[215, 255, 200],
[255, 255, 255]]),
))
class Cubehelix(Palette):
"""
Representation of a Cubehelix color map with matplotlib compatible
views of the map.
Parameters
----------
name : str
colors : list
Colors as list of 0-255 RGB triplets.
Attributes
----------
name : str
type : str
number : int
Number of colors in color map.
colors : list
Colors as list of 0-255 RGB triplets.
hex_colors : list
mpl_colors : list
mpl_colormap : matplotlib LinearSegmentedColormap
"""
url = url
def __init__(self, name, colors):
super(Cubehelix, self).__init__(name, palette_type, colors)
@classmethod
def make(cls, start=0.5, rotation=-1.5, gamma=1.0,
start_hue=None, end_hue=None,
sat=None, min_sat=1.2, max_sat=1.2,
min_light=0., max_light=1.,
             n=256, reverse=False, name='custom_cubehelix'):
"""
Create an arbitrary Cubehelix color palette from the algorithm.
See http://adsabs.harvard.edu/abs/2011arXiv1108.5083G for a technical
explanation of the algorithm.
Parameters
----------
start : scalar, optional
Sets the starting position in the RGB color space. 0=blue, 1=red,
2=green. Default is ``0.5`` (purple).
rotation : scalar, optional
The number of rotations through the rainbow. Can be positive
or negative, indicating direction of rainbow. Negative values
correspond to Blue->Red direction. Default is ``-1.5``.
start_hue : scalar, optional
Sets the starting color, ranging from [-360, 360]. Combined with
`end_hue`, this parameter overrides ``start`` and ``rotation``.
This parameter is based on the D3 implementation by @mbostock.
Default is ``None``.
end_hue : scalar, optional
Sets the ending color, ranging from [-360, 360]. Combined with
`start_hue`, this parameter overrides ``start`` and ``rotation``.
This parameter is based on the D3 implementation by @mbostock.
Default is ``None``.
gamma : scalar, optional
The gamma correction for intensity. Values of ``gamma < 1``
            emphasize low intensities while ``gamma > 1`` emphasizes high
intensities. Default is ``1.0``.
sat : scalar, optional
The uniform saturation intensity factor. ``sat=0`` produces
grayscale, while ``sat=1`` retains the full saturation. Setting
``sat>1`` oversaturates the color map, at the risk of clipping
            the color scale. Note that ``sat`` overrides both ``min_sat``
and ``max_sat`` if set.
min_sat : scalar, optional
Saturation at the minimum level. Default is ``1.2``.
max_sat : scalar, optional
            Saturation at the maximum level. Default is ``1.2``.
min_light : scalar, optional
Minimum lightness value. Default is ``0``.
max_light : scalar, optional
Maximum lightness value. Default is ``1``.
n : scalar, optional
Number of discrete rendered colors. Default is ``256``.
reverse : bool, optional
Set to ``True`` to reverse the color map. Will go from black to
white. Good for density plots where shade -> density.
Default is ``False``.
name : str, optional
Name of the color map (defaults to ``'custom_cubehelix'``).
Returns
-------
palette : `Cubehelix`
A Cubehelix color palette.
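        Examples
        --------
        A minimal sketch; the parameter values below are arbitrary
        illustrations, not recommended settings::
            palette = Cubehelix.make(start=0.3, rotation=-0.5, n=16,
                                     name='example_cubehelix')
            palette.hex_colors   # 16 hex strings via the Palette base class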
"""
if not HAVE_NPY: # pragma: no cover
raise RuntimeError('numpy not available.')
# start_hue/end_hue were popularized by D3's implementation
# and will override start/rotation if set
if start_hue is not None and end_hue is not None:
start = (start_hue / 360. - 1.) * 3.
rotation = end_hue / 360. - start / 3. - 1.
# lambd is effectively the color map grid
lambd = np.linspace(min_light, max_light, n)
# apply the gamma correction
lambd_gamma = lambd ** gamma
# Rotation angle
# NOTE the equation for phi in Green 2011 does not have an extra `+1`
# but the Fortran code does, as does the original cubehelix.py
# I'm leaving out the +1 to keep to the original equation, but
# worth investigating. In practice I see no difference!
phi = 2.0 * np.pi * (start / 3.0 + rotation * lambd)
if sat is None:
sat = np.linspace(min_sat, max_sat, n)
# Amplitude of helix from grayscale map
amp = sat * lambd_gamma * (1. - lambd_gamma) / 2.
# Compute the RGB vectors according to Green 2011 Eq 2
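        # Written out per Green (2011) Eq. 2, with a = amp and l = lambd_gamma:
        #   R = l + a * (-0.14861 * cos(phi) + 1.78277 * sin(phi))
        #   G = l + a * (-0.29227 * cos(phi) - 0.90649 * sin(phi))
        #   B = l + a * ( 1.97294 * cos(phi))
        # The matrix product below evaluates all three rows at once.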
rot_matrix = np.array([[-0.14861, +1.78277],
[-0.29227, -0.90649],
[+1.97294, 0.0]])
sin_cos = np.array([np.cos(phi), np.sin(phi)])
rgb = (lambd_gamma + amp * np.dot(rot_matrix, sin_cos)).T * 255.
# Clipping is necessary in some cases when sat > 1
np.clip(rgb, 0., 255., out=rgb)
if reverse:
rgb = rgb[::-1, :]
colors = rgb.astype(int).tolist()
return cls(name, colors)
def print_maps():
"""
Print a list of pre-made Cubehelix palettes.
"""
namelen = max(len(name) for name in palette_names)
fmt = '{0:' + str(namelen + 4) + '}{1:16}'
for name in palette_names:
print(fmt.format(name, palette_type))
def get_map(name, reverse=False):
"""
Get a pre-made Cubehelix palette by name.
Parameters
----------
name : str
Name of map. Use `print_maps()` to see available names.
reverse : bool, optional
If True reverse colors from their default order.
Returns
-------
palette : Cubehelix
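    Examples
    --------
    A short usage sketch; any name listed by ``print_maps()`` works::
        colors = get_map('classic_16').mpl_colors
        reversed_map = get_map('classic_16', reverse=True)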
"""
try:
        # Make everything lower case for matching
index = [s.lower() for s in palette_names].index(name.lower())
except ValueError:
msg = "{0!r} is an unknown Cubehelix palette."
raise KeyError(msg.format(name))
real_name = palette_names[index]
colors = palette_rgb[real_name]
if reverse:
real_name = real_name + '_r'
colors = list(reversed(colors))
return Cubehelix(real_name, colors)
def _get_all_maps():
"""
Returns a dictionary of all Cubehelix palettes, including reversed ones.
These default palettes are rendered with 16 colours.
"""
d = {}
for name in palette_names:
d[name] = get_map(name)
d[name + '_r'] = get_map(name, reverse=True)
return d
| mit |
imperial-genomics-facility/data-management-python | igf_data/igfdb/analysisadaptor.py | 1 | 2825 | import json
import pandas as pd
from sqlalchemy.sql import column
from igf_data.igfdb.baseadaptor import BaseAdaptor
from igf_data.igfdb.igfTables import Project, Analysis
class AnalysisAdaptor(BaseAdaptor):
'''
An adaptor class for Analysis table
'''
def store_analysis_data(self,data,autosave=True):
'''
    A method for storing analysis data
    :param data: A dictionary or a dataframe. It should have the following columns
* project_igf_id / project_id
* analysis_type: (RNA_DIFFERENTIAL_EXPRESSION/RNA_TIME_SERIES/CHIP_PEAK_CALL/SOMATIC_VARIANT_CALLING)
* analysis_description
:param autosave: A toggle for autocommit, default True
:returns: None
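    Example (a minimal sketch; the ids and description are illustrative only,
    and ``aa`` is assumed to be an AnalysisAdaptor with an active session)::
      data = [{
        'project_igf_id': 'IGFP0001',
        'analysis_type': 'RNA_DIFFERENTIAL_EXPRESSION',
        'analysis_description': 'Differential expression run'}]
      aa.store_analysis_data(data=data)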
'''
try:
if not isinstance(data, pd.DataFrame):
data=pd.DataFrame(data)
if 'project_igf_id' in data.columns:
project_map_function = \
lambda x: \
self.map_foreign_table_and_store_attribute(
data=x,
lookup_table=Project,
lookup_column_name='project_igf_id',
target_column_name='project_id') # prepare the function for project id
data['project_id'] = ''
data = \
data.apply(
project_map_function,
axis=1,
result_type=None) # map project id foreign key id
data.drop(
'project_igf_id',
axis=1,
inplace=True)
#data=new_data
self.store_records(
table=Analysis,
data=data)
if autosave:
self.commit_session()
except Exception as e:
if autosave:
self.rollback_session()
raise ValueError(
'Failed to store analysis data, error: {0}'.format(e))
def fetch_analysis_records_project_igf_id(
self,project_igf_id='',output_mode='dataframe'):
'''
A method for fetching analysis records based on project_igf_id
    :param project_igf_id: A project igf id string for filtering records, default ''
:param output_mode: dataframe / object, default: dataframe
:returns: Analysis record
'''
try:
session = self.session
query = \
session.\
query(Project.project_igf_id,
Analysis.analysis_type,
Analysis.analysis_description).\
join(Analysis,
Project.project_id==Analysis.project_id)
if project_igf_id:
query = \
query.\
filter(Project.project_igf_id==project_igf_id)
results = \
self.fetch_records(
query=query,
output_mode=output_mode)
return results
except Exception as e:
raise ValueError(
'Failed to fetch analysis record, error: {0}'.format(e))
| apache-2.0 |
YihaoLu/statsmodels | statsmodels/emplike/descriptive.py | 19 | 38795 | """
Empirical likelihood inference on descriptive statistics
This module conducts hypothesis tests and constructs confidence
intervals for the mean, variance, skewness, kurtosis and correlation.
If matplotlib is installed, this module can also generate multivariate
confidence region plots as well as mean-variance contour plots.
See _OptFuncts docstring for technical details and optimization variable
definitions.
General References:
------------------
Owen, A. (2001). "Empirical Likelihood." Chapman and Hall
"""
from __future__ import division
import numpy as np
from scipy import optimize
from scipy.stats import chi2, skew, kurtosis
from statsmodels.base.optimizer import _fit_newton
import itertools
from statsmodels.graphics import utils
def DescStat(endog):
"""
Returns an instance to conduct inference on descriptive statistics
via empirical likelihood. See DescStatUV and DescStatMV for more
information.
Parameters
----------
endog : ndarray
Array of data
Returns : DescStat instance
If k=1, the function returns a univariate instance, DescStatUV.
If k>1, the function returns a multivariate instance, DescStatMV.
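    Examples
    --------
    A minimal sketch of the dispatch rule (array shapes are illustrative)::
        uv = DescStat(np.random.standard_normal(100))        # DescStatUV
        mv = DescStat(np.random.standard_normal((100, 2)))   # DescStatMV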
"""
if endog.ndim == 1:
endog = endog.reshape(len(endog), 1)
if endog.shape[1] == 1:
return DescStatUV(endog)
if endog.shape[1] > 1:
return DescStatMV(endog)
class _OptFuncts(object):
"""
A class that holds functions that are optimized/solved.
The general setup of the class is simple. Any method that starts with
_opt_ creates a vector of estimating equations named est_vect such that
np.dot(p, (est_vect))=0 where p is the weight on each
observation as a 1 x n array and est_vect is n x k. Then _modif_Newton is
called to determine the optimal p by solving for the Lagrange multiplier
(eta) in the profile likelihood maximization problem. In the presence
of nuisance parameters, _opt_ functions are optimized over to profile
out the nuisance parameters.
Any method starting with _ci_limits calculates the log likelihood
ratio for a specific value of a parameter and then subtracts a
pre-specified critical value. This is solved so that llr - crit = 0.
"""
def __init__(self, endog):
pass
def _log_star(self, eta, est_vect, weights, nobs):
"""
Transforms the log of observation probabilities in terms of the
Lagrange multiplier to the log 'star' of the probabilities.
Parameters
----------
eta : float
Lagrange multiplier
est_vect : ndarray (n,k)
Estimating equations vector
wts : nx1 array
Observation weights
Returns
------
data_star : array
            The weighted logstar of the estimating equations
Notes
-----
This function is only a placeholder for the _fit_Newton.
The function value is not used in optimization and the optimal value
is disregarded when computing the log likelihood ratio.
"""
data_star = np.log(weights) + (np.sum(weights) +\
np.dot(est_vect, eta))
idx = data_star < 1. / nobs
not_idx = ~idx
nx = nobs * data_star[idx]
data_star[idx] = np.log(1. / nobs) - 1.5 + nx * (2. - nx / 2)
data_star[not_idx] = np.log(data_star[not_idx])
return data_star
def _hess(self, eta, est_vect, weights, nobs):
"""
Calculates the hessian of a weighted empirical likelihood
problem.
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
hess : m x m array
Weighted hessian used in _wtd_modif_newton
"""
#eta = np.squeeze(eta)
data_star_doub_prime = np.sum(weights) + np.dot(est_vect, eta)
idx = data_star_doub_prime < 1. / nobs
not_idx = ~idx
data_star_doub_prime[idx] = - nobs ** 2
data_star_doub_prime[not_idx] = - (data_star_doub_prime[not_idx]) ** -2
wtd_dsdp = weights * data_star_doub_prime
return np.dot(est_vect.T, wtd_dsdp[:, None] * est_vect)
def _grad(self, eta, est_vect, weights, nobs):
"""
Calculates the gradient of a weighted empirical likelihood
problem
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray, (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
gradient : ndarray (m,1)
The gradient used in _wtd_modif_newton
"""
#eta = np.squeeze(eta)
data_star_prime = np.sum(weights) + np.dot(est_vect, eta)
idx = data_star_prime < 1. / nobs
not_idx = ~idx
data_star_prime[idx] = nobs * (2 - nobs * data_star_prime[idx])
data_star_prime[not_idx] = 1. / data_star_prime[not_idx]
return np.dot(weights * data_star_prime, est_vect)
def _modif_newton(self, eta, est_vect, weights):
"""
Modified Newton's method for maximizing the log 'star' equation. This
function calls _fit_newton to find the optimal values of eta.
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray, (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
params : 1xm array
Lagrange multiplier that maximizes the log-likelihood
"""
nobs = len(est_vect)
f = lambda x0: - np.sum(self._log_star(x0, est_vect, weights, nobs))
grad = lambda x0: - self._grad(x0, est_vect, weights, nobs)
hess = lambda x0: - self._hess(x0, est_vect, weights, nobs)
kwds = {'tol': 1e-8}
eta = eta.squeeze()
res = _fit_newton(f, grad, eta, (), kwds, hess=hess, maxiter=50, \
disp=0)
return res[0]
def _find_eta(self, eta):
"""
Finding the root of sum(xi-h0)/(1+eta(xi-mu)) solves for
eta when computing ELR for univariate mean.
Parameters
----------
eta : float
Lagrange multiplier in the empirical likelihood maximization
Returns
-------
        f : float
            The value of the estimating equation at eta; its root gives the
            Lagrange multiplier that maximizes the empirical likelihood
"""
return np.sum((self.endog - self.mu0) / \
(1. + eta * (self.endog - self.mu0)))
def _ci_limits_mu(self, mu):
"""
Calculates the difference between the log likelihood of mu_test and a
specified critical value.
Parameters
----------
mu : float
Hypothesized value of the mean.
Returns
-------
diff : float
The difference between the log likelihood value of mu0 and
a specified value.
"""
return self.test_mean(mu)[0] - self.r0
def _find_gamma(self, gamma):
"""
Finds gamma that satisfies
sum(log(n * w(gamma))) - log(r0) = 0
Used for confidence intervals for the mean
Parameters
----------
gamma : float
Lagrange multiplier when computing confidence interval
Returns
-------
diff : float
            The difference between the log-likelihood when the Lagrange
multiplier is gamma and a pre-specified value
"""
denom = np.sum((self.endog - gamma) ** -1)
new_weights = (self.endog - gamma) ** -1 / denom
return -2 * np.sum(np.log(self.nobs * new_weights)) - \
self.r0
def _opt_var(self, nuisance_mu, pval=False):
"""
This is the function to be optimized over a nuisance mean parameter
to determine the likelihood ratio for the variance
Parameters
----------
nuisance_mu : float
Value of a nuisance mean parameter
Returns
-------
llr : float
Log likelihood of a pre-specified variance holding the nuisance
parameter constant
"""
endog = self.endog
nobs = self.nobs
sig_data = ((endog - nuisance_mu) ** 2 \
- self.sig2_0)
mu_data = (endog - nuisance_mu)
est_vect = np.column_stack((mu_data, sig_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1 + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
if pval: # Used for contour plotting
return chi2.sf(-2 * llr, 1)
return -2 * llr
def _ci_limits_var(self, var):
"""
Used to determine the confidence intervals for the variance.
It calls test_var and when called by an optimizer,
finds the value of sig2_0 that is chi2.ppf(significance-level)
Parameters
----------
        var : float
Hypothesized value of the variance
Returns
-------
diff : float
The difference between the log likelihood ratio at var_test and a
pre-specified value.
"""
return self.test_var(var)[0] - self.r0
def _opt_skew(self, nuis_params):
"""
Called by test_skew. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
-------
llr : float
The log likelihood ratio of a pre-specified skewness holding
the nuisance parameters constant.
"""
endog = self.endog
nobs = self.nobs
mu_data = endog - nuis_params[0]
sig_data = ((endog - nuis_params[0]) ** 2) - nuis_params[1]
skew_data = ((((endog - nuis_params[0]) ** 3) /
(nuis_params[1] ** 1.5))) - self.skew0
est_vect = np.column_stack((mu_data, sig_data, skew_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1. + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr
def _opt_kurt(self, nuis_params):
"""
Called by test_kurt. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
-------
llr : float
            The log likelihood ratio of a pre-specified kurtosis holding the
nuisance parameters constant
"""
endog = self.endog
nobs = self.nobs
mu_data = endog - nuis_params[0]
sig_data = ((endog - nuis_params[0]) ** 2) - nuis_params[1]
kurt_data = (((((endog - nuis_params[0]) ** 4) / \
(nuis_params[1] ** 2))) - 3) - self.kurt0
est_vect = np.column_stack((mu_data, sig_data, kurt_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1 + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr
def _opt_skew_kurt(self, nuis_params):
"""
Called by test_joint_skew_kurt. This function is optimized over
nuisance parameters mu and sigma
Parameters
-----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
------
llr : float
            The log likelihood ratio of a pre-specified skewness and
kurtosis holding the nuisance parameters constant.
"""
endog = self.endog
nobs = self.nobs
mu_data = endog - nuis_params[0]
sig_data = ((endog - nuis_params[0]) ** 2) - nuis_params[1]
skew_data = ((((endog - nuis_params[0]) ** 3) / \
(nuis_params[1] ** 1.5))) - self.skew0
kurt_data = (((((endog - nuis_params[0]) ** 4) / \
(nuis_params[1] ** 2))) - 3) - self.kurt0
est_vect = np.column_stack((mu_data, sig_data, skew_data, kurt_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs,
1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1. + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr
def _ci_limits_skew(self, skew):
"""
Parameters
----------
        skew : float
Hypothesized value of skewness
Returns
-------
diff : float
The difference between the log likelihood ratio at skew and a
pre-specified value.
"""
return self.test_skew(skew)[0] - self.r0
def _ci_limits_kurt(self, kurt):
"""
Parameters
---------
        kurt : float
Hypothesized value of kurtosis
Returns
-------
diff : float
The difference between the log likelihood ratio at kurt and a
pre-specified value.
"""
return self.test_kurt(kurt)[0] - self.r0
def _opt_correl(self, nuis_params, corr0, endog, nobs, x0, weights0):
"""
Parameters
----------
nuis_params : 1darray
Array containing two nuisance means and two nuisance variances
Returns
-------
llr : float
The log-likelihood of the correlation coefficient holding nuisance
parameters constant
"""
mu1_data, mu2_data = (endog - nuis_params[::2]).T
sig1_data = mu1_data ** 2 - nuis_params[1]
sig2_data = mu2_data ** 2 - nuis_params[3]
correl_data = ((mu1_data * mu2_data) - corr0 *
(nuis_params[1] * nuis_params[3]) ** .5)
est_vect = np.column_stack((mu1_data, sig1_data,
mu2_data, sig2_data, correl_data))
eta_star = self._modif_newton(x0, est_vect, weights0)
denom = 1. + np.dot(est_vect, eta_star)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr
def _ci_limits_corr(self, corr):
return self.test_corr(corr)[0] - self.r0
class DescStatUV(_OptFuncts):
"""
A class to compute confidence intervals and hypothesis tests involving
mean, variance, kurtosis and skewness of a univariate random variable.
Parameters
----------
endog : 1darray
Data to be analyzed
Attributes
----------
endog : 1darray
Data to be analyzed
nobs : float
Number of observations
"""
def __init__(self, endog):
self.endog = np.squeeze(endog)
self.nobs = endog.shape[0]
def test_mean(self, mu0, return_weights=False):
"""
Returns - 2 x log-likelihood ratio, p-value and weights
for a hypothesis test of the mean.
Parameters
----------
mu0 : float
Mean value to be tested
return_weights : bool
            If return_weights is True the function returns
the weights of the observations under the null hypothesis.
Default is False
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of mu0
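        Examples
        --------
        A usage sketch following the variance example in this class:
        >>> random_numbers = np.random.standard_normal(1000)
        >>> el_analysis = sm.emplike.DescStat(random_numbers)
        >>> llr, pval = el_analysis.test_mean(0)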
"""
self.mu0 = mu0
endog = self.endog
nobs = self.nobs
eta_min = (1. - (1. / nobs)) / (self.mu0 - max(endog))
eta_max = (1. - (1. / nobs)) / (self.mu0 - min(endog))
eta_star = optimize.brentq(self._find_eta, eta_min, eta_max)
new_weights = (1. / nobs) * 1. / (1. + eta_star * (endog - self.mu0))
llr = -2 * np.sum(np.log(nobs * new_weights))
if return_weights:
return llr, chi2.sf(llr, 1), new_weights
else:
return llr, chi2.sf(llr, 1)
def ci_mean(self, sig=.05, method='gamma', epsilon=10 ** -8,
gamma_low=-10 ** 10, gamma_high=10 ** 10):
"""
Returns the confidence interval for the mean.
Parameters
----------
sig : float
significance level. Default is .05
method : str
Root finding method, Can be 'nested-brent' or
'gamma'. Default is 'gamma'
'gamma' Tries to solve for the gamma parameter in the
Lagrange (see Owen pg 22) and then determine the weights.
            'nested-brent' uses Brent's method to find the confidence
            intervals but must maximize the likelihood ratio on every
iteration.
gamma is generally much faster. If the optimizations does not
converge, try expanding the gamma_high and gamma_low
variable.
gamma_low : float
Lower bound for gamma when finding lower limit.
If function returns f(a) and f(b) must have different signs,
consider lowering gamma_low.
gamma_high : float
Upper bound for gamma when finding upper limit.
If function returns f(a) and f(b) must have different signs,
consider raising gamma_high.
epsilon : float
When using 'nested-brent', amount to decrease (increase)
from the maximum (minimum) of the data when
starting the search. This is to protect against the
likelihood ratio being zero at the maximum (minimum)
value of the data. If data is very small in absolute value
(<10 ``**`` -6) consider shrinking epsilon
When using 'gamma', amount to decrease (increase) the
minimum (maximum) by to start the search for gamma.
            If the function returns f(a) and f(b) must have different signs,
consider lowering epsilon.
Returns
-------
Interval : tuple
Confidence interval for the mean
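        Examples
        --------
        A short sketch using the default 'gamma' method:
        >>> random_numbers = np.random.standard_normal(100)
        >>> el_analysis = sm.emplike.DescStat(random_numbers)
        >>> ci_low, ci_high = el_analysis.ci_mean()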
"""
endog = self.endog
sig = 1 - sig
if method == 'nested-brent':
self.r0 = chi2.ppf(sig, 1)
middle = np.mean(endog)
epsilon_u = (max(endog) - np.mean(endog)) * epsilon
epsilon_l = (np.mean(endog) - min(endog)) * epsilon
ulim = optimize.brentq(self._ci_limits_mu, middle,
max(endog) - epsilon_u)
llim = optimize.brentq(self._ci_limits_mu, middle,
min(endog) + epsilon_l)
return llim, ulim
if method == 'gamma':
self.r0 = chi2.ppf(sig, 1)
gamma_star_l = optimize.brentq(self._find_gamma, gamma_low,
min(endog) - epsilon)
gamma_star_u = optimize.brentq(self._find_gamma, \
max(endog) + epsilon, gamma_high)
weights_low = ((endog - gamma_star_l) ** -1) / \
np.sum((endog - gamma_star_l) ** -1)
weights_high = ((endog - gamma_star_u) ** -1) / \
np.sum((endog - gamma_star_u) ** -1)
mu_low = np.sum(weights_low * endog)
mu_high = np.sum(weights_high * endog)
return mu_low, mu_high
def test_var(self, sig2_0, return_weights=False):
"""
        Returns -2 x log-likelihood ratio and the p-value for the
hypothesized variance
Parameters
----------
sig2_0 : float
Hypothesized variance to be tested
return_weights : bool
If True, returns the weights that maximize the
likelihood of observing sig2_0. Default is False
Returns
--------
test_results : tuple
The log-likelihood ratio and the p_value of sig2_0
Examples
--------
>>> random_numbers = np.random.standard_normal(1000)*100
>>> el_analysis = sm.emplike.DescStat(random_numbers)
>>> hyp_test = el_analysis.test_var(9500)
"""
self.sig2_0 = sig2_0
mu_max = max(self.endog)
mu_min = min(self.endog)
llr = optimize.fminbound(self._opt_var, mu_min, mu_max, \
full_output=1)[1]
p_val = chi2.sf(llr, 1)
if return_weights:
return llr, p_val, self.new_weights.T
else:
return llr, p_val
def ci_var(self, lower_bound=None, upper_bound=None, sig=.05):
"""
Returns the confidence interval for the variance.
Parameters
----------
lower_bound : float
The minimum value the lower confidence interval can
take. The p-value from test_var(lower_bound) must be lower
than 1 - significance level. Default is .99 confidence
limit assuming normality
upper_bound : float
The maximum value the upper confidence interval
can take. The p-value from test_var(upper_bound) must be lower
than 1 - significance level. Default is .99 confidence
limit assuming normality
sig : float
The significance level. Default is .05
Returns
--------
Interval : tuple
Confidence interval for the variance
Examples
--------
>>> random_numbers = np.random.standard_normal(100)
>>> el_analysis = sm.emplike.DescStat(random_numbers)
>>> el_analysis.ci_var()
>>> 'f(a) and f(b) must have different signs'
>>> el_analysis.ci_var(.5, 2)
Notes
-----
If the function returns the error f(a) and f(b) must have
different signs, consider lowering lower_bound and raising
upper_bound.
"""
endog = self.endog
if upper_bound is None:
upper_bound = ((self.nobs - 1) * endog.var()) / \
(chi2.ppf(.0001, self.nobs - 1))
if lower_bound is None:
lower_bound = ((self.nobs - 1) * endog.var()) / \
(chi2.ppf(.9999, self.nobs - 1))
self.r0 = chi2.ppf(1 - sig, 1)
llim = optimize.brentq(self._ci_limits_var, lower_bound, endog.var())
ulim = optimize.brentq(self._ci_limits_var, endog.var(), upper_bound)
return llim, ulim
def plot_contour(self, mu_low, mu_high, var_low, var_high, mu_step,
var_step,
levs=[.2, .1, .05, .01, .001]):
"""
Returns a plot of the confidence region for a univariate
mean and variance.
Parameters
----------
mu_low : float
Lowest value of the mean to plot
mu_high : float
Highest value of the mean to plot
var_low : float
Lowest value of the variance to plot
var_high : float
Highest value of the variance to plot
mu_step : float
Increments to evaluate the mean
var_step : float
Increments to evaluate the mean
levs : list
Which values of significance the contour lines will be drawn.
Default is [.2, .1, .05, .01, .001]
Returns
-------
fig : matplotlib figure instance
The contour plot
"""
fig, ax = utils.create_mpl_ax()
ax.set_ylabel('Variance')
ax.set_xlabel('Mean')
mu_vect = list(np.arange(mu_low, mu_high, mu_step))
var_vect = list(np.arange(var_low, var_high, var_step))
z = []
for sig0 in var_vect:
self.sig2_0 = sig0
for mu0 in mu_vect:
z.append(self._opt_var(mu0, pval=True))
z = np.asarray(z).reshape(len(var_vect), len(mu_vect))
ax.contour(mu_vect, var_vect, z, levels=levs)
return fig
def test_skew(self, skew0, return_weights=False):
"""
Returns -2 x log-likelihood and p-value for the hypothesized
skewness.
Parameters
----------
skew0 : float
Skewness value to be tested
return_weights : bool
If True, function also returns the weights that
maximize the likelihood ratio. Default is False.
Returns
--------
test_results : tuple
The log-likelihood ratio and p_value of skew0
"""
self.skew0 = skew0
start_nuisance = np.array([self.endog.mean(),
self.endog.var()])
llr = optimize.fmin_powell(self._opt_skew, start_nuisance,
full_output=1, disp=0)[1]
p_val = chi2.sf(llr, 1)
if return_weights:
return llr, p_val, self.new_weights.T
return llr, p_val
def test_kurt(self, kurt0, return_weights=False):
"""
Returns -2 x log-likelihood and the p-value for the hypothesized
kurtosis.
Parameters
----------
kurt0 : float
Kurtosis value to be tested
return_weights : bool
If True, function also returns the weights that
maximize the likelihood ratio. Default is False.
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of kurt0
"""
self.kurt0 = kurt0
start_nuisance = np.array([self.endog.mean(),
self.endog.var()])
llr = optimize.fmin_powell(self._opt_kurt, start_nuisance,
full_output=1, disp=0)[1]
p_val = chi2.sf(llr, 1)
if return_weights:
return llr, p_val, self.new_weights.T
return llr, p_val
def test_joint_skew_kurt(self, skew0, kurt0, return_weights=False):
"""
Returns - 2 x log-likelihood and the p-value for the joint
hypothesis test for skewness and kurtosis
Parameters
----------
skew0 : float
Skewness value to be tested
kurt0 : float
Kurtosis value to be tested
return_weights : bool
If True, function also returns the weights that
maximize the likelihood ratio. Default is False.
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of the joint hypothesis test.
"""
self.skew0 = skew0
self.kurt0 = kurt0
start_nuisance = np.array([self.endog.mean(),
self.endog.var()])
llr = optimize.fmin_powell(self._opt_skew_kurt, start_nuisance,
full_output=1, disp=0)[1]
p_val = chi2.sf(llr, 2)
if return_weights:
return llr, p_val, self.new_weights.T
return llr, p_val
def ci_skew(self, sig=.05, upper_bound=None, lower_bound=None):
"""
Returns the confidence interval for skewness.
Parameters
----------
sig : float
The significance level. Default is .05
upper_bound : float
Maximum value of skewness the upper limit can be.
Default is .99 confidence limit assuming normality.
lower_bound : float
Minimum value of skewness the lower limit can be.
Default is .99 confidence level assuming normality.
Returns
-------
Interval : tuple
Confidence interval for the skewness
Notes
-----
If function returns f(a) and f(b) must have different signs, consider
expanding lower and upper bounds
"""
nobs = self.nobs
endog = self.endog
if upper_bound is None:
upper_bound = skew(endog) + \
2.5 * ((6. * nobs * (nobs - 1.)) / \
((nobs - 2.) * (nobs + 1.) * \
(nobs + 3.))) ** .5
if lower_bound is None:
lower_bound = skew(endog) - \
2.5 * ((6. * nobs * (nobs - 1.)) / \
((nobs - 2.) * (nobs + 1.) * \
(nobs + 3.))) ** .5
self.r0 = chi2.ppf(1 - sig, 1)
llim = optimize.brentq(self._ci_limits_skew, lower_bound, skew(endog))
ulim = optimize.brentq(self._ci_limits_skew, skew(endog), upper_bound)
return llim, ulim
def ci_kurt(self, sig=.05, upper_bound=None, lower_bound=None):
"""
Returns the confidence interval for kurtosis.
Parameters
----------
sig : float
The significance level. Default is .05
upper_bound : float
Maximum value of kurtosis the upper limit can be.
Default is .99 confidence limit assuming normality.
lower_bound : float
Minimum value of kurtosis the lower limit can be.
Default is .99 confidence limit assuming normality.
Returns
--------
Interval : tuple
Lower and upper confidence limit
Notes
-----
For small n, upper_bound and lower_bound may have to be
provided by the user. Consider using test_kurt to find
values close to the desired significance level.
If function returns f(a) and f(b) must have different signs, consider
expanding the bounds.
"""
endog = self.endog
nobs = self.nobs
if upper_bound is None:
upper_bound = kurtosis(endog) + \
(2.5 * (2. * ((6. * nobs * (nobs - 1.)) / \
((nobs - 2.) * (nobs + 1.) * \
(nobs + 3.))) ** .5) * \
(((nobs ** 2.) - 1.) / ((nobs - 3.) *\
(nobs + 5.))) ** .5)
if lower_bound is None:
lower_bound = kurtosis(endog) - \
(2.5 * (2. * ((6. * nobs * (nobs - 1.)) / \
((nobs - 2.) * (nobs + 1.) * \
(nobs + 3.))) ** .5) * \
(((nobs ** 2.) - 1.) / ((nobs - 3.) *\
(nobs + 5.))) ** .5)
self.r0 = chi2.ppf(1 - sig, 1)
llim = optimize.brentq(self._ci_limits_kurt, lower_bound, \
kurtosis(endog))
ulim = optimize.brentq(self._ci_limits_kurt, kurtosis(endog), \
upper_bound)
return llim, ulim
class DescStatMV(_OptFuncts):
"""
A class for conducting inference on multivariate means and correlation.
Parameters
----------
endog : ndarray
Data to be analyzed
Attributes
----------
endog : ndarray
Data to be analyzed
nobs : float
Number of observations
"""
def __init__(self, endog):
self.endog = endog
self.nobs = endog.shape[0]
def mv_test_mean(self, mu_array, return_weights=False):
"""
Returns -2 x log likelihood and the p-value
for a multivariate hypothesis test of the mean
Parameters
----------
mu_array : 1d array
Hypothesized values for the mean. Must have same number of
elements as columns in endog
return_weights : bool
If True, returns the weights that maximize the
likelihood of mu_array. Default is False.
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value for mu_array
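        Examples
        --------
        A sketch with bivariate data; the hypothesized means are illustrative:
        >>> two_rvs = np.random.standard_normal((20, 2))
        >>> el_analysis = sm.emplike.DescStat(two_rvs)
        >>> llr, pval = el_analysis.mv_test_mean(np.array([0., 0.]))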
"""
endog = self.endog
nobs = self.nobs
if len(mu_array) != endog.shape[1]:
raise Exception('mu_array must have the same number of \
elements as the columns of the data.')
mu_array = mu_array.reshape(1, endog.shape[1])
means = np.ones((endog.shape[0], endog.shape[1]))
means = mu_array * means
est_vect = endog - means
start_vals = 1. / nobs * np.ones(endog.shape[1])
eta_star = self._modif_newton(start_vals, est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1 + np.dot(eta_star, est_vect.T)
self.new_weights = 1 / nobs * 1 / denom
llr = -2 * np.sum(np.log(nobs * self.new_weights))
p_val = chi2.sf(llr, mu_array.shape[1])
if return_weights:
return llr, p_val, self.new_weights.T
else:
return llr, p_val
def mv_mean_contour(self, mu1_low, mu1_upp, mu2_low, mu2_upp, step1, step2,
levs=[.2, .1, .05, .01, .001], var1_name=None,
var2_name=None, plot_dta=False):
"""
Creates a confidence region plot for the mean of bivariate data
Parameters
----------
        mu1_low : float
            Minimum value of the mean for variable 1
        mu1_upp : float
Maximum value of the mean for variable 1
mu2_low : float
Minimum value of the mean for variable 2
mu2_upp : float
Maximum value of the mean for variable 2
step1 : float
Increment of evaluations for variable 1
step2 : float
Increment of evaluations for variable 2
levs : list
Levels to be drawn on the contour plot.
            Default = [.2, .1, .05, .01, .001]
plot_dta : bool
If True, makes a scatter plot of the data on
            top of the contour plot. Default is False.
var1_name : str
Name of variable 1 to be plotted on the x-axis
var2_name : str
Name of variable 2 to be plotted on the y-axis
Notes
-----
The smaller the step size, the more accurate the intervals
will be
If the function returns optimization failed, consider narrowing
the boundaries of the plot
Examples
--------
>>> two_rvs = np.random.standard_normal((20,2))
        >>> el_analysis = sm.emplike.DescStat(two_rvs)
>>> contourp = el_analysis.mv_mean_contour(-2, 2, -2, 2, .1, .1)
>>> contourp.show()
"""
if self.endog.shape[1] != 2:
raise Exception('Data must contain exactly two variables')
fig, ax = utils.create_mpl_ax()
if var2_name is None:
ax.set_ylabel('Variable 2')
else:
ax.set_ylabel(var2_name)
if var1_name is None:
ax.set_xlabel('Variable 1')
else:
ax.set_xlabel(var1_name)
x = (np.arange(mu1_low, mu1_upp, step1))
y = (np.arange(mu2_low, mu2_upp, step2))
pairs = itertools.product(x, y)
z = []
for i in pairs:
z.append(self.mv_test_mean(np.asarray(i))[0])
X, Y = np.meshgrid(x, y)
z = np.asarray(z)
z = z.reshape(X.shape[1], Y.shape[0])
ax.contour(x, y, z.T, levels=levs)
if plot_dta:
ax.plot(self.endog[:, 0], self.endog[:, 1], 'bo')
return fig
def test_corr(self, corr0, return_weights=0):
"""
Returns -2 x log-likelihood ratio and p-value for the
correlation coefficient between 2 variables
Parameters
----------
corr0 : float
Hypothesized value to be tested
return_weights : bool
If true, returns the weights that maximize
the log-likelihood at the hypothesized value
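        Examples
        --------
        A sketch following the other tests in this class; the data and the
        hypothesized correlation are illustrative:
        >>> two_rvs = np.random.standard_normal((50, 2))
        >>> el_analysis = sm.emplike.DescStat(two_rvs)
        >>> llr, pval = el_analysis.test_corr(0.)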
"""
nobs = self.nobs
endog = self.endog
if endog.shape[1] != 2:
raise Exception('Correlation matrix not yet implemented')
nuis0 = np.array([endog[:, 0].mean(),
endog[:, 0].var(),
endog[:, 1].mean(),
endog[:, 1].var()])
x0 = np.zeros(5)
weights0 = np.array([1. / nobs] * int(nobs))
args = (corr0, endog, nobs, x0, weights0)
llr = optimize.fmin(self._opt_correl, nuis0, args=args,
full_output=1, disp=0)[1]
p_val = chi2.sf(llr, 1)
if return_weights:
return llr, p_val, self.new_weights.T
return llr, p_val
def ci_corr(self, sig=.05, upper_bound=None, lower_bound=None):
"""
Returns the confidence intervals for the correlation coefficient
Parameters
----------
sig : float
The significance level. Default is .05
upper_bound : float
Maximum value the upper confidence limit can be.
Default is 99% confidence limit assuming normality.
lower_bound : float
            Minimum value the lower confidence limit can be.
Default is 99% confidence limit assuming normality.
Returns
-------
interval : tuple
Confidence interval for the correlation
"""
endog = self.endog
nobs = self.nobs
self.r0 = chi2.ppf(1 - sig, 1)
point_est = np.corrcoef(endog[:, 0], endog[:, 1])[0, 1]
if upper_bound is None:
upper_bound = min(.999, point_est + \
2.5 * ((1. - point_est ** 2.) / \
(nobs - 2.)) ** .5)
if lower_bound is None:
lower_bound = max(- .999, point_est - \
2.5 * (np.sqrt((1. - point_est ** 2.) / \
(nobs - 2.))))
llim = optimize.brenth(self._ci_limits_corr, lower_bound, point_est)
ulim = optimize.brenth(self._ci_limits_corr, point_est, upper_bound)
return llim, ulim
| bsd-3-clause |
blueburningcoder/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backend_bases.py | 69 | 69740 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
    handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
"""
from __future__ import division
import os, warnings, time
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
from matplotlib import rcParams
class RendererBase:
"""An abstract base class to handle drawing/rendering operations.
The following methods *must* be implemented in the backend:
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_text`
* :meth:`get_text_width_height_descent`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
def open_group(self, s):
"""
Open a grouping element with label *s*. Is only currently used by
:mod:`~matplotlib.backends.backend_svg`
"""
pass
def close_group(self, s):
"""
Close a grouping element with label *s*
Is only currently used by :mod:`~matplotlib.backends.backend_svg`
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
tpath = trans.transform_path(path)
for vertices, codes in tpath.iter_segments():
if len(vertices):
x,y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans + transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
"""
Draws a collection of paths, selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before
being applied.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
draw_path. Some backends may want to override this in order
to render each set of path data only once, and then reference
that path multiple times with the different offsets, colors,
styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
path, transform = path_id
transform = transforms.Affine2D(transform.get_matrix()).translate(xo, yo)
self.draw_path(gc, path, transform, rgbFace)
def draw_quad_mesh(self, master_transform, cliprect, clippath,
clippath_trans, meshWidth, meshHeight, coordinates,
offsets, offsetTrans, facecolors, antialiased,
showedges):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if showedges:
edgecolors = np.array([[0.0, 0.0, 0.0, 1.0]], np.float_)
linewidths = np.array([1.0], np.float_)
else:
edgecolors = facecolors
linewidths = np.array([0.0], np.float_)
return self.draw_path_collection(
master_transform, cliprect, clippath, clippath_trans,
paths, [], offsets, offsetTrans, facecolors, edgecolors,
linewidths, [], [antialiased], [None])
def _iter_collection_raw_paths(self, master_transform, paths, all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
        it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = all_transforms[i % Ntransforms]
yield path, transform + master_transform
def _iter_collection(self, path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
        a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc = self.new_gc()
gc.set_clip_rectangle(cliprect)
if clippath is not None:
clippath = transforms.TransformedPath(clippath, clippath_trans)
gc.set_clip_path(clippath)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc.set_linewidth(0.0)
xo, yo = 0, 0
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
gc.set_foreground(edgecolors[i % Nedgecolors])
if Nlinewidths:
gc.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc.set_dashes(*linestyles[i % Nlinestyles])
if rgbFace is not None and len(rgbFace)==4:
gc.set_alpha(rgbFace[-1])
rgbFace = rgbFace[:3]
gc.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc, rgbFace
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the image instance into the current axes;
*x*
is the distance in pixels from the left hand side of the canvas.
*y*
the distance from the origin. That is, if origin is
upper, y is the distance from top. If origin is lower, y
is the distance from bottom
*im*
the :class:`matplotlib._image.Image` instance
*bbox*
a :class:`matplotlib.transforms.Bbox` instance for clipping, or
None
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
        Override this method for renderers that do not necessarily
want to rescale and composite raster images. (like SVG)
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
raise NotImplementedError
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text in display coords
*s*
a :class:`matplotlib.text.Text` instance
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be blotted along with
your text.
"""
raise NotImplementedError
def flipy(self):
"""
        Return True if small y values correspond to the top of the canvas.
        Used only for drawing text (:mod:`matplotlib.text`) and images
        (:mod:`matplotlib.image`).
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
raise NotImplementedError
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
doesn't have a dpi, eg, postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
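        For instance, a raster backend that stores its resolution in a
        ``dpi`` attribute might simply use (a sketch, not a requirement)::
            return points * self.dpi / 72.0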
"""
return points
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
pass
def stop_rasterizing(self):
pass
class GraphicsContextBase:
"""
An abstract base class that provides color, line styles, etc...
"""
# a mapping from dash styles to suggested offset, dash pairs
dashd = {
'solid' : (None, None),
'dashed' : (0, (6.0, 6.0)),
'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
'dotted' : (0, (1.0, 3.0)),
}
def __init__(self):
self._alpha = 1.0
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'miter'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0)
self._hatch = None
self._url = None
self._snap = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._hatch = gc._hatch
self._url = gc._url
self._snap = gc._snap
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox` instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
The dash list is a even size list that gives the ink on, ink
off in pixels.
See p107 of to PostScript `BLUEBOOK
<http://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
def get_linestyle(self, style):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
returns a tuple of three floats from 0-1. color can be a
matlab format string, a html hex color string, or a rgb tuple
"""
return self._rgb
def get_url(self):
"""
returns a url if one is set, None otherwise
"""
return self._url
def get_snap(self):
"""
returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
"""
self._alpha = alpha
def set_antialiased(self, b):
"""
True if object should be drawn with antialiased rendering
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b: self._antialiased = 1
else: self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
assert path is None or isinstance(path, transforms.TransformedPath)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
*dash_offset*
is the offset (usually 0).
*dash_list*
specifies the on-off sequence as points. ``(None, None)`` specifies a solid line
"""
self._dashes = dash_offset, dash_list
def set_foreground(self, fg, isRGB=False):
"""
Set the foreground color. fg can be a matlab format string, a
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
The :class:`GraphicsContextBase` converts colors to rgb
internally. If you know the color is rgb already, you can set
        ``isRGB=True`` to avoid the performance hit of the conversion
"""
if isRGB:
self._rgb = fg
else:
self._rgb = colors.colorConverter.to_rgba(fg)
def set_graylevel(self, frac):
"""
Set the foreground color to be a gray level with *frac*
"""
self._rgb = (frac, frac, frac)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = w
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
try:
offset, dashes = self.dashd[style]
        except KeyError:
raise ValueError('Unrecognized linestyle: %s' % style)
self._linestyle = style
self.set_dashes(offset, dashes)
def set_url(self, url):
"""
Sets the url for links in compatible backends
"""
self._url = url
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
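# A hedged usage sketch (not part of the original module): roughly how a backend
# configures a graphics context before handing it to its renderer.  The
# `renderer`, `path` and `transform` objects are assumed to exist elsewhere;
# only the GC calls shown here are defined in the class above.
#
#     gc = GraphicsContextBase()
#     gc.set_foreground('r')          # color string, hex string or rgb tuple
#     gc.set_linewidth(2.0)           # in points
#     gc.set_dashes(0, [4.0, 2.0])    # 4 on / 2 off dash pattern
#     gc.set_alpha(0.5)
#     renderer.draw_path(gc, path, transform)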
class Event:
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas,guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
pass
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event attributes are defined:
*renderer*
the :class:`RendererBase` instance for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event attributes are defined:
*width*
width of the canvas in pixels
*height*
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class LocationEvent(Event):
"""
    An event that has a screen location.
    In addition to the :class:`Event` attributes, the following
    attributes are defined and shown with their default values:
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
    y = None       # y position - pixels from bottom of canvas
    inaxes = None  # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y,guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas,guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
axes_list = [a for a in self.canvas.figure.get_axes() if a.in_axes(self)]
if len(axes_list) == 0: # None found
self.inaxes = None
self._update_enter_leave()
return
elif (len(axes_list) > 1): # Overlap, get the highest zorder
            axes_list.sort(key=lambda ax: ax.zorder)
self.inaxes = axes_list[-1] # Use the highest zorder
else: # Just found one hit
self.inaxes = axes_list[0]
try:
xdata, ydata = self.inaxes.transData.inverted().transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes!=self.inaxes:
# process axes enter/leave events
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event', 'button_release_event', 'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used for scroll events)
*key*
        the key pressed: None, any printable character, 'shift', 'win', or 'control'
*step*
number of scroll steps (positive for 'up', negative for 'down')
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
    y = None         # y position - pixels from bottom of canvas
    button = None    # button pressed None, 1, 2, 3
    inaxes = None    # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
class PickEvent(Event):
"""
a pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attrs: all the :class:`Event` attributes plus
*mouseevent*
the :class:`MouseEvent` that generated the pick
*artist*
the :class:`~matplotlib.artist.Artist` picked
other
extra class dependent attrs -- eg a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Example usage::
line, = ax.plot(rand(100), 'o', picker=5) # 5 points tolerance
def on_pick(event):
thisline = event.artist
xdata, ydata = thisline.get_data()
ind = event.ind
print 'on pick line:', zip(xdata[ind], ydata[ind])
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist, guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*key*
        the key pressed: None, any printable character, 'shift', 'win', or 'control'
This interface may change slightly when better support for
modifier keys is included.
Example usage::
def on_key(event):
print 'you pressed', event.key, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase:
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event'
]
def __init__(self, figure):
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry(self.events)
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event',self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event',self.pick)
if False:
## highlight the artists that are hit
self.mpl_connect('motion_notify_event',self.onHilite)
## delete the artists that are clicked on
#self.mpl_disconnect(self.button_pick_id)
#self.mpl_connect('button_press_event',self.onRemove)
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
under the cursor. Connect this to the 'mouse_press_event'
using::
canvas.mpl_connect('mouse_press_event',canvas.onRemove)
"""
def sort_artists(artists):
# This depends on stable sort and artists returned
# from get_children in z order.
L = [ (h.zorder, h) for h in artists ]
L.sort()
return [ h for zorder, h in L ]
# Find the top artist under the cursor
under = sort_artists(self.figure.hitlist(ev))
h = None
if under: h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
print "Removing",h
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def onHilite(self, ev):
"""
Mouse event processor which highlights the artists
under the cursor. Connect this to the 'motion_notify_event'
using::
canvas.mpl_connect('motion_notify_event',canvas.onHilite)
"""
if not hasattr(self,'_active'): self._active = dict()
under = self.figure.hitlist(ev)
enter = [a for a in under if a not in self._active]
leave = [a for a in self._active if a not in under]
print "within:"," ".join([str(x) for x in under])
#print "entering:",[str(a) for a in enter]
#print "leaving:",[str(a) for a in leave]
# On leave restore the captured colour
for a in leave:
if hasattr(a,'get_color'):
a.set_color(self._active[a])
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor(self._active[a][0])
a.set_facecolor(self._active[a][1])
del self._active[a]
# On enter, capture the color and repaint the artist
# with the highlight colour. Capturing colour has to
# be done first in case the parent recolouring affects
# the child.
for a in enter:
if hasattr(a,'get_color'):
self._active[a] = a.get_color()
elif hasattr(a,'get_edgecolor'):
self._active[a] = (a.get_edgecolor(),a.get_facecolor())
else: self._active[a] = None
for a in enter:
if hasattr(a,'get_color'):
a.set_color('red')
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor('red')
a.set_facecolor('lightblue')
else: self._active[a] = None
self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""
blit the canvas in bbox (default entire canvas)
"""
pass
def resize(self, w, h):
"""
set the canvas size in pixels
"""
pass
def draw_event(self, renderer):
"""
        This method will call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
        This method will call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
def key_press_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
This method will be called by artists who are picked and will
        fire off :class:`PickEvent` callbacks to registered listeners
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist, **kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
        This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
        This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
def enter_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when entering
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
'call when GUI is idle'
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def draw(self, *args, **kwargs):
"""
Render the :class:`~matplotlib.figure.Figure`
"""
pass
def draw_idle(self, *args, **kwargs):
"""
        :meth:`draw` only if idle; defaults to draw but backends can override
"""
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
pass
def get_width_height(self):
"""
return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
filetypes = {
'emf': 'Enhanced Metafile',
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'png': 'Portable Network Graphics',
'ps' : 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'
}
# All of these print_* functions do a lazy import because
# a) otherwise we'd have cyclical imports, since all of these
# classes inherit from FigureCanvasBase
# b) so we don't import a bunch of stuff the user may never use
def print_emf(self, *args, **kwargs):
from backends.backend_emf import FigureCanvasEMF # lazy import
emf = self.switch_backends(FigureCanvasEMF)
return emf.print_emf(*args, **kwargs)
def print_eps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_eps(*args, **kwargs)
def print_pdf(self, *args, **kwargs):
from backends.backend_pdf import FigureCanvasPdf # lazy import
pdf = self.switch_backends(FigureCanvasPdf)
return pdf.print_pdf(*args, **kwargs)
def print_png(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(*args, **kwargs)
def print_ps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_ps(*args, **kwargs)
def print_raw(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_raw(*args, **kwargs)
print_bmp = print_rgb = print_raw
def print_svg(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svg(*args, **kwargs)
def print_svgz(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svgz(*args, **kwargs)
def get_supported_filetypes(self):
return self.filetypes
def get_supported_filetypes_grouped(self):
groupings = {}
for ext, name in self.filetypes.items():
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
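    # For the default `filetypes` dict above, get_supported_filetypes_grouped()
    # returns something like (illustrative):
    #     {'Scalable Vector Graphics': ['svg', 'svgz'],
    #      'Raw RGBA bitmap': ['raw', 'rgba'],
    #      'Portable Network Graphics': ['png'],
    #      ...}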
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Arguments are:
*filename*
can also be a file object on image backends
*orientation*
only currently applies to PostScript printing.
*dpi*
the dots per inch to save the figure in; if None, use savefig.dpi
*facecolor*
the facecolor of the figure
*edgecolor*
the edgecolor of the figure
        *orientation*
            'landscape' | 'portrait' (not supported on all backends)
*format*
when set, forcibly set the file format to save to
"""
if format is None:
if cbook.is_string_like(filename):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if cbook.is_string_like(filename):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
method_name = 'print_%s' % format
if (format not in self.filetypes or
not hasattr(self, method_name)):
formats = self.filetypes.keys()
formats.sort()
raise ValueError(
'Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
if dpi is None:
dpi = rcParams['savefig.dpi']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
try:
result = getattr(self, method_name)(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
**kwargs)
finally:
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
#self.figure.canvas.draw() ## seems superfluous
return result
def get_default_filetype(self):
raise NotImplementedError
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def switch_backends(self, FigureCanvasClass):
"""
instantiate an instance of FigureCanvasClass
This is used for backend switching, eg, to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
not done, so any changes to one of the instances (eg, setting
figure size or line props), will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
return newCanvas
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
set to the :class:`~matplotlib.axes.Axes` the event occurs is
over, and additionally, the variables ``event.xdata`` and
``event.ydata`` will be defined. This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
:meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = canvas.mpl_connect('button_press_event', on_press)
"""
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
disconnect callback id cid
Example usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
def flush_events(self):
"""
Flush the GUI events for the figure. Implemented only for
backends with GUIs.
"""
raise NotImplementedError
def start_event_loop(self,timeout):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def stop_event_loop(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def start_event_loop_default(self,timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
such, it throws a deprecated warning.
Call signature::
start_event_loop_default(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
        msg = "Using default event loop until function specific"
        msg += " to this GUI is implemented"
        warnings.warn(msg, DeprecationWarning)
if timeout <= 0: timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter*timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
def stop_event_loop_default(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
self._looping = False
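# A minimal sketch (an assumption, not matplotlib code) of what a hardcopy-only
# backend adds on top of FigureCanvasBase: a print_<fmt> method plus
# get_default_filetype(), which print_figure() uses to resolve the output format.
#
#     class FigureCanvasMyBackend(FigureCanvasBase):
#         def print_myfmt(self, filename, **kwargs):
#             renderer = ...              # build a backend-specific renderer
#             self.figure.draw(renderer)
#             ...                         # write the rendered result to filename
#         def get_default_filetype(self):
#             return 'myfmt'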
class FigureManagerBase:
"""
Helper class for matlab mode, wraps everything up into a neat bundle
    Public attributes:
*canvas*
A :class:`FigureCanvasBase` instance
*num*
        The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
self.canvas.mpl_connect('key_press_event', self.key_press)
def destroy(self):
pass
def full_screen_toggle (self):
pass
def resize(self, w, h):
'For gui backends: resize window in pixels'
pass
def key_press(self, event):
# these bindings happen whether you are over an axes or not
#if event.key == 'q':
# self.destroy() # how cruel to have to destroy oneself!
# return
if event.key == 'f':
self.full_screen_toggle()
# *h*ome or *r*eset mnemonic
elif event.key == 'h' or event.key == 'r' or event.key == "home":
self.canvas.toolbar.home()
# c and v to enable left handed quick navigation
elif event.key == 'left' or event.key == 'c' or event.key == 'backspace':
self.canvas.toolbar.back()
elif event.key == 'right' or event.key == 'v':
self.canvas.toolbar.forward()
# *p*an mnemonic
elif event.key == 'p':
self.canvas.toolbar.pan()
# z*o*om mnemonic
elif event.key == 'o':
self.canvas.toolbar.zoom()
elif event.key == 's':
self.canvas.toolbar.save_figure(self.canvas.toolbar)
if event.inaxes is None:
return
# the mouse has to be over an axes to trigger these
if event.key == 'g':
event.inaxes.grid()
self.canvas.draw()
elif event.key == 'l':
ax = event.inaxes
scale = ax.get_yscale()
if scale=='log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale=='linear':
ax.set_yscale('log')
ax.figure.canvas.draw()
        elif (event.key is not None and event.key.isdigit() and event.key != '0') or event.key == 'a':
# 'a' enables all axes
if event.key!='a':
n=int(event.key)-1
for i, a in enumerate(self.canvas.figure.get_axes()):
if event.x is not None and event.y is not None and a.in_axes(event):
if event.key=='a':
a.set_navigate(True)
else:
a.set_navigate(i==n)
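    # Quick reference for the default bindings implemented in key_press above:
    #   f -> toggle fullscreen         h/r/home -> reset view
    #   left/c/backspace -> back       right/v  -> forward
    #   p -> pan/zoom mode             o -> zoom-to-rect mode
    #   s -> save figure
    # and, with the mouse over an axes:
    #   g -> toggle grid, l -> toggle y log/linear scale,
    #   1-9 -> make only that axes navigable, a -> make all axes navigable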
def show_popup(self, msg):
"""
Display message in a popup -- GUI only
"""
pass
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
pass
# cursors
class Cursors: #namespace
HAND, POINTER, SELECT_REGION, MOVE = range(4)
cursors = Cursors()
class NavigationToolbar2:
"""
Base class for the navigation cursor, version 2
backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`dynamic_update` (optional)
dynamically update the window while navigating
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
"""
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a dict from axes index to a list of view limits
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
self._button_pressed = None # determined by the button pressed at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
'display a message on toolbar or in status bar'
pass
def back(self, *args):
'move back up the view lim stack'
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
'draw a rectangle rubberband to indicate zoom limits'
pass
def forward(self, *args):
'move forward in the view lim stack'
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
'restore the original view'
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def mouse_move(self, event):
#print 'mouse_move', event.button
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active=='ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = self._xypress[0]
self.draw_rubberband(event, x, y, lastx, lasty)
elif (self._active=='PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
if event.inaxes and event.inaxes.get_navigate():
try: s = event.inaxes.format_coord(event.xdata, event.ydata)
except ValueError: pass
except OverflowError: pass
else:
if len(self.mode):
self.set_message('%s : %s' % (self.mode, s))
else:
self.set_message(s)
else: self.set_message(self.mode)
def pan(self,*args):
'Activate the pan/zoom tool. pan with left button, zoom with right'
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom mode'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
        'this will be called whenever a mouse button is pressed'
pass
def press_pan(self, event):
'the press mouse button in pan/zoom mode callback'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if x is not None and y is not None and a.in_axes(event) and a.get_navigate():
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.drag_pan)
self.press(event)
def press_zoom(self, event):
'the press mouse button in zoom to rect mode callback'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if x is not None and y is not None and a.in_axes(event) \
and a.get_navigate() and a.can_zoom():
self._xypress.append(( x, y, a, i, a.viewLim.frozen(), a.transData.frozen()))
self.press(event)
def push_current(self):
'push the current view limits and position onto the stack'
lims = []; pos = []
for a in self.canvas.figure.get_axes():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lims.append( (xmin, xmax, ymin, ymax) )
# Store both the original and modified positions
pos.append( (
a.get_position(True).frozen(),
a.get_position().frozen() ) )
self._views.push(lims)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
'this will be called whenever mouse button is released'
pass
def release_pan(self, event):
'the release mouse button callback in pan/zoom mode'
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress: return
self._xypress = []
self._button_pressed=None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
'the drag callback in pan/zoom mode'
for a, ind in self._xypress:
#safer to use the recorded button at the press than current button:
            # multiple buttons can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def release_zoom(self, event):
'the release mouse button callback in zoom to rect mode'
if not self._xypress: return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x-lastx)<5 or abs(y-lasty)<5:
self._xypress = None
self.release(event)
self.draw()
return
x0, y0, x1, y1 = lim.extents
# zoom to rect
inverse = a.transData.inverted()
lastx, lasty = inverse.transform_point( (lastx, lasty) )
x, y = inverse.transform_point( (x, y) )
Xmin,Xmax=a.get_xlim()
Ymin,Ymax=a.get_ylim()
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a,la): twinx=True
if a.get_shared_y_axes().joined(a,la): twiny=True
last_a.append(a)
if twinx:
x0, x1 = Xmin, Xmax
else:
if Xmin < Xmax:
if x<lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 < Xmin: x0=Xmin
if x1 > Xmax: x1=Xmax
else:
if x>lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 > Xmin: x0=Xmin
if x1 < Xmax: x1=Xmax
if twiny:
y0, y1 = Ymin, Ymax
else:
if Ymin < Ymax:
if y<lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 < Ymin: y0=Ymin
if y1 > Ymax: y1=Ymax
else:
if y>lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 > Ymin: y0=Ymin
if y1 < Ymax: y1=Ymax
if self._button_pressed == 1:
a.set_xlim((x0, x1))
a.set_ylim((y0, y1))
elif self._button_pressed == 3:
if a.get_xscale()=='log':
alpha=np.log(Xmax/Xmin)/np.log(x1/x0)
rx1=pow(Xmin/x0,alpha)*Xmin
rx2=pow(Xmax/x0,alpha)*Xmin
else:
alpha=(Xmax-Xmin)/(x1-x0)
rx1=alpha*(Xmin-x0)+Xmin
rx2=alpha*(Xmax-x0)+Xmin
if a.get_yscale()=='log':
alpha=np.log(Ymax/Ymin)/np.log(y1/y0)
ry1=pow(Ymin/y0,alpha)*Ymin
ry2=pow(Ymax/y0,alpha)*Ymin
else:
alpha=(Ymax-Ymin)/(y1-y0)
ry1=alpha*(Ymin-y0)+Ymin
ry2=alpha*(Ymax-y0)+Ymin
a.set_xlim((rx1, rx2))
a.set_ylim((ry1, ry2))
self.draw()
self._xypress = None
self._button_pressed = None
self.push_current()
self.release(event)
def draw(self):
'redraw the canvases, update the locators'
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw()
def _update_view(self):
'''update the viewlim and position from the view and
position stack for each axes
'''
lims = self._views()
if lims is None: return
pos = self._positions()
if pos is None: return
for i, a in enumerate(self.canvas.figure.get_axes()):
xmin, xmax, ymin, ymax = lims[i]
a.set_xlim((xmin, xmax))
a.set_ylim((ymin, ymax))
# Restore both the original and modified positions
a.set_position( pos[i][0], 'original' )
a.set_position( pos[i][1], 'active' )
self.draw()
def save_figure(self, *args):
'save the current figure'
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
enums values
"""
pass
def update(self):
'reset the axes stack'
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
'activate zoom to rect mode'
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress=self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease=self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event', self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event', self.release_zoom)
self.mode = 'Zoom to rect mode'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
'enable or disable back/forward button'
pass
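# Illustrative only (an assumption, not part of this module): the smallest
# toolbar a backend could ship, wiring up the hooks listed in the
# NavigationToolbar2 docstring.  A real backend builds toolkit buttons in
# _init_toolbar and maps them to self.home/back/forward/pan/zoom/save_figure.
class _ExampleToolbar(NavigationToolbar2):
    def _init_toolbar(self):
        # no GUI widgets in this sketch; a real backend creates its buttons here
        pass
    def set_cursor(self, cursor):
        # optionally map the Cursors enum value to a toolkit cursor
        pass
    def save_figure(self, *args):
        # delegate to the canvas hardcopy machinery
        self.canvas.print_figure('figure.png')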
| agpl-3.0 |
ljschumacher/tierpsy-tracker | tierpsy/analysis/feat_food/getFoodContourNN.py | 1 | 8481 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 18 16:55:14 2017
@author: ajaver
Get food contour using a pre-trained neural network
"""
import tables
import os
import numpy as np
import cv2
from skimage.morphology import disk
from tierpsy import AUX_FILES_DIR
RESIZING_SIZE = 512  # the network was trained with images of this size
MODEL_PATH = os.path.join(AUX_FILES_DIR, 'unet_RMSprop-5-04999-0.3997.h5')
def _get_sizes(im_size, d4a_size= 24, n_conv_layers=4):
    ''' Useful to determine the expected input and output sizes of a u-net.
    Additionally, if the image is larger than the network output, the corner
    points used to subdivide the image into tiles are given.
'''
#assuming 4 layers of convolutions
def _in_size(d4a_size):
mm = d4a_size
for n in range(n_conv_layers):
mm = mm*2 + 2 + 2
return mm
def _out_size(d4a_size):
mm = d4a_size -2 -2
for n in range(n_conv_layers):
mm = mm*2 - 2 - 2
return mm
    # this is the size of the central reduced layer. I chose this value manually
    input_size = _in_size(d4a_size)    # requires an input of 444
    output_size = _out_size(d4a_size)  # produces an output of 260
pad_size = int((input_size-output_size)/2)
if any(x < output_size for x in im_size):
msg = 'All the sides of the image ({}) must be larger or equal to ' \
'the network output {}.'
raise ValueError(msg.format(im_size, output_size))
n_tiles_x = int(np.ceil(im_size[0]/output_size))
n_tiles_y = int(np.ceil(im_size[1]/output_size))
txs = np.round(np.linspace(0, im_size[0] - output_size, n_tiles_x)).astype(np.int)
tys = np.round(np.linspace(0, im_size[1] - output_size, n_tiles_y)).astype(np.int)
tile_corners = [(tx, ty) for tx in txs for ty in tys]
return input_size, output_size, pad_size, tile_corners
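# Worked example (the numbers follow directly from the helpers above, assuming
# the default d4a_size=24 and n_conv_layers=4): _in_size(24) == 444 and
# _out_size(24) == 260, so pad_size == (444 - 260) // 2 == 92.  A 512x512
# resized frame then needs ceil(512/260) == 2 tiles per side, i.e. a 2x2 grid
# of 444x444 padded inputs whose 260x260 outputs are stitched back together.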
def _preprocess(X,
input_size,
pad_size,
tile_corners
):
'''
Pre-process an image to input for the pre-trained u-net model
'''
def _get_tile_in(img, x,y):
return img[np.newaxis, x:x+input_size, y:y+input_size, :]
def _cast_tf(D):
        D = D.astype(np.float32)
if D.ndim == 2:
D = D[..., None]
return D
#normalize image
X = _cast_tf(X)
X /= 255
X -= np.median(X)
pad_size_s = ((pad_size,pad_size), (pad_size,pad_size), (0,0))
X = np.lib.pad(X, pad_size_s, 'reflect')
X = [_get_tile_in(X, x, y) for x,y in tile_corners]
return X
def get_unet_prediction(Xi,
model_t,
n_flips = 1,
im_size=None,
n_conv_layers = 4,
d4a_size = 24,
_is_debug=False):
'''
Predict the food probability for each pixel using a pretrained u-net model (Helper)
'''
def _flip_d(img_o, nn):
if nn == 0:
img = img_o[::-1, :]
elif nn == 2:
img = img_o[:, ::-1]
elif nn == 3:
img = img_o[::-1, ::-1]
else:
img = img_o
return img
if im_size is None:
im_size = Xi.shape
input_size, output_size, pad_size, tile_corners = \
_get_sizes(im_size, d4a_size= d4a_size, n_conv_layers=n_conv_layers)
Y_pred = np.zeros(im_size)
for n_t in range(n_flips):
X = _flip_d(Xi, n_t)
if im_size is None:
im_size = X.shape
x_crop = _preprocess(X, input_size, pad_size, tile_corners)
x_crop = np.concatenate(x_crop)
y_pred = model_t.predict(x_crop)
Y_pred_s = np.zeros(X.shape)
N_s = np.zeros(X.shape)
for (i,j), yy,xx in zip(tile_corners, y_pred, x_crop):
Y_pred_s[i:i+output_size, j:j+output_size] += yy[:,:,1]
if _is_debug:
import matplotlib.pylab as plt
plt.figure()
plt.subplot(1,2,1)
plt.imshow(np.squeeze(xx))
plt.subplot(1,2,2)
plt.imshow(yy[:,:,1])
N_s[i:i+output_size, j:j+output_size] += 1
Y_pred += _flip_d(Y_pred_s/N_s, n_t)
return Y_pred
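# Note on n_flips (derived from _flip_d above): each value of n_t predicts the
# image in a different flipped orientation (vertical, identity, horizontal,
# both) and flips the prediction back before adding it to Y_pred.  The sum is
# not divided by n_flips, so with n_flips > 1 the result is an accumulated
# (not averaged) test-time-augmentation score; the caller below uses n_flips=1.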
def get_food_prob(mask_file, model, max_bgnd_images = 2, _is_debug = False):
'''
Predict the food probability for each pixel using a pretrained u-net model.
'''
with tables.File(mask_file, 'r') as fid:
if not '/full_data' in fid:
            raise ValueError('The mask file {} does not contain the /full_data dataset.'.format(mask_file))
bgnd_o = fid.get_node('/full_data')[:max_bgnd_images]
assert bgnd_o.ndim == 3
if bgnd_o.shape[0] > 1:
bgnd = [np.max(bgnd_o[i:i+1], axis=0) for i in range(bgnd_o.shape[0]-1)]
else:
bgnd = [np.squeeze(bgnd_o)]
min_size = min(bgnd[0].shape)
resize_factor = min(RESIZING_SIZE, min_size)/min_size
dsize = tuple(int(x*resize_factor) for x in bgnd[0].shape[::-1])
bgnd_s = [cv2.resize(x, dsize) for x in bgnd]
for b_img in bgnd_s:
Y_pred = get_unet_prediction(b_img, model, n_flips=1)
if _is_debug:
import matplotlib.pylab as plt
plt.figure()
plt.subplot(1,2,1)
plt.imshow(b_img, cmap='gray')
plt.subplot(1, 2,2)
plt.imshow(Y_pred, interpolation='none')
original_size = bgnd[0].shape
return Y_pred, original_size, bgnd_s
def get_food_contour_nn(mask_file, model=None, _is_debug=False):
'''
Get the food contour using a pretrained u-net model.
This function is faster if a preloaded model is given since it is very slow
to load the model and tensorflow.
'''
if model is None:
from keras.models import load_model
model = load_model(MODEL_PATH)
food_prob, original_size, bgnd_images = get_food_prob(mask_file, model, _is_debug=_is_debug)
#bgnd_images are only used in debug mode
#%%
patch_m = (food_prob>0.5).astype(np.uint8)
_, cnts, _ = cv2.findContours(patch_m, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#pick the largest contour
cnt_areas = [cv2.contourArea(x) for x in cnts]
ind = np.argmax(cnt_areas)
patch_m = np.zeros(patch_m.shape, np.uint8)
patch_m = cv2.drawContours(patch_m, cnts , ind, color=1, thickness=cv2.FILLED)
patch_m = cv2.morphologyEx(patch_m, cv2.MORPH_CLOSE, disk(3), iterations=5)
_, cnts, _ = cv2.findContours(patch_m, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
assert len(cnts) == 1
cnts = cnts[0]
hull = cv2.convexHull(cnts)
hull_area = cv2.contourArea(hull)
cnt_solidity = cv2.contourArea(cnts)/hull_area
food_cnt = np.squeeze(cnts).astype(np.float)
# rescale contour to be the same dimension as the original images
food_cnt[:,0] *= original_size[0]/food_prob.shape[0]
food_cnt[:,1] *= original_size[1]/food_prob.shape[1]
#%%
if _is_debug:
import matplotlib.pylab as plt
img = bgnd_images[0]
#np.squeeze(food_cnt)
patch_n = np.zeros(img.shape, np.uint8)
patch_n = cv2.drawContours(patch_n, [cnts], 0, color=1, thickness=cv2.FILLED)
top = img.max()
bot = img.min()
img_n = (img-bot)/(top-bot)
img_rgb = np.repeat(img_n[..., None], 3, axis=2)
#img_rgb = img_rgb.astype(np.uint8)
img_rgb[...,0] = ((patch_n==0)*0.5 + 0.5)*img_rgb[...,0]
plt.figure()
plt.imshow(img_rgb)
plt.plot(hull[:,:,0], hull[:,:,1], 'r')
plt.title('solidity = {:.3}'.format(cnt_solidity))
#%%
return food_cnt, food_prob, cnt_solidity
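# Hedged usage sketch: when many videos are processed it is worth loading the
# Keras model once and passing it in, since get_food_contour_nn otherwise
# reloads the network for every file (the mask_files list below is assumed):
#
#     from keras.models import load_model
#     model = load_model(MODEL_PATH)
#     for mask_file in mask_files:
#         food_cnt, food_prob, solidity = get_food_contour_nn(mask_file, model=model)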
if __name__ == '__main__':
mask_file = '/Users/ajaver/OneDrive - Imperial College London/optogenetics/Arantza/MaskedVideos/oig8/oig-8_ChR2_control_males_3_Ch1_11052017_161018.hdf5'
#mask_file = '/Volumes/behavgenom_archive$/Avelino/Worm_Rig_Tests/short_movies_new/MaskedVideos/Double_picking_020317/trp-4_worms6_food1-3_Set4_Pos5_Ch3_02032017_153225.hdf5'
food_cnt, food_prob,cnt_solidity = get_food_contour_nn(mask_file, _is_debug=True)
| mit |
itdxer/neupy | examples/cnn/cifar10_cnn.py | 1 | 2477 | import numpy as np
from sklearn import metrics
from sklearn.preprocessing import OneHotEncoder
from neupy.layers import *
from neupy import algorithms
from neupy.utils import asfloat
from load_cifar10 import read_cifar10
def process_cifar10_data(x_train, x_test):
x_train, x_test = asfloat(x_train), asfloat(x_test)
mean = x_train.mean(axis=(0, 1, 2)).reshape(1, 1, 1, -1)
std = x_train.std(axis=(0, 1, 2)).reshape(1, 1, 1, -1)
x_train -= mean
x_train /= std
x_test -= mean
x_test /= std
return x_train, x_test
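# Note (illustrative): x_train is NHWC, so the mean/std above are computed per
# colour channel over all images and pixels and reshaped to (1, 1, 1, 3) so
# they broadcast against arrays of shape (n_samples, 32, 32, 3).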
def one_hot_encoder(y_train, y_test):
y_train, y_test = asfloat(y_train), asfloat(y_test)
target_scaler = OneHotEncoder(categories='auto', sparse=False)
y_train = target_scaler.fit_transform(y_train.reshape(-1, 1))
y_test = target_scaler.transform(y_test.reshape(-1, 1))
return y_train, y_test
if __name__ == '__main__':
x_train, x_test, y_train, y_test = read_cifar10()
x_train, x_test = process_cifar10_data(x_train, x_test)
y_train, y_test = one_hot_encoder(y_train, y_test)
network = algorithms.Adam(
[
Input((32, 32, 3)),
Convolution((3, 3, 32)) >> Relu(),
Convolution((3, 3, 32)) >> Relu(),
MaxPooling((2, 2)),
Convolution((3, 3, 64)) >> Relu(),
Convolution((3, 3, 64)) >> Relu(),
MaxPooling((2, 2)),
Reshape(),
Relu(256) >> Dropout(0.5),
Softmax(10),
],
step=algorithms.step_decay(
initial_value=0.001,
            # Parameter controls step reduction frequency. The larger
# the value the slower step parameter decreases. Step will
# be reduced after every mini-batch update. In the training
# data we have 500 mini-batches.
reduction_freq=5 * 500,
),
regularizer=algorithms.l2(0.01),
loss='categorical_crossentropy',
batch_size=100,
shuffle_data=True,
verbose=True,
)
network.train(x_train, y_train, x_test, y_test, epochs=30)
y_predicted = network.predict(x_test).argmax(axis=1)
y_test_labels = np.asarray(y_test.argmax(axis=1)).reshape(len(y_test))
print(metrics.classification_report(y_test_labels, y_predicted))
score = metrics.accuracy_score(y_test_labels, y_predicted)
print("Validation accuracy: {:.2%}".format(score))
print(metrics.confusion_matrix(y_predicted, y_test_labels))
| mit |
wavelets/pandashells | pandashells/test/p_config_test.py | 10 | 1841 | #! /usr/bin/env python
import copy
from contextlib import contextmanager
import json
import os
import sys
from mock import patch, MagicMock
from unittest import TestCase
from pandashells.lib import config_lib
from pandashells.bin.p_config import (
main,
)
@contextmanager
def mute_output():
sys.stdout = MagicMock()
yield
sys.stdout = sys.__stdout__
class MainTests(TestCase):
# tests wipe out config file (might want to change this later)
def setUp(self):
if os.path.isfile(config_lib.CONFIG_FILE_NAME):
cmd = 'rm {} 2>/dev/null'.format(config_lib.CONFIG_FILE_NAME)
os.system(cmd)
def tearDown(self):
if os.path.isfile(config_lib.CONFIG_FILE_NAME):
cmd = 'rm {} 2>/dev/null'.format(config_lib.CONFIG_FILE_NAME)
os.system(cmd)
@patch(
'pandashells.bin.p_config.sys.argv',
[
'p.config',
'--force_defaults',
]
)
def test_force_defaults(self):
with mute_output():
main()
with open(config_lib.CONFIG_FILE_NAME) as config_file:
config_dict = json.loads(config_file.read())
self.assertEqual(config_dict, config_lib.DEFAULT_DICT)
@patch(
'pandashells.bin.p_config.sys.argv',
[
'p.config',
'--io_output_na_rep', '',
'--io_input_type', 'table',
]
)
def test_custom(self):
with mute_output():
main()
with open(config_lib.CONFIG_FILE_NAME) as config_file:
expected_dict = copy.copy(config_lib.DEFAULT_DICT)
expected_dict['io_output_na_rep'] = ''
expected_dict['io_input_type'] = 'table'
config_dict = json.loads(config_file.read())
self.assertEqual(config_dict, expected_dict)
| bsd-2-clause |