repo_name (string, length 6-112) | path (string, length 4-204) | copies (string, length 1-3) | size (string, length 4-6) | content (string, length 714-810k) | license (string, 15 classes)
---|---|---|---|---|---|
LiaoPan/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
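# --- Editor's addition, not part of the original example: a hedged sketch. ---
# The reliability curves above are qualitative; a single summary number such as
# the Brier score can complement them when comparing the same four classifiers.
# This assumes sklearn.metrics.brier_score_loss is available (scikit-learn
# >= 0.16). The min-max scaling of decision_function outputs mirrors the loop
# above and is only a rough normalization, not a proper calibration.
from sklearn.metrics import brier_score_loss

for clf, name in [(lr, 'Logistic'), (gnb, 'Naive Bayes'),
                  (svc, 'Support Vector Classification'),
                  (rfc, 'Random Forest')]:
    # the classifiers were already fit in the plotting loop above
    if hasattr(clf, "predict_proba"):
        prob_pos = clf.predict_proba(X_test)[:, 1]
    else:  # scale decision_function output into [0, 1]
        prob_pos = clf.decision_function(X_test)
        prob_pos = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
    print("%s: Brier score = %.3f" % (name, brier_score_loss(y_test, prob_pos)))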
| bsd-3-clause |
hugobowne/scikit-learn | sklearn/decomposition/truncated_svd.py | 19 | 7884 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional (default 5)
Number of iterations for randomized SVD solver. Not used by ARPACK.
The default is larger than the default in `randomized_svd` to handle
sparse matrices that may have a large, slowly decaying spectrum.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=7,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.0782... 0.0552... 0.0544... 0.0499... 0.0413...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.279...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
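# --- Editor's addition, not part of the library module: a hedged usage sketch.
# It illustrates the "fit once, keep the instance around" advice from the Notes
# above: because of sign indeterminacy, refitting may flip component signs, so
# the same fitted instance should be reused for all transforms. The demo data
# comes from sklearn.random_projection.sparse_random_matrix, as in the Examples
# section; the block is guarded so importing the module is unaffected.
if __name__ == "__main__":
    from sklearn.random_projection import sparse_random_matrix

    X_demo = sparse_random_matrix(100, 100, density=0.01, random_state=42)
    svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
    X_reduced = svd.fit_transform(X_demo)   # fit once ...
    X_again = svd.transform(X_demo)         # ... then reuse the same instance
    print("components shape:", svd.components_.shape)
    print("explained variance ratio sum: %.3f"
          % svd.explained_variance_ratio_.sum())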
| bsd-3-clause |
udacity/ggplot | ggplot/tests/test_basic.py | 12 | 9308 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six.moves import xrange
from nose.tools import assert_equal, assert_true, assert_raises
from . import get_assert_same_ggplot, cleanup
assert_same_ggplot = get_assert_same_ggplot(__file__)
from ggplot import *
from ggplot.exampledata import diamonds
import numpy as np
import pandas as pd
def _build_testing_df():
df = pd.DataFrame({
"x": np.arange(0, 100),
"y": np.arange(0, 100),
"z": np.arange(0, 100)
})
df['cat'] = np.where(df.x*2 > 50, 'blah', 'blue')
df['cat'] = np.where(df.y > 50, 'hello', df.cat)
df['cat2'] = np.where(df.y < 15, 'one', 'two')
df['y'] = np.sin(df.y)
df['z'] = df['y'] + 100
df['c'] = np.where(df.x%2==0,"red", "blue")
return df
def _build_meat_df():
meat['date'] = pd.to_datetime(meat.date)
return meat
@cleanup
def test_geom_density():
df = _build_testing_df()
gg = ggplot(aes(x="x", color="c"), data=df)
gg = gg + geom_density() + xlab("x label") + ylab("y label")
assert_same_ggplot(gg, "geom_density")
@cleanup
def test_geom_histogram():
df = _build_testing_df()
# TODO: use fill aesthetic for a better test
gg = ggplot(aes(x="x", y="y", shape="cat2", color="cat"), data=df)
assert_same_ggplot(gg + geom_histogram(), "geom_hist")
assert_same_ggplot(gg + geom_histogram() + ggtitle("My Histogram"), "geom_hist_title")
@cleanup
def test_geom_point():
df = _build_testing_df()
gg = ggplot(aes(x="x", y="y", shape="cat2", color="cat"), data=df)
assert_same_ggplot(gg + geom_point(), "geom_point")
gg = gg + geom_point() + geom_vline(xintercept=50, ymin=-1.5, ymax=1.5)
assert_same_ggplot(gg, "geom_point_vline")
@cleanup
def test_geom_area():
df = _build_testing_df()
gg = ggplot(aes(x='x', ymax='y', ymin='z', color="cat2"), data=df)
assert_same_ggplot(gg + geom_area(), "geom_area")
@cleanup
def test_geom_text():
gg = ggplot(aes(x='wt',y='mpg',label='name'),data=mtcars) + geom_text()
assert_same_ggplot(gg, "geom_text")
@cleanup
def test_geom_line():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_line(), "factor_geom_line")
@cleanup
def test_geom_rect():
df = pd.DataFrame({
'xmin':[3, 5, 3, 3, 9, 4, 8, 3, 9, 2, 9, 1, 11, 4, 7, 1],
'xmax':[10, 8, 10, 4, 10, 5, 9, 4, 10, 4, 11, 2, 12, 6, 9, 12],
'ymin':[3, 3, 6, 2, 2, 6, 6, 8, 8, 4, 4, 2, 2, 1, 1, 4],
'ymax':[5, 7, 7, 7, 7, 8, 8, 9, 9, 6, 6, 5, 5, 2, 2, 5]})
p = ggplot(df, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax'))
p += geom_rect(xmin=0, xmax=13, ymin=0, ymax=10)
p += geom_rect(colour="white", fill="white")
p += xlim(0, 13)
assert_same_ggplot(p, "geom_rect_inv")
@cleanup
def test_factor_geom_point():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_point(), "factor_geom_point")
@cleanup
def test_factor_geom_point_line():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_line() + geom_point(), "factor_geom_point_line")
@cleanup
def test_factor_point_line_title_lab():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
p = p + geom_point() + geom_line(color='lightblue') + ggtitle("Beef: It's What's for Dinner")
p = p + xlab("Date") + ylab("Head of Cattle Slaughtered")
assert_same_ggplot(p, "factor_complicated")
@cleanup
def test_labs():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
p = p + geom_point() + geom_line(color='lightblue')
p = p + labs(title="Beef: It's What's for Dinner", x="Date", y="Head of Cattle Slaughtered")
assert_same_ggplot(p, "labs")
@cleanup
def test_factor_bar():
p = ggplot(aes(x='factor(cyl)'), data=mtcars)
assert_same_ggplot(p + geom_histogram(), "factor_geom_bar")
@cleanup
def test_stats_smooth():
df = _build_testing_df()
gg = ggplot(aes(x="x", y="y", color="cat"), data=df)
gg = gg + stat_smooth(se=False) + ggtitle("My Smoothed Chart")
assert_same_ggplot(gg, "stat_smooth")
@cleanup
def test_stats_bin2d():
import matplotlib.pyplot as plt
if not hasattr(plt, "hist2d"):
import nose
raise nose.SkipTest("stat_bin2d only works with newer matplotlib (1.3) versions.")
df = _build_testing_df()
gg = ggplot(aes(x='x', y='y', shape='cat', color='cat2'), data=df)
assert_same_ggplot(gg + stat_bin2d(), "stat_bin2d")
@cleanup
def test_alpha_density():
gg = ggplot(aes(x='mpg'), data=mtcars)
assert_same_ggplot(gg + geom_density(fill=True, alpha=0.3), "geom_density_alpha")
@cleanup
def test_facet_wrap():
df = _build_testing_df()
gg = ggplot(aes(x='x', ymax='y', ymin='z'), data=df)
#assert_same_ggplot(gg + geom_bar() + facet_wrap(x="cat2"), "geom_bar_facet")
assert_same_ggplot(gg + geom_area() + facet_wrap(x="cat2"), "geom_area_facet")
@cleanup
def test_facet_wrap2():
meat = _build_meat_df()
meat_lng = pd.melt(meat, id_vars=['date'])
p = ggplot(aes(x='date', y='value', colour='variable'), data=meat_lng)
assert_same_ggplot(p + geom_density(fill=True, alpha=0.3) + facet_wrap("variable"), "geom_density_facet")
assert_same_ggplot(p + geom_line(alpha=0.3) + facet_wrap("variable"), "geom_line_facet")
@cleanup
def test_facet_grid_exceptions():
meat = _build_meat_df()
meat_lng = pd.melt(meat, id_vars=['date'])
p = ggplot(aes(x="date", y="value", colour="variable", shape="variable"), meat_lng)
with assert_raises(Exception):
print(p + geom_point() + facet_grid(y="variable"))
with assert_raises(Exception):
print(p + geom_point() + facet_grid(y="variable", x="NOT_AVAILABLE"))
with assert_raises(Exception):
print(p + geom_point() + facet_grid(y="NOT_AVAILABLE", x="variable"))
@cleanup
def test_facet_grid():
# only use a small subset of the data to speedup tests
# N=53940 -> N=7916 and only 2x2 facets
_mask1 = (diamonds.cut == "Ideal") | (diamonds.cut == "Good")
_mask2 = (diamonds.clarity == "SI2") | (diamonds.clarity == "VS1")
_df = diamonds[_mask1 & _mask2]
p = ggplot(aes(x='x', y='y', colour='z'), data=_df)
p = p + geom_point() + scale_colour_gradient(low="white", high="red")
p = p + facet_grid("cut", "clarity")
assert_same_ggplot(p, "diamonds_big")
p = ggplot(aes(x='carat'), data=_df)
p = p + geom_density() + facet_grid("cut", "clarity")
assert_same_ggplot(p, "diamonds_facet")
@cleanup
def test_smooth_se():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
assert_same_ggplot(p + geom_point() + stat_smooth(), "point_smooth_se")
assert_same_ggplot(p + stat_smooth(), "smooth_se")
@cleanup
def test_scale_xy_continous():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
p = p + geom_point() + scale_x_continuous("This is the X")
p = p + scale_y_continuous("Squared", limits=[0, 1500])
assert_same_ggplot(p, "scale1")
@cleanup
def test_ylim():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
assert_same_ggplot(p + geom_point() + ylim(0, 1500), "ylim")
@cleanup
def test_partial_limits() :
p = ggplot(diamonds, aes('carat', 'price'))
assert_same_ggplot(p + geom_point(alpha=1/20.) + xlim(high = 4) + ylim(0), "partial_limits")
@cleanup
def test_partial_limits_facet() :
p = ggplot(diamonds, aes('carat', 'price', color="clarity"))
p = p + geom_point(alpha=1/20.) + facet_wrap(x="cut", scales="free") + xlim(low=0) + ylim(low=0)
assert_same_ggplot(p, "partial_limits_facet")
@cleanup
def test_scale_date():
meat = _build_meat_df()
gg = ggplot(aes(x='date', y='beef'), data=meat) + geom_line()
assert_same_ggplot(gg+scale_x_date(labels="%Y-%m-%d"), "scale_date")
@cleanup
def test_diamond():
p = ggplot(aes(x='x', y='y', colour='z'), data=diamonds.head(4))
p = p + geom_point() + scale_colour_gradient(low="white", high="red")
p = p + facet_wrap("cut")
assert_same_ggplot(p, "diamonds_small")
def test_aes_positional_args():
result = aes("weight", "hp")
expected = {"x": "weight", "y": "hp"}
assert_equal(result, expected)
result3 = aes("weight", "hp", "qsec")
expected3 = {"x": "weight", "y": "hp", "color": "qsec"}
assert_equal(result3, expected3)
def test_aes_keyword_args():
result = aes(x="weight", y="hp")
expected = {"x": "weight", "y": "hp"}
assert_equal(result, expected)
result3 = aes(x="weight", y="hp", color="qsec")
expected3 = {"x": "weight", "y": "hp", "color": "qsec"}
assert_equal(result3,expected3)
def test_aes_mixed_args():
result = aes("weight", "hp", color="qsec")
expected = {"x": "weight", "y": "hp", "color": "qsec"}
assert_equal(result, expected)
@cleanup
def test_scale_color_brewer() :
p = ggplot(diamonds, aes(x = "x", y="y"))
p = p + geom_line() + scale_color_brewer(type='qual', palette=2)
assert_same_ggplot(p, "scale_color_brewer")
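# --- Editor's addition, not part of the original test module: a hedged sketch.
# It combines only constructs already used above (the _build_testing_df helper,
# aes, geom_point, ggtitle, xlab/ylab and the @cleanup decorator) to show the
# typical shape of one of these tests without relying on a baseline image; it
# only checks that composition does not raise, since image comparison via
# assert_same_ggplot is already exercised by the tests above.
@cleanup
def test_compose_smoke_sketch():
    df = _build_testing_df()
    gg = ggplot(aes(x='x', y='y', color='cat'), data=df)
    gg = gg + geom_point() + ggtitle("smoke") + xlab("x") + ylab("y")
    assert_true(gg is not None)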
| bsd-2-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/pandas/tools/tests/test_merge.py | 1 | 74672 | # pylint: disable=E1103
import nose
from datetime import datetime
from numpy.random import randn
from numpy import nan
import numpy as np
import random
from pandas.compat import range, lrange, lzip, zip
from pandas import compat, _np_version_under1p7
from pandas.tseries.index import DatetimeIndex
from pandas.tools.merge import merge, concat, ordered_merge, MergeError
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
assert_almost_equal, rands,
makeCustomDataframe as mkdf,
assertRaisesRegexp)
from pandas import isnull, DataFrame, Index, MultiIndex, Panel, Series, date_range
import pandas.algos as algos
import pandas.util.testing as tm
a_ = np.array
N = 50
NGROUPS = 8
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
def get_test_data(ngroups=NGROUPS, n=N):
unique_groups = lrange(ngroups)
arr = np.asarray(np.tile(unique_groups, n // ngroups))
if len(arr) < n:
arr = np.asarray(list(arr) + unique_groups[:n - len(arr)])
random.shuffle(arr)
return arr
class TestMerge(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
# aggregate multiple columns
self.df = DataFrame({'key1': get_test_data(),
'key2': get_test_data(),
'data1': np.random.randn(N),
'data2': np.random.randn(N)})
# exclude a couple keys for fun
self.df = self.df[self.df['key2'] > 1]
self.df2 = DataFrame({'key1': get_test_data(n=N // 5),
'key2': get_test_data(ngroups=NGROUPS // 2,
n=N // 5),
'value': np.random.randn(N // 5)})
index, data = tm.getMixedTypeDict()
self.target = DataFrame(data, index=index)
# Join on string value
self.source = DataFrame({'MergedA': data['A'], 'MergedD': data['D']},
index=data['C'])
self.left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
self.right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
def test_cython_left_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
ls, rs = algos.left_outer_join(left, right, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 7, 7, 8, 8, 9, 10])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
4, 5, 4, 5, 4, 5, -1, -1])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
self.assert_(np.array_equal(ls, exp_ls))
self.assert_(np.array_equal(rs, exp_rs))
def test_cython_right_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
rs, ls = algos.left_outer_join(right, left, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
# 0 1 1 1
exp_li = a_([0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5,
# 2 2 4
6, 7, 8, 6, 7, 8, -1])
exp_ri = a_([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
4, 4, 4, 5, 5, 5, 6])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
self.assert_(np.array_equal(ls, exp_ls))
self.assert_(np.array_equal(rs, exp_rs))
def test_cython_inner_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64)
max_group = 5
ls, rs = algos.inner_join(left, right, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 7, 7, 8, 8])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
4, 5, 4, 5, 4, 5])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
self.assert_(np.array_equal(ls, exp_ls))
self.assert_(np.array_equal(rs, exp_rs))
def test_left_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='left')
joined_both = merge(self.df, self.df2)
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='left')
def test_right_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='right')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='right')
joined_both = merge(self.df, self.df2, how='right')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='right')
def test_full_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='outer')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='outer')
joined_both = merge(self.df, self.df2, how='outer')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='outer')
def test_inner_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='inner')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='inner')
joined_both = merge(self.df, self.df2, how='inner')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='inner')
def test_handle_overlap(self):
joined = merge(self.df, self.df2, on='key2',
suffixes=['.foo', '.bar'])
self.assert_('key1.foo' in joined)
self.assert_('key1.bar' in joined)
def test_handle_overlap_arbitrary_key(self):
joined = merge(self.df, self.df2,
left_on='key2', right_on='key1',
suffixes=['.foo', '.bar'])
self.assert_('key1.foo' in joined)
self.assert_('key2.bar' in joined)
def test_merge_common(self):
joined = merge(self.df, self.df2)
exp = merge(self.df, self.df2, on=['key1', 'key2'])
tm.assert_frame_equal(joined, exp)
def test_join_on(self):
target = self.target
source = self.source
merged = target.join(source, on='C')
self.assert_(np.array_equal(merged['MergedA'], target['A']))
self.assert_(np.array_equal(merged['MergedD'], target['D']))
# join with duplicates (fix regression from DataFrame/Matrix merge)
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
joined = df.join(df2, on='key')
expected = DataFrame({'key': ['a', 'a', 'b', 'b', 'c'],
'value': [0, 0, 1, 1, 2]})
assert_frame_equal(joined, expected)
# Test when some are missing
df_a = DataFrame([[1], [2], [3]], index=['a', 'b', 'c'],
columns=['one'])
df_b = DataFrame([['foo'], ['bar']], index=[1, 2],
columns=['two'])
df_c = DataFrame([[1], [2]], index=[1, 2],
columns=['three'])
joined = df_a.join(df_b, on='one')
joined = joined.join(df_c, on='one')
self.assert_(np.isnan(joined['two']['c']))
self.assert_(np.isnan(joined['three']['c']))
# merge column not present
self.assertRaises(Exception, target.join, source, on='E')
# overlap
source_copy = source.copy()
source_copy['A'] = 0
self.assertRaises(Exception, target.join, source_copy, on='A')
def test_join_on_fails_with_different_right_index(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
merge(df, df2, left_on='a', right_index=True)
def test_join_on_fails_with_different_left_index(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
'b': np.random.randn(3)},
index=tm.makeCustomIndex(10, 2))
df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
'b': np.random.randn(10)})
merge(df, df2, right_on='b', left_index=True)
def test_join_on_fails_with_different_column_counts(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
merge(df, df2, right_on='a', left_on=['a', 'b'])
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on='C')
del expected['C']
join_col = self.target.pop('C')
result = self.target.join(self.source, on=join_col)
assert_frame_equal(result, expected)
def test_join_with_len0(self):
# nothing to merge
merged = self.target.join(self.source.reindex([]), on='C')
for col in self.source:
self.assert_(col in merged)
self.assert_(merged[col].isnull().all())
merged2 = self.target.join(self.source.reindex([]), on='C',
how='inner')
self.assert_(merged2.columns.equals(merged.columns))
self.assertEqual(len(merged2), 0)
def test_join_on_inner(self):
df = DataFrame({'key': ['a', 'a', 'd', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1]}, index=['a', 'b'])
joined = df.join(df2, on='key', how='inner')
expected = df.join(df2, on='key')
expected = expected[expected['value'].notnull()]
self.assert_(np.array_equal(joined['key'], expected['key']))
self.assert_(np.array_equal(joined['value'], expected['value']))
self.assert_(joined.index.equals(expected.index))
def test_join_on_singlekey_list(self):
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
# corner cases
joined = df.join(df2, on=['key'])
expected = df.join(df2, on='key')
assert_frame_equal(joined, expected)
def test_join_on_series(self):
result = self.target.join(self.source['MergedA'], on='C')
expected = self.target.join(self.source[['MergedA']], on='C')
assert_frame_equal(result, expected)
def test_join_on_series_buglet(self):
# GH #638
df = DataFrame({'a': [1, 1]})
ds = Series([2], index=[1], name='b')
result = df.join(ds, on='a')
expected = DataFrame({'a': [1, 1],
'b': [2, 2]}, index=df.index)
tm.assert_frame_equal(result, expected)
def test_join_index_mixed(self):
df1 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(10),
columns=['A', 'B', 'C', 'D'])
self.assert_(df1['B'].dtype == np.int64)
self.assert_(df1['D'].dtype == np.bool_)
df2 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(0, 10, 2),
columns=['A', 'B', 'C', 'D'])
# overlap
joined = df1.join(df2, lsuffix='_one', rsuffix='_two')
expected_columns = ['A_one', 'B_one', 'C_one', 'D_one',
'A_two', 'B_two', 'C_two', 'D_two']
df1.columns = expected_columns[:4]
df2.columns = expected_columns[4:]
expected = _join_by_hand(df1, df2)
assert_frame_equal(joined, expected)
# no overlapping blocks
df1 = DataFrame(index=np.arange(10))
df1['bool'] = True
df1['string'] = 'foo'
df2 = DataFrame(index=np.arange(5, 15))
df2['int'] = 1
df2['float'] = 1.
for kind in JOIN_TYPES:
joined = df1.join(df2, how=kind)
expected = _join_by_hand(df1, df2, how=kind)
assert_frame_equal(joined, expected)
joined = df2.join(df1, how=kind)
expected = _join_by_hand(df2, df1, how=kind)
assert_frame_equal(joined, expected)
def test_join_empty_bug(self):
# generated an exception in 0.4.3
x = DataFrame()
x.join(DataFrame([3], index=[0], columns=['A']), how='outer')
def test_join_unconsolidated(self):
# GH #331
a = DataFrame(randn(30, 2), columns=['a', 'b'])
c = Series(randn(30))
a['c'] = c
d = DataFrame(randn(30, 1), columns=['q'])
# it works!
a.join(d)
d.join(a)
def test_join_multiindex(self):
index1 = MultiIndex.from_arrays([['a', 'a', 'a', 'b', 'b', 'b'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
index2 = MultiIndex.from_arrays([['b', 'b', 'b', 'c', 'c', 'c'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
df1 = DataFrame(data=np.random.randn(6), index=index1,
columns=['var X'])
df2 = DataFrame(data=np.random.randn(6), index=index2,
columns=['var Y'])
df1 = df1.sortlevel(0)
df2 = df2.sortlevel(0)
joined = df1.join(df2, how='outer')
ex_index = index1._tuple_index + index2._tuple_index
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
self.assertEqual(joined.index.names, index1.names)
df1 = df1.sortlevel(1)
df2 = df2.sortlevel(1)
joined = df1.join(df2, how='outer').sortlevel(0)
ex_index = index1._tuple_index + index2._tuple_index
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
self.assertEqual(joined.index.names, index1.names)
def test_join_inner_multiindex(self):
key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
'qux', 'snap']
key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
'three', 'one']
data = np.random.randn(len(key1))
data = DataFrame({'key1': key1, 'key2': key2,
'data': data})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
to_join = DataFrame(np.random.randn(10, 3), index=index,
columns=['j_one', 'j_two', 'j_three'])
joined = data.join(to_join, on=['key1', 'key2'], how='inner')
expected = merge(data, to_join.reset_index(),
left_on=['key1', 'key2'],
right_on=['first', 'second'], how='inner',
sort=False)
expected2 = merge(to_join, data,
right_on=['key1', 'key2'], left_index=True,
how='inner', sort=False)
assert_frame_equal(joined, expected2.reindex_like(joined))
expected2 = merge(to_join, data, right_on=['key1', 'key2'],
left_index=True, how='inner', sort=False)
expected = expected.drop(['first', 'second'], axis=1)
expected.index = joined.index
self.assert_(joined.index.is_monotonic)
assert_frame_equal(joined, expected)
# _assert_same_contents(expected, expected2.ix[:, expected.columns])
def test_join_hierarchical_mixed(self):
df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=['a', 'b', 'c'])
new_df = df.groupby(['a']).agg({'b': [np.mean, np.sum]})
other_df = DataFrame(
[(1, 2, 3), (7, 10, 6)], columns=['a', 'b', 'd'])
other_df.set_index('a', inplace=True)
result = merge(new_df, other_df, left_index=True, right_index=True)
self.assertTrue(('b', 'mean') in result)
self.assertTrue('b' in result)
def test_join_float64_float32(self):
a = DataFrame(randn(10, 2), columns=['a', 'b'], dtype = np.float64)
b = DataFrame(randn(10, 1), columns=['c'], dtype = np.float32)
joined = a.join(b)
self.assert_(joined.dtypes['a'] == 'float64')
self.assert_(joined.dtypes['b'] == 'float64')
self.assert_(joined.dtypes['c'] == 'float32')
a = np.random.randint(0, 5, 100).astype('int64')
b = np.random.random(100).astype('float64')
c = np.random.random(100).astype('float32')
df = DataFrame({'a': a, 'b': b, 'c': c})
xpdf = DataFrame({'a': a, 'b': b, 'c': c })
s = DataFrame(np.random.random(5).astype('float32'), columns=['md'])
rs = df.merge(s, left_on='a', right_index=True)
self.assert_(rs.dtypes['a'] == 'int64')
self.assert_(rs.dtypes['b'] == 'float64')
self.assert_(rs.dtypes['c'] == 'float32')
self.assert_(rs.dtypes['md'] == 'float32')
xp = xpdf.merge(s, left_on='a', right_index=True)
assert_frame_equal(rs, xp)
def test_join_many_non_unique_index(self):
df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='outer')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='outer')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='outer')
result = result.reset_index()
result['a'] = result['a'].astype(np.float64)
result['b'] = result['b'].astype(np.float64)
assert_frame_equal(result, expected.ix[:, result.columns])
df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
df3 = DataFrame(
{"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='inner')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='inner')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='inner')
result = result.reset_index()
assert_frame_equal(result, expected.ix[:, result.columns])
def test_merge_index_singlekey_right_vs_left(self):
left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
merged1 = merge(left, right, left_on='key',
right_index=True, how='left', sort=False)
merged2 = merge(right, left, right_on='key',
left_index=True, how='right', sort=False)
assert_frame_equal(merged1, merged2.ix[:, merged1.columns])
merged1 = merge(left, right, left_on='key',
right_index=True, how='left', sort=True)
merged2 = merge(right, left, right_on='key',
left_index=True, how='right', sort=True)
assert_frame_equal(merged1, merged2.ix[:, merged1.columns])
def test_merge_index_singlekey_inner(self):
left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
# inner join
result = merge(left, right, left_on='key', right_index=True,
how='inner')
expected = left.join(right, on='key').ix[result.index]
assert_frame_equal(result, expected)
result = merge(right, left, right_on='key', left_index=True,
how='inner')
expected = left.join(right, on='key').ix[result.index]
assert_frame_equal(result, expected.ix[:, result.columns])
def test_merge_misspecified(self):
self.assertRaises(Exception, merge, self.left, self.right,
left_index=True)
self.assertRaises(Exception, merge, self.left, self.right,
right_index=True)
self.assertRaises(Exception, merge, self.left, self.left,
left_on='key', on='key')
self.assertRaises(Exception, merge, self.df, self.df2,
left_on=['key1'], right_on=['key1', 'key2'])
def test_merge_overlap(self):
merged = merge(self.left, self.left, on='key')
exp_len = (self.left['key'].value_counts() ** 2).sum()
self.assertEqual(len(merged), exp_len)
self.assert_('v1_x' in merged)
self.assert_('v1_y' in merged)
def test_merge_different_column_key_names(self):
left = DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
right = DataFrame({'rkey': ['foo', 'bar', 'qux', 'foo'],
'value': [5, 6, 7, 8]})
merged = left.merge(right, left_on='lkey', right_on='rkey',
how='outer', sort=True)
assert_almost_equal(merged['lkey'],
['bar', 'baz', 'foo', 'foo', 'foo', 'foo', np.nan])
assert_almost_equal(merged['rkey'],
['bar', np.nan, 'foo', 'foo', 'foo', 'foo', 'qux'])
assert_almost_equal(merged['value_x'], [2, 3, 1, 1, 4, 4, np.nan])
assert_almost_equal(merged['value_y'], [6, np.nan, 5, 8, 5, 8, 7])
def test_merge_nocopy(self):
left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
merged = merge(left, right, left_index=True,
right_index=True, copy=False)
merged['a'] = 6
self.assert_((left['a'] == 6).all())
merged['d'] = 'peekaboo'
self.assert_((right['d'] == 'peekaboo').all())
def test_join_sort(self):
left = DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
right = DataFrame({'value2': ['a', 'b', 'c']},
index=['bar', 'baz', 'foo'])
joined = left.join(right, on='key', sort=True)
expected = DataFrame({'key': ['bar', 'baz', 'foo', 'foo'],
'value': [2, 3, 1, 4],
'value2': ['a', 'b', 'c', 'c']},
index=[1, 2, 0, 3])
assert_frame_equal(joined, expected)
# smoke test
joined = left.join(right, on='key', sort=False)
self.assert_(np.array_equal(joined.index, lrange(4)))
def test_intelligently_handle_join_key(self):
# #733, be a bit more 1337 about not returning unconsolidated DataFrame
left = DataFrame({'key': [1, 1, 2, 2, 3],
'value': lrange(5)}, columns=['value', 'key'])
right = DataFrame({'key': [1, 1, 2, 3, 4, 5],
'rvalue': lrange(6)})
joined = merge(left, right, on='key', how='outer')
expected = DataFrame({'key': [1, 1, 1, 1, 2, 2, 3, 4, 5.],
'value': np.array([0, 0, 1, 1, 2, 3, 4,
np.nan, np.nan]),
'rvalue': np.array([0, 1, 0, 1, 2, 2, 3, 4, 5])},
columns=['value', 'key', 'rvalue'])
assert_frame_equal(joined, expected, check_dtype=False)
self.assert_(joined._data.is_consolidated())
def test_handle_join_key_pass_array(self):
left = DataFrame({'key': [1, 1, 2, 2, 3],
'value': lrange(5)}, columns=['value', 'key'])
right = DataFrame({'rvalue': lrange(6)})
key = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on='key', right_on=key, how='outer')
merged2 = merge(right, left, left_on=key, right_on='key', how='outer')
assert_series_equal(merged['key'], merged2['key'])
self.assert_(merged['key'].notnull().all())
self.assert_(merged2['key'].notnull().all())
left = DataFrame({'value': lrange(5)}, columns=['value'])
right = DataFrame({'rvalue': lrange(6)})
lkey = np.array([1, 1, 2, 2, 3])
rkey = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on=lkey, right_on=rkey, how='outer')
self.assert_(np.array_equal(merged['key_0'],
np.array([1, 1, 1, 1, 2, 2, 3, 4, 5])))
left = DataFrame({'value': lrange(3)})
right = DataFrame({'rvalue': lrange(6)})
key = np.array([0, 1, 1, 2, 2, 3])
merged = merge(left, right, left_index=True, right_on=key, how='outer')
self.assert_(np.array_equal(merged['key_0'], key))
def test_mixed_type_join_with_suffix(self):
# GH #916
df = DataFrame(np.random.randn(20, 6),
columns=['a', 'b', 'c', 'd', 'e', 'f'])
df.insert(0, 'id', 0)
df.insert(5, 'dt', 'foo')
grouped = df.groupby('id')
mn = grouped.mean()
cn = grouped.count()
# it works!
mn.join(cn, rsuffix='_right')
def test_no_overlap_more_informative_error(self):
dt = datetime.now()
df1 = DataFrame({'x': ['a']}, index=[dt])
df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
self.assertRaises(MergeError, merge, df1, df2)
def test_merge_non_unique_indexes(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
dt4 = datetime(2012, 5, 4)
df1 = DataFrame({'x': ['a']}, index=[dt])
df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
_check_merge(df1, df2)
# Not monotonic
df1 = DataFrame({'x': ['a', 'b', 'q']}, index=[dt2, dt, dt4])
df2 = DataFrame({'y': ['c', 'd', 'e', 'f', 'g', 'h']},
index=[dt3, dt3, dt2, dt2, dt, dt])
_check_merge(df1, df2)
df1 = DataFrame({'x': ['a', 'b']}, index=[dt, dt])
df2 = DataFrame({'y': ['c', 'd']}, index=[dt, dt])
_check_merge(df1, df2)
def test_merge_non_unique_index_many_to_many(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
df1 = DataFrame({'x': ['a', 'b', 'c', 'd']},
index=[dt2, dt2, dt, dt])
df2 = DataFrame({'y': ['e', 'f', 'g', ' h', 'i']},
index=[dt2, dt2, dt3, dt, dt])
_check_merge(df1, df2)
def test_left_merge_empty_dataframe(self):
left = DataFrame({'key': [1], 'value': [2]})
right = DataFrame({'key': []})
result = merge(left, right, on='key', how='left')
assert_frame_equal(result, left)
result = merge(right, left, on='key', how='right')
assert_frame_equal(result, left)
def test_merge_nosort(self):
# #2098, anything to do?
from datetime import datetime
d = {"var1": np.random.randint(0, 10, size=10),
"var2": np.random.randint(0, 10, size=10),
"var3": [datetime(2012, 1, 12), datetime(2011, 2, 4),
datetime(
2010, 2, 3), datetime(2012, 1, 12),
datetime(
2011, 2, 4), datetime(2012, 4, 3),
datetime(
2012, 3, 4), datetime(2008, 5, 1),
datetime(2010, 2, 3), datetime(2012, 2, 3)]}
df = DataFrame.from_dict(d)
var3 = df.var3.unique()
var3.sort()
new = DataFrame.from_dict({"var3": var3,
"var8": np.random.random(7)})
result = df.merge(new, on="var3", sort=False)
exp = merge(df, new, on='var3', sort=False)
assert_frame_equal(result, exp)
self.assert_((df.var3.unique() == result.var3.unique()).all())
def test_merge_nan_right(self):
df1 = DataFrame({"i1" : [0, 1], "i2" : [0, 1]})
df2 = DataFrame({"i1" : [0], "i3" : [0]})
result = df1.join(df2, on="i1", rsuffix="_")
expected = DataFrame({'i1': {0: 0.0, 1: 1}, 'i2': {0: 0, 1: 1},
'i1_': {0: 0, 1: np.nan}, 'i3': {0: 0.0, 1: np.nan},
None: {0: 0, 1: 0}}).set_index(None).reset_index()[['i1', 'i2', 'i1_', 'i3']]
assert_frame_equal(result, expected, check_dtype=False)
df1 = DataFrame({"i1" : [0, 1], "i2" : [0.5, 1.5]})
df2 = DataFrame({"i1" : [0], "i3" : [0.7]})
result = df1.join(df2, rsuffix="_", on='i1')
expected = DataFrame({'i1': {0: 0, 1: 1}, 'i1_': {0: 0.0, 1: nan},
'i2': {0: 0.5, 1: 1.5}, 'i3': {0: 0.69999999999999996,
1: nan}})[['i1', 'i2', 'i1_', 'i3']]
assert_frame_equal(result, expected)
def test_append_dtype_coerce(self):
# GH 4993
# appending with datetime will incorrectly convert datetime64
import datetime as dt
from pandas import NaT
df1 = DataFrame(index=[1,2], data=[dt.datetime(2013,1,1,0,0),
dt.datetime(2013,1,2,0,0)],
columns=['start_time'])
df2 = DataFrame(index=[4,5], data=[[dt.datetime(2013,1,3,0,0),
dt.datetime(2013,1,3,6,10)],
[dt.datetime(2013,1,4,0,0),
dt.datetime(2013,1,4,7,10)]],
columns=['start_time','end_time'])
expected = concat([
Series([NaT,NaT,dt.datetime(2013,1,3,6,10),dt.datetime(2013,1,4,7,10)],name='end_time'),
Series([dt.datetime(2013,1,1,0,0),dt.datetime(2013,1,2,0,0),dt.datetime(2013,1,3,0,0),dt.datetime(2013,1,4,0,0)],name='start_time'),
],axis=1)
result = df1.append(df2,ignore_index=True)
assert_frame_equal(result, expected)
def test_join_append_timedeltas(self):
import datetime as dt
from pandas import NaT
# timedelta64 issues with join/merge
# GH 5695
if _np_version_under1p7:
raise nose.SkipTest("numpy < 1.7")
d = {'d': dt.datetime(2013, 11, 5, 5, 56), 't': dt.timedelta(0, 22500)}
df = DataFrame(columns=list('dt'))
df = df.append(d, ignore_index=True)
result = df.append(d, ignore_index=True)
expected = DataFrame({'d': [dt.datetime(2013, 11, 5, 5, 56),
dt.datetime(2013, 11, 5, 5, 56) ],
't': [ dt.timedelta(0, 22500),
dt.timedelta(0, 22500) ]})
assert_frame_equal(result, expected)
td = np.timedelta64(300000000)
lhs = DataFrame(Series([td,td],index=["A","B"]))
rhs = DataFrame(Series([td],index=["A"]))
from pandas import NaT
result = lhs.join(rhs,rsuffix='r', how="left")
expected = DataFrame({ '0' : Series([td,td],index=list('AB')), '0r' : Series([td,NaT],index=list('AB')) })
assert_frame_equal(result, expected)
def test_overlapping_columns_error_message(self):
# #2649
df = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
df2 = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
df.columns = ['key', 'foo', 'foo']
df2.columns = ['key', 'bar', 'bar']
self.assertRaises(Exception, merge, df, df2)
def _check_merge(x, y):
for how in ['inner', 'left', 'outer']:
result = x.join(y, how=how)
expected = merge(x.reset_index(), y.reset_index(), how=how,
sort=True)
expected = expected.set_index('index')
assert_frame_equal(result, expected, check_names=False) # TODO check_names on merge?
class TestMergeMulti(tm.TestCase):
def setUp(self):
self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.to_join = DataFrame(np.random.randn(10, 3), index=self.index,
columns=['j_one', 'j_two', 'j_three'])
# a little relevant example with NAs
key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
'qux', 'snap']
key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
'three', 'one']
data = np.random.randn(len(key1))
self.data = DataFrame({'key1': key1, 'key2': key2,
'data': data})
def test_merge_on_multikey(self):
joined = self.data.join(self.to_join, on=['key1', 'key2'])
join_key = Index(lzip(self.data['key1'], self.data['key2']))
indexer = self.to_join.index.get_indexer(join_key)
ex_values = self.to_join.values.take(indexer, axis=0)
ex_values[indexer == -1] = np.nan
expected = self.data.join(DataFrame(ex_values,
columns=self.to_join.columns))
# TODO: columns aren't in the same order yet
assert_frame_equal(joined, expected.ix[:, joined.columns])
def test_merge_right_vs_left(self):
# compare left vs right merge with multikey
merged1 = self.data.merge(self.to_join, left_on=['key1', 'key2'],
right_index=True, how='left')
merged2 = self.to_join.merge(self.data, right_on=['key1', 'key2'],
left_index=True, how='right')
merged2 = merged2.ix[:, merged1.columns]
assert_frame_equal(merged1, merged2)
def test_compress_group_combinations(self):
# ~ 40000000 possible unique groups
key1 = np.array([rands(10) for _ in range(10000)], dtype='O')
key1 = np.tile(key1, 2)
key2 = key1[::-1]
df = DataFrame({'key1': key1, 'key2': key2,
'value1': np.random.randn(20000)})
df2 = DataFrame({'key1': key1[::2], 'key2': key2[::2],
'value2': np.random.randn(10000)})
# just to hit the label compression code path
merged = merge(df, df2, how='outer')
def test_left_join_index_preserve_order(self):
left = DataFrame({'k1': [0, 1, 2] * 8,
'k2': ['foo', 'bar'] * 12,
'v': np.array(np.arange(24),dtype=np.int64) })
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': [5, 7]}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
expected['v2'] = np.nan
expected['v2'][(expected.k1 == 2) & (expected.k2 == 'bar')] = 5
expected['v2'][(expected.k1 == 1) & (expected.k2 == 'foo')] = 7
tm.assert_frame_equal(result, expected)
# test join with multi dtypes blocks
left = DataFrame({'k1': [0, 1, 2] * 8,
'k2': ['foo', 'bar'] * 12,
'k3' : np.array([0, 1, 2]*8, dtype=np.float32),
'v': np.array(np.arange(24),dtype=np.int32) })
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': [5, 7]}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
expected['v2'] = np.nan
expected['v2'][(expected.k1 == 2) & (expected.k2 == 'bar')] = 5
expected['v2'][(expected.k1 == 1) & (expected.k2 == 'foo')] = 7
tm.assert_frame_equal(result, expected)
# do a right join for an extra test
joined = merge(right, left, left_index=True,
right_on=['k1', 'k2'], how='right')
tm.assert_frame_equal(joined.ix[:, expected.columns], expected)
def test_join_multi_dtypes(self):
# test with multi dtypes in the join index
def _test(dtype1,dtype2):
left = DataFrame({'k1': np.array([0, 1, 2] * 8, dtype=dtype1),
'k2': ['foo', 'bar'] * 12,
'v': np.array(np.arange(24),dtype=np.int64) })
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': np.array([5, 7], dtype=dtype2)}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
if dtype2.kind == 'i':
dtype2 = np.dtype('float64')
expected['v2'] = np.array(np.nan,dtype=dtype2)
expected['v2'][(expected.k1 == 2) & (expected.k2 == 'bar')] = 5
expected['v2'][(expected.k1 == 1) & (expected.k2 == 'foo')] = 7
tm.assert_frame_equal(result, expected)
for d1 in [np.int64,np.int32,np.int16,np.int8,np.uint8]:
for d2 in [np.int64,np.float64,np.float32,np.float16]:
_test(np.dtype(d1),np.dtype(d2))
def test_left_merge_na_buglet(self):
left = DataFrame({'id': list('abcde'), 'v1': randn(5),
'v2': randn(5), 'dummy': list('abcde'),
'v3': randn(5)},
columns=['id', 'v1', 'v2', 'dummy', 'v3'])
right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan],
'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]})
merged = merge(left, right, on='id', how='left')
rdf = right.drop(['id'], axis=1)
expected = left.join(rdf)
tm.assert_frame_equal(merged, expected)
def test_merge_na_keys(self):
data = [[1950, "A", 1.5],
[1950, "B", 1.5],
[1955, "B", 1.5],
[1960, "B", np.nan],
[1970, "B", 4.],
[1950, "C", 4.],
[1960, "C", np.nan],
[1965, "C", 3.],
[1970, "C", 4.]]
frame = DataFrame(data, columns=["year", "panel", "data"])
other_data = [[1960, 'A', np.nan],
[1970, 'A', np.nan],
[1955, 'A', np.nan],
[1965, 'A', np.nan],
[1965, 'B', np.nan],
[1955, 'C', np.nan]]
other = DataFrame(other_data, columns=['year', 'panel', 'data'])
result = frame.merge(other, how='outer')
expected = frame.fillna(-999).merge(other.fillna(-999), how='outer')
expected = expected.replace(-999, np.nan)
tm.assert_frame_equal(result, expected)
def test_int64_overflow_issues(self):
# #2690, combinatorial explosion
df1 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G1'])
df2 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G2'])
# it works!
result = merge(df1, df2, how='outer')
self.assertTrue(len(result) == 2000)
def _check_join(left, right, result, join_col, how='left',
lsuffix='_x', rsuffix='_y'):
# some smoke tests
for c in join_col:
assert(result[c].notnull().all())
left_grouped = left.groupby(join_col)
right_grouped = right.groupby(join_col)
for group_key, group in result.groupby(join_col):
l_joined = _restrict_to_columns(group, left.columns, lsuffix)
r_joined = _restrict_to_columns(group, right.columns, rsuffix)
try:
lgroup = left_grouped.get_group(group_key)
except KeyError:
if how in ('left', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(l_joined, left.columns, join_col)
else:
_assert_same_contents(l_joined, lgroup)
try:
rgroup = right_grouped.get_group(group_key)
except KeyError:
if how in ('right', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(r_joined, right.columns, join_col)
else:
_assert_same_contents(r_joined, rgroup)
def _restrict_to_columns(group, columns, suffix):
found = [c for c in group.columns
if c in columns or c.replace(suffix, '') in columns]
# filter
group = group.ix[:, found]
# get rid of suffixes, if any
group = group.rename(columns=lambda x: x.replace(suffix, ''))
# put in the right order...
group = group.ix[:, columns]
return group
def _assert_same_contents(join_chunk, source):
NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
rows = set(tuple(row) for row in jvalues)
assert(len(rows) == len(source))
assert(all(tuple(row) in rows for row in svalues))
def _assert_all_na(join_chunk, source_columns, join_col):
for c in source_columns:
if c in join_col:
continue
assert(join_chunk[c].isnull().all())
def _join_by_hand(a, b, how='left'):
join_index = a.index.join(b.index, how=how)
a_re = a.reindex(join_index)
b_re = b.reindex(join_index)
result_columns = a.columns.append(b.columns)
for col, s in compat.iteritems(b_re):
a_re[col] = s
return a_re.reindex(columns=result_columns)
class TestConcatenate(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.frame = DataFrame(tm.getSeriesData())
self.mixed_frame = self.frame.copy()
self.mixed_frame['foo'] = 'bar'
def test_append(self):
begin_index = self.frame.index[:5]
end_index = self.frame.index[5:]
begin_frame = self.frame.reindex(begin_index)
end_frame = self.frame.reindex(end_index)
appended = begin_frame.append(end_frame)
assert_almost_equal(appended['A'], self.frame['A'])
del end_frame['A']
partial_appended = begin_frame.append(end_frame)
self.assert_('A' in partial_appended)
partial_appended = end_frame.append(begin_frame)
self.assert_('A' in partial_appended)
# mixed type handling
appended = self.mixed_frame[:5].append(self.mixed_frame[5:])
assert_frame_equal(appended, self.mixed_frame)
# what to test here
mixed_appended = self.mixed_frame[:5].append(self.frame[5:])
mixed_appended2 = self.frame[:5].append(self.mixed_frame[5:])
# all equal except 'foo' column
assert_frame_equal(
mixed_appended.reindex(columns=['A', 'B', 'C', 'D']),
mixed_appended2.reindex(columns=['A', 'B', 'C', 'D']))
# append empty
empty = DataFrame({})
appended = self.frame.append(empty)
assert_frame_equal(self.frame, appended)
self.assert_(appended is not self.frame)
appended = empty.append(self.frame)
assert_frame_equal(self.frame, appended)
self.assert_(appended is not self.frame)
# overlap
self.assertRaises(ValueError, self.frame.append, self.frame,
verify_integrity=True)
# new columns
# GH 6129
df = DataFrame({'a': {'x': 1, 'y': 2}, 'b': {'x': 3, 'y': 4}})
row = Series([5, 6, 7], index=['a', 'b', 'c'], name='z')
expected = DataFrame({'a': {'x': 1, 'y': 2, 'z': 5}, 'b': {'x': 3, 'y': 4, 'z': 6}, 'c' : {'z' : 7}})
result = df.append(row)
assert_frame_equal(result, expected)
def test_append_length0_frame(self):
df = DataFrame(columns=['A', 'B', 'C'])
df3 = DataFrame(index=[0, 1], columns=['A', 'B'])
df5 = df.append(df3)
expected = DataFrame(index=[0, 1], columns=['A', 'B', 'C'])
assert_frame_equal(df5, expected)
def test_append_records(self):
arr1 = np.zeros((2,), dtype=('i4,f4,a10'))
arr1[:] = [(1, 2., 'Hello'), (2, 3., "World")]
arr2 = np.zeros((3,), dtype=('i4,f4,a10'))
arr2[:] = [(3, 4., 'foo'),
(5, 6., "bar"),
(7., 8., 'baz')]
df1 = DataFrame(arr1)
df2 = DataFrame(arr2)
result = df1.append(df2, ignore_index=True)
expected = DataFrame(np.concatenate((arr1, arr2)))
assert_frame_equal(result, expected)
def test_append_different_columns(self):
df = DataFrame({'bools': np.random.randn(10) > 0,
'ints': np.random.randint(0, 10, 10),
'floats': np.random.randn(10),
'strings': ['foo', 'bar'] * 5})
a = df[:5].ix[:, ['bools', 'ints', 'floats']]
b = df[5:].ix[:, ['strings', 'ints', 'floats']]
appended = a.append(b)
self.assert_(isnull(appended['strings'][0:4]).all())
self.assert_(isnull(appended['bools'][5:]).all())
def test_append_many(self):
chunks = [self.frame[:5], self.frame[5:10],
self.frame[10:15], self.frame[15:]]
result = chunks[0].append(chunks[1:])
tm.assert_frame_equal(result, self.frame)
chunks[-1]['foo'] = 'bar'
result = chunks[0].append(chunks[1:])
tm.assert_frame_equal(result.ix[:, self.frame.columns], self.frame)
self.assert_((result['foo'][15:] == 'bar').all())
self.assert_(result['foo'][:15].isnull().all())
def test_append_preserve_index_name(self):
# #980
df1 = DataFrame(data=None, columns=['A', 'B', 'C'])
df1 = df1.set_index(['A'])
df2 = DataFrame(data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]],
columns=['A', 'B', 'C'])
df2 = df2.set_index(['A'])
result = df1.append(df2)
self.assert_(result.index.name == 'A')
def test_join_many(self):
df = DataFrame(np.random.randn(10, 6), columns=list('abcdef'))
df_list = [df[['a', 'b']], df[['c', 'd']], df[['e', 'f']]]
joined = df_list[0].join(df_list[1:])
tm.assert_frame_equal(joined, df)
df_list = [df[['a', 'b']][:-2],
df[['c', 'd']][2:], df[['e', 'f']][1:9]]
def _check_diff_index(df_list, result, exp_index):
reindexed = [x.reindex(exp_index) for x in df_list]
expected = reindexed[0].join(reindexed[1:])
tm.assert_frame_equal(result, expected)
# different join types
joined = df_list[0].join(df_list[1:], how='outer')
_check_diff_index(df_list, joined, df.index)
joined = df_list[0].join(df_list[1:])
_check_diff_index(df_list, joined, df_list[0].index)
joined = df_list[0].join(df_list[1:], how='inner')
_check_diff_index(df_list, joined, df.index[2:8])
self.assertRaises(ValueError, df_list[0].join, df_list[1:], on='a')
def test_join_many_mixed(self):
df = DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
df['key'] = ['foo', 'bar'] * 4
df1 = df.ix[:, ['A', 'B']]
df2 = df.ix[:, ['C', 'D']]
df3 = df.ix[:, ['key']]
result = df1.join([df2, df3])
assert_frame_equal(result, df)
def test_append_missing_column_proper_upcast(self):
df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8')})
df2 = DataFrame({'B': np.array([True, False, True, False],
dtype=bool)})
appended = df1.append(df2, ignore_index=True)
self.assert_(appended['A'].dtype == 'f8')
self.assert_(appended['B'].dtype == 'O')
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 0, 1, 2, 3]])
expected = DataFrame(np.r_[df.values, df2.values],
index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values],
index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values],
columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values],
columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.ix[:, [0, 1]], df.ix[:, [2]], df.ix[:, [3]]]
level = ['three', 'two', 'one', 'zero']
result = concat(pieces, axis=1, keys=['one', 'two', 'three'],
levels=[level],
names=['group_key'])
self.assert_(np.array_equal(result.columns.levels[0], level))
self.assertEqual(result.columns.names[0], 'group_key')
def test_concat_dataframe_keys_bug(self):
t1 = DataFrame({'value': Series([1, 2, 3],
index=Index(['a', 'b', 'c'], name='id'))})
t2 = DataFrame({'value': Series([7, 8],
index=Index(['a', 'b'], name='id'))})
# it works
result = concat([t1, t2], axis=1, keys=['t1', 't2'])
self.assertEqual(list(result.columns), [('t1', 'value'),
('t2', 'value')])
def test_concat_dict(self):
frames = {'foo': DataFrame(np.random.randn(4, 3)),
'bar': DataFrame(np.random.randn(4, 3)),
'baz': DataFrame(np.random.randn(4, 3)),
'qux': DataFrame(np.random.randn(4, 3))}
sorted_keys = sorted(frames)
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys,
axis=1)
tm.assert_frame_equal(result, expected)
keys = ['baz', 'foo', 'bar']
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self):
frame1 = DataFrame({"test1": ["a", "b", "c"],
"test2": [1, 2, 3],
"test3": [4.5, 3.2, 1.2]})
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True)
nan = np.nan
expected = DataFrame([[nan, nan, nan, 4.3],
['a', 1, 4.5, 5.2],
['b', 2, 3.2, 2.2],
['c', 3, 1.2, nan]],
index=Index(["q", "x", "y", "z"]))
tm.assert_frame_equal(v1, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
result = concat([frame, frame], keys=[0, 1], names=['iteration'])
self.assertEqual(result.index.names, ('iteration',) + index.names)
tm.assert_frame_equal(result.ix[0], frame)
tm.assert_frame_equal(result.ix[1], frame)
self.assertEqual(result.index.nlevels, 3)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [['foo', 'baz'], ['one', 'two']]
names = ['first', 'second']
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
levels=levels,
names=names)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(levels=levels + [[0]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1],
[0, 0, 0, 0]],
names=names + [None])
expected.index = exp_index
assert_frame_equal(result, expected)
# no names
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
levels=levels)
self.assertEqual(result.index.names, (None,) * 3)
# no levels
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
names=['first', 'second'])
self.assertEqual(result.index.names, ('first', 'second') + (None,))
self.assert_(np.array_equal(result.index.levels[0], ['baz', 'foo']))
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=['a'])
df2 = DataFrame(np.random.randn(1, 4), index=['b'])
self.assertRaises(ValueError, concat, [df, df],
keys=['one', 'two'], levels=[['foo', 'bar', 'baz']])
self.assertRaises(ValueError, concat, [df, df2],
keys=['one', 'two'], levels=[['foo', 'bar', 'baz']])
def test_concat_rename_index(self):
a = DataFrame(np.random.rand(3, 3),
columns=list('ABC'),
index=Index(list('abc'), name='index_a'))
b = DataFrame(np.random.rand(3, 3),
columns=list('ABC'),
index=Index(list('abc'), name='index_b'))
result = concat([a, b], keys=['key0', 'key1'],
names=['lvl0', 'lvl1'])
exp = concat([a, b], keys=['key0', 'key1'], names=['lvl0'])
names = list(exp.index.names)
names[1] = 'lvl1'
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
self.assertEqual(result.index.names, exp.index.names)
def test_crossed_dtypes_weird_corner(self):
columns = ['A', 'B', 'C', 'D']
df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='f8'),
'B': np.array([1, 2, 3, 4], dtype='i8'),
'C': np.array([1, 2, 3, 4], dtype='f8'),
'D': np.array([1, 2, 3, 4], dtype='i8')},
columns=columns)
df2 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8'),
'B': np.array([1, 2, 3, 4], dtype='f8'),
'C': np.array([1, 2, 3, 4], dtype='i8'),
'D': np.array([1, 2, 3, 4], dtype='f8')},
columns=columns)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(np.concatenate([df1.values, df2.values], axis=0),
columns=columns)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=['a'])
df2 = DataFrame(np.random.randn(1, 4), index=['b'])
result = concat(
[df, df2], keys=['one', 'two'], names=['first', 'second'])
self.assertEqual(result.index.names, ('first', 'second'))
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(np.random.randint(0,10,size=40).reshape(10,4),columns=['A','A','C','C'])
result = concat([df,df],axis=1)
assert_frame_equal(result.iloc[:,:4],df)
assert_frame_equal(result.iloc[:,4:],df)
result = concat([df,df],axis=0)
assert_frame_equal(result.iloc[:10],df)
assert_frame_equal(result.iloc[10:],df)
# multi dtypes
df = concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),
DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],
axis=1)
result = concat([df,df],axis=1)
assert_frame_equal(result.iloc[:,:6],df)
assert_frame_equal(result.iloc[:,6:],df)
result = concat([df,df],axis=0)
assert_frame_equal(result.iloc[:10],df)
assert_frame_equal(result.iloc[10:],df)
# append
result = df.iloc[0:8,:].append(df.iloc[8:])
assert_frame_equal(result, df)
result = df.iloc[0:8,:].append(df.iloc[8:9]).append(df.iloc[9:10])
assert_frame_equal(result, df)
expected = concat([df,df],axis=0)
result = df.append(df)
assert_frame_equal(result, expected)
def test_join_dups(self):
# joining dups
df = concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),
DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],
axis=1)
expected = concat([df,df],axis=1)
result = df.join(df,rsuffix='_2')
result.columns = expected.columns
assert_frame_equal(result, expected)
# GH 4975, invalid join on dups
w = DataFrame(np.random.randn(4,2), columns=["x", "y"])
x = DataFrame(np.random.randn(4,2), columns=["x", "y"])
y = DataFrame(np.random.randn(4,2), columns=["x", "y"])
z = DataFrame(np.random.randn(4,2), columns=["x", "y"])
dta = x.merge(y, left_index=True, right_index=True).merge(z, left_index=True, right_index=True, how="outer")
dta = dta.merge(w, left_index=True, right_index=True)
expected = concat([x,y,z,w],axis=1)
expected.columns=['x_x','y_x','x_y','y_y','x_x','y_x','x_y','y_y']
assert_frame_equal(dta,expected)
def test_handle_empty_objects(self):
df = DataFrame(np.random.randn(10, 4), columns=list('abcd'))
baz = df[:5]
baz['foo'] = 'bar'
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0)
expected = df.ix[:, ['a', 'b', 'c', 'd', 'foo']]
expected['foo'] = expected['foo'].astype('O')
expected['foo'][:5] = 'bar'
tm.assert_frame_equal(concatted, expected)
def test_panel_join(self):
panel = tm.makePanel()
tm.add_nans(panel)
p1 = panel.ix[:2, :10, :3]
p2 = panel.ix[2:, 5:, 2:]
# left join
result = p1.join(p2)
expected = p1.copy()
expected['ItemC'] = p2['ItemC']
tm.assert_panel_equal(result, expected)
# right join
result = p1.join(p2, how='right')
expected = p2.copy()
expected['ItemA'] = p1['ItemA']
expected['ItemB'] = p1['ItemB']
expected = expected.reindex(items=['ItemA', 'ItemB', 'ItemC'])
tm.assert_panel_equal(result, expected)
# inner join
result = p1.join(p2, how='inner')
expected = panel.ix[:, 5:10, 2:3]
tm.assert_panel_equal(result, expected)
# outer join
result = p1.join(p2, how='outer')
expected = p1.reindex(major=panel.major_axis,
minor=panel.minor_axis)
expected = expected.join(p2.reindex(major=panel.major_axis,
minor=panel.minor_axis))
tm.assert_panel_equal(result, expected)
def test_panel_join_overlap(self):
panel = tm.makePanel()
tm.add_nans(panel)
p1 = panel.ix[['ItemA', 'ItemB', 'ItemC']]
p2 = panel.ix[['ItemB', 'ItemC']]
joined = p1.join(p2, lsuffix='_p1', rsuffix='_p2')
p1_suf = p1.ix[['ItemB', 'ItemC']].add_suffix('_p1')
p2_suf = p2.ix[['ItemB', 'ItemC']].add_suffix('_p2')
no_overlap = panel.ix[['ItemA']]
expected = p1_suf.join(p2_suf).join(no_overlap)
tm.assert_panel_equal(joined, expected)
def test_panel_join_many(self):
tm.K = 10
panel = tm.makePanel()
tm.K = 4
panels = [panel.ix[:2], panel.ix[2:6], panel.ix[6:]]
joined = panels[0].join(panels[1:])
tm.assert_panel_equal(joined, panel)
panels = [panel.ix[:2, :-5], panel.ix[2:6, 2:], panel.ix[6:, 5:-7]]
data_dict = {}
for p in panels:
data_dict.update(compat.iteritems(p))
joined = panels[0].join(panels[1:], how='inner')
expected = Panel.from_dict(data_dict, intersect=True)
tm.assert_panel_equal(joined, expected)
joined = panels[0].join(panels[1:], how='outer')
expected = Panel.from_dict(data_dict, intersect=False)
tm.assert_panel_equal(joined, expected)
# edge cases
self.assertRaises(ValueError, panels[0].join, panels[1:],
how='outer', lsuffix='foo', rsuffix='bar')
self.assertRaises(ValueError, panels[0].join, panels[1:],
how='right')
def test_panel_concat_other_axes(self):
panel = tm.makePanel()
p1 = panel.ix[:, :5, :]
p2 = panel.ix[:, 5:, :]
result = concat([p1, p2], axis=1)
tm.assert_panel_equal(result, panel)
p1 = panel.ix[:, :, :2]
p2 = panel.ix[:, :, 2:]
result = concat([p1, p2], axis=2)
tm.assert_panel_equal(result, panel)
# if things are a bit misbehaved
p1 = panel.ix[:2, :, :2]
p2 = panel.ix[:, :, 2:]
p1['ItemC'] = 'baz'
result = concat([p1, p2], axis=2)
expected = panel.copy()
expected['ItemC'] = expected['ItemC'].astype('O')
expected.ix['ItemC', :, :2] = 'baz'
tm.assert_panel_equal(result, expected)
def test_panel_concat_buglet(self):
# #2257
def make_panel():
index = 5
cols = 3
def df():
return DataFrame(np.random.randn(index, cols),
index=["I%s" % i for i in range(index)],
columns=["C%s" % i for i in range(cols)])
return Panel(dict([("Item%s" % x, df()) for x in ['A', 'B', 'C']]))
panel1 = make_panel()
panel2 = make_panel()
panel2 = panel2.rename_axis(dict([(x, "%s_1" % x)
for x in panel2.major_axis]),
axis=1)
panel3 = panel2.rename_axis(lambda x: '%s_1' % x, axis=1)
panel3 = panel3.rename_axis(lambda x: '%s_1' % x, axis=2)
# it works!
concat([panel1, panel3], axis=1, verify_integrity=True)
def test_panel4d_concat(self):
p4d = tm.makePanel4D()
p1 = p4d.ix[:, :, :5, :]
p2 = p4d.ix[:, :, 5:, :]
result = concat([p1, p2], axis=2)
tm.assert_panel4d_equal(result, p4d)
p1 = p4d.ix[:, :, :, :2]
p2 = p4d.ix[:, :, :, 2:]
result = concat([p1, p2], axis=3)
tm.assert_panel4d_equal(result, p4d)
def test_panel4d_concat_mixed_type(self):
p4d = tm.makePanel4D()
# if things are a bit misbehaved
p1 = p4d.ix[:, :2, :, :2]
p2 = p4d.ix[:, :, :, 2:]
p1['L5'] = 'baz'
result = concat([p1, p2], axis=3)
p2['L5'] = np.nan
expected = concat([p1, p2], axis=3)
expected = expected.ix[result.labels]
tm.assert_panel4d_equal(result, expected)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = 'foo'
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
self.assertEqual(result.name, ts.name)
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype='M8[ns]'))
exp_labels = [np.repeat([0, 1, 2], [len(x) for x in pieces]),
np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index],
labels=exp_labels)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
assert_frame_equal(result, expected)
result = concat(pieces, keys=['A', 'B', 'C'], axis=1)
expected = DataFrame(pieces, index=['A', 'B', 'C']).T
assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name='A')
s2 = Series(randn(5), name='B')
result = concat([s, s2], axis=1)
expected = DataFrame({'A': s, 'B': s2})
assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
self.assertTrue(np.array_equal(result.columns, lrange(2)))
# must reindex, #2603
s = Series(randn(3), index=['c', 'a', 'b'], name='A')
s2 = Series(randn(4), index=['d', 'a', 'b', 'c'], name='B')
result = concat([s, s2], axis=1)
expected = DataFrame({'A': s, 'B': s2})
assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=['foo'])
expected = concat([df, df], keys=['foo', 'bar'])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
self.assertRaises(Exception, concat, [None, None])
def test_concat_datetime64_block(self):
from pandas.tseries.index import date_range
rng = date_range('1/1/2000', periods=10)
df = DataFrame({'time': rng})
result = concat([df, df])
self.assert_((result.iloc[:10]['time'] == rng).all())
self.assert_((result.iloc[10:]['time'] == rng).all())
def test_concat_timedelta64_block(self):
# not friendly for < 1.7
if _np_version_under1p7:
raise nose.SkipTest("numpy < 1.7")
from pandas import to_timedelta
rng = to_timedelta(np.arange(10),unit='s')
df = DataFrame({'time': rng})
result = concat([df, df])
self.assert_((result.iloc[:10]['time'] == rng).all())
self.assert_((result.iloc[10:]['time'] == rng).all())
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat([None, df0, df0[:2], df0[:1], df0],
keys=['a', 'b', 'c', 'd', 'e'])
expected = concat([df0, df0[:2], df0[:1], df0],
keys=['b', 'c', 'd', 'e'])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
## to join with union
## these two are of different length!
left = concat([ts1, ts2], join='outer', axis=1)
right = concat([ts2, ts1], join='outer', axis=1)
self.assertEqual(len(left), len(right))
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = 'same name'
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns=['same name', 'same name']
assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame({'firmNo' : [0,0,0,0], 'stringvar' : ['rrr', 'rrr', 'rrr', 'rrr'], 'prc' : [6,6,6,6] })
df2 = DataFrame({'misc' : [1,2,3,4], 'prc' : [6,6,6,6], 'C' : [9,10,11,12]})
expected = DataFrame([[0,6,'rrr',9,1,6],
[0,6,'rrr',10,2,6],
[0,6,'rrr',11,3,6],
[0,6,'rrr',12,4,6]])
expected.columns = ['firmNo','prc','stringvar','C','misc','prc']
result = concat([df1,df2],axis=1)
assert_frame_equal(result,expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range('01-Jan-2013', '01-Jan-2014', freq='MS')[0:-1]
s1 = Series(randn(len(dates)), index=dates, name='value')
s2 = Series(randn(len(dates)), index=dates, name='value')
result = concat([s1, s2], axis=1, ignore_index=True)
self.assertTrue(np.array_equal(result.columns, [0, 1]))
def test_concat_invalid_first_argument(self):
df1 = mkdf(10, 2)
df2 = mkdf(10, 2)
self.assertRaises(AssertionError, concat, df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5,5)) for _ in range(3))
def test_concat_mixed_types_fails(self):
df = DataFrame(randn(10, 1))
with tm.assertRaisesRegexp(TypeError, "Cannot concatenate.+"):
concat([df[0], df], axis=1)
with tm.assertRaisesRegexp(TypeError, "Cannot concatenate.+"):
concat([df, df[0]], axis=1)
class TestOrderedMerge(tm.TestCase):
def setUp(self):
self.left = DataFrame({'key': ['a', 'c', 'e'],
'lvalue': [1, 2., 3]})
self.right = DataFrame({'key': ['b', 'c', 'd', 'f'],
'rvalue': [1, 2, 3., 4]})
# GH #813
def test_basic(self):
result = ordered_merge(self.left, self.right, on='key')
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
'lvalue': [1, nan, 2, nan, 3, nan],
'rvalue': [nan, 1, 2, 3, nan, 4]})
assert_frame_equal(result, expected)
def test_ffill(self):
result = ordered_merge(
self.left, self.right, on='key', fill_method='ffill')
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
'lvalue': [1., 1, 2, 2, 3, 3.],
'rvalue': [nan, 1, 2, 3, 3, 4]})
assert_frame_equal(result, expected)
def test_multigroup(self):
left = concat([self.left, self.left], ignore_index=True)
# right = concat([self.right, self.right], ignore_index=True)
left['group'] = ['a'] * 3 + ['b'] * 3
# right['group'] = ['a'] * 4 + ['b'] * 4
result = ordered_merge(left, self.right, on='key', left_by='group',
fill_method='ffill')
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'] * 2,
'lvalue': [1., 1, 2, 2, 3, 3.] * 2,
'rvalue': [nan, 1, 2, 3, 3, 4] * 2})
expected['group'] = ['a'] * 6 + ['b'] * 6
assert_frame_equal(result, expected.ix[:, result.columns])
result2 = ordered_merge(self.right, left, on='key', right_by='group',
fill_method='ffill')
assert_frame_equal(result, result2.ix[:, result.columns])
result = ordered_merge(left, self.right, on='key', left_by='group')
self.assert_(result['group'].notnull().all())
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
chen0031/Dato-Core | src/unity/python/graphlab/data_structures/gframe.py | 13 | 10841 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
from graphlab.data_structures.sframe import SFrame
from graphlab.data_structures.sframe import SArray
from graphlab.cython.context import debug_trace as cython_context
from graphlab.data_structures.sarray import SArray, _create_sequential_sarray
import copy
VERTEX_GFRAME = 0
EDGE_GFRAME = 1
class GFrame(SFrame):
"""
GFrame is similar to SFrame but is associated with an SGraph.
    - GFrame can be obtained from either the `vertices` or `edges`
      attribute of any SGraph:
>>> import graphlab
>>> g = graphlab.load_sgraph(...)
>>> vertices_gf = g.vertices
>>> edges_gf = g.edges
- GFrame has the same API as SFrame:
>>> sa = vertices_gf['pagerank']
>>> # column lambda transform
>>> vertices_gf['pagerank'] = vertices_gf['pagerank'].apply(lambda x: 0.15 + 0.85 * x)
>>> # frame lambda transform
>>> vertices_gf['score'] = vertices_gf.apply(lambda x: 0.2 * x['triangle_count'] + 0.8 * x['pagerank'])
>>> del vertices_gf['pagerank']
- GFrame can be converted to SFrame:
>>> # extract an SFrame
>>> sf = vertices_gf.__to_sframe__()
"""
def __init__(self, graph, gframe_type):
self.__type__ = gframe_type
self.__graph__ = graph
self.__sframe_cache__ = None
self.__is_dirty__ = False
def __to_sframe__(self):
return copy.copy(self._get_cache())
#/**************************************************************************/
#/* */
#/* Modifiers */
#/* */
#/**************************************************************************/
def add_column(self, data, name=""):
"""
Adds the specified column to this SFrame. The number of elements in
the data given must match every other column of the SFrame.
Parameters
----------
data : SArray
The 'column' of data.
name : string
The name of the column. If no name is given, a default name is chosen.
"""
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
raise TypeError("Must give column as SArray")
if not isinstance(name, str):
raise TypeError("Invalid column name: must be str")
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.add_vertex_field(data.__proxy__, name)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.add_edge_field(data.__proxy__, name)
self.__graph__.__proxy__ = graph_proxy
def add_columns(self, datalist, namelist):
"""
Adds columns to the SFrame. The number of elements in all columns must
match every other column of the SFrame.
Parameters
----------
datalist : list of SArray
A list of columns
namelist : list of string
A list of column names. All names must be specified.
"""
if not hasattr(datalist, '__iter__'):
raise TypeError("datalist must be an iterable")
if not hasattr(namelist, '__iter__'):
raise TypeError("namelist must be an iterable")
if not all([isinstance(x, SArray) for x in datalist]):
raise TypeError("Must give column as SArray")
if not all([isinstance(x, str) for x in namelist]):
raise TypeError("Invalid column name in list: must all be str")
for (data, name) in zip(datalist, namelist):
self.add_column(data, name)
def remove_column(self, name):
"""
Removes the column with the given name from the SFrame.
Parameters
----------
name : string
The name of the column to remove.
"""
if name not in self.column_names():
raise KeyError('Cannot find column %s' % name)
self.__is_dirty__ = True
try:
with cython_context():
if self._is_vertex_frame():
assert name != '__id', 'Cannot remove \"__id\" column'
graph_proxy = self.__graph__.__proxy__.delete_vertex_field(name)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
assert name != '__src_id', 'Cannot remove \"__src_id\" column'
assert name != '__dst_id', 'Cannot remove \"__dst_id\" column'
graph_proxy = self.__graph__.__proxy__.delete_edge_field(name)
self.__graph__.__proxy__ = graph_proxy
except:
self.__is_dirty__ = False
raise
def swap_columns(self, column_1, column_2):
"""
Swaps the columns with the given names.
Parameters
----------
column_1 : string
Name of column to swap
column_2 : string
Name of other column to swap
"""
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.swap_vertex_fields(column_1, column_2)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.swap_edge_fields(column_1, column_2)
self.__graph__.__proxy__ = graph_proxy
def rename(self, names):
"""
Rename the columns using the 'names' dict. This changes the names of
the columns given as the keys and replaces them with the names given as
the values.
Parameters
----------
names : dict[string, string]
Dictionary of [old_name, new_name]
"""
if (type(names) is not dict):
raise TypeError('names must be a dictionary: oldname -> newname')
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.rename_vertex_fields(names.keys(), names.values())
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.rename_edge_fields(names.keys(), names.values())
self.__graph__.__proxy__ = graph_proxy
def add_row_number(self, column_name='id', start=0):
if type(column_name) is not str:
raise TypeError("Must give column_name as str")
if column_name in self.column_names():
raise RuntimeError("Column name %s already exists" % str(column_name))
if type(start) is not int:
raise TypeError("Must give start as int")
the_col = _create_sequential_sarray(self.num_rows(), start)
self[column_name] = the_col
return self
def __setitem__(self, key, value):
"""
A wrapper around add_column(s). Key can be either a list or a str. If
value is an SArray, it is added to the SFrame as a column. If it is a
constant value (int, str, or float), then a column is created where
every entry is equal to the constant value. Existing columns can also
be replaced using this wrapper.
"""
if (key in ['__id', '__src_id', '__dst_id']):
raise KeyError('Cannot modify column %s. Changing __id column will\
change the graph structure' % key)
else:
self.__is_dirty__ = True
super(GFrame, self).__setitem__(key, value)
#/**************************************************************************/
#/* */
#/* Read-only Accessor */
#/* */
#/**************************************************************************/
def num_rows(self):
"""
Returns the number of rows.
Returns
-------
out : int
Number of rows in the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.summary()['num_vertices']
elif self._is_edge_frame():
return self.__graph__.summary()['num_edges']
def num_cols(self):
"""
Returns the number of columns.
Returns
-------
out : int
Number of columns in the SFrame.
"""
return len(self.column_names())
def column_names(self):
"""
Returns the column names.
Returns
-------
out : list[string]
Column names of the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.__proxy__.get_vertex_fields()
elif self._is_edge_frame():
return self.__graph__.__proxy__.get_edge_fields()
def column_types(self):
"""
Returns the column types.
Returns
-------
out : list[type]
Column types of the SFrame.
"""
if self.__type__ == VERTEX_GFRAME:
return self.__graph__.__proxy__.get_vertex_field_types()
elif self.__type__ == EDGE_GFRAME:
return self.__graph__.__proxy__.get_edge_field_types()
#/**************************************************************************/
#/* */
#/* Internal Private Methods */
#/* */
#/**************************************************************************/
def _get_cache(self):
if self.__sframe_cache__ is None or self.__is_dirty__:
if self._is_vertex_frame():
self.__sframe_cache__ = self.__graph__.get_vertices()
elif self._is_edge_frame():
self.__sframe_cache__ = self.__graph__.get_edges()
else:
raise TypeError
self.__is_dirty__ = False
return self.__sframe_cache__
def _is_vertex_frame(self):
return self.__type__ == VERTEX_GFRAME
def _is_edge_frame(self):
return self.__type__ == EDGE_GFRAME
@property
def __proxy__(self):
return self._get_cache().__proxy__
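# Minimal usage sketch (mirrors the class docstring above; assumes `g` is an
# SGraph whose vertices already carry a 'pagerank' field -- illustrative only,
# not part of the public graphlab API):
def _gframe_usage_sketch(g):
    vertices_gf = GFrame(g, VERTEX_GFRAME)  # equivalent to g.vertices
    # writes go straight to the underlying graph and mark the cached SFrame dirty
    vertices_gf['pagerank'] = vertices_gf['pagerank'].apply(lambda x: 0.15 + 0.85 * x)
    # the next read rebuilds the SFrame view lazily via _get_cache()
    return vertices_gf.__to_sframe__()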
| agpl-3.0 |
GuessWhoSamFoo/pandas | pandas/tests/indexing/multiindex/test_panel.py | 1 | 3761 | import numpy as np
import pytest
from pandas import DataFrame, MultiIndex, Panel, Series
from pandas.util import testing as tm
@pytest.mark.filterwarnings('ignore:\\nPanel:FutureWarning')
class TestMultiIndexPanel(object):
def test_iloc_getitem_panel_multiindex(self):
# GH 7199
# Panel with multi-index
multi_index = MultiIndex.from_tuples([('ONE', 'one'),
('TWO', 'two'),
('THREE', 'three')],
names=['UPPER', 'lower'])
simple_index = [x[0] for x in multi_index]
wd1 = Panel(items=['First', 'Second'],
major_axis=['a', 'b', 'c', 'd'],
minor_axis=multi_index)
wd2 = Panel(items=['First', 'Second'],
major_axis=['a', 'b', 'c', 'd'],
minor_axis=simple_index)
expected1 = wd1['First'].iloc[[True, True, True, False], [0, 2]]
result1 = wd1.iloc[0, [True, True, True, False], [0, 2]] # WRONG
tm.assert_frame_equal(result1, expected1)
expected2 = wd2['First'].iloc[[True, True, True, False], [0, 2]]
result2 = wd2.iloc[0, [True, True, True, False], [0, 2]]
tm.assert_frame_equal(result2, expected2)
expected1 = DataFrame(index=['a'], columns=multi_index,
dtype='float64')
result1 = wd1.iloc[0, [0], [0, 1, 2]]
tm.assert_frame_equal(result1, expected1)
expected2 = DataFrame(index=['a'], columns=simple_index,
dtype='float64')
result2 = wd2.iloc[0, [0], [0, 1, 2]]
tm.assert_frame_equal(result2, expected2)
# GH 7516
mi = MultiIndex.from_tuples([(0, 'x'), (1, 'y'), (2, 'z')])
p = Panel(np.arange(3 * 3 * 3, dtype='int64').reshape(3, 3, 3),
items=['a', 'b', 'c'], major_axis=mi,
minor_axis=['u', 'v', 'w'])
result = p.iloc[:, 1, 0]
expected = Series([3, 12, 21], index=['a', 'b', 'c'], name='u')
tm.assert_series_equal(result, expected)
result = p.loc[:, (1, 'y'), 'u']
tm.assert_series_equal(result, expected)
def test_panel_setitem_with_multiindex(self):
# 10360
# failing with a multi-index
arr = np.array([[[1, 2, 3], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]],
dtype=np.float64)
# reg index
axes = dict(items=['A', 'B'], major_axis=[0, 1],
minor_axis=['X', 'Y', 'Z'])
p1 = Panel(0., **axes)
p1.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p1, expected)
# multi-indexes
axes['items'] = MultiIndex.from_tuples(
[('A', 'a'), ('B', 'b')])
p2 = Panel(0., **axes)
p2.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p2, expected)
axes['major_axis'] = MultiIndex.from_tuples(
[('A', 1), ('A', 2)])
p3 = Panel(0., **axes)
p3.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p3, expected)
axes['minor_axis'] = MultiIndex.from_product(
[['X'], range(3)])
p4 = Panel(0., **axes)
p4.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p4, expected)
arr = np.array(
[[[1, 0, 0], [2, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
dtype=np.float64)
p5 = Panel(0., **axes)
p5.iloc[0, :, 0] = [1, 2]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p5, expected)
| bsd-3-clause |
azariven/BioSig_SEAS | bin_stable/spectra/display_simple_spectra.py | 1 | 2369 | #!/usr/bin/env python
#
# Copyright (C) 2017 - Massachusetts Institute of Technology (MIT)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This is an example of displaying the spectrum of a molecule.
Temperature and pressure grid to sample from:
T_grid = [100,150,200,250,275,300,325,350,400]
P_grid = [100000.0, 36800.0, 13500.0, 4980.0, 1830.0, 674.0, 248.0, 91.2, 33.5, 12.3, 4.54, 1.67, 0.614, 0.226,
0.0832, 0.0306, 0.0113, 0.00414, 0.00152, 0.00056, 0.000206, 7.58e-05, 2.79e-05, 1.03e-05]
"""
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(DIR, '../..'))
import SEAS_Utils.common_utils.db_management2 as dbm
from SEAS_Utils.common_utils.DIRs import Simulation_DB
#import SEAS_Main.simulation.astrophysics as astro
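# The __main__ block below turns an absorption cross section into a
# transmission spectrum via the Beer-Lambert law, T = exp(-n * sigma * L).
# A minimal standalone sketch of that step (kept separate for reuse; the
# molecule, number density and path length used below are illustrative choices):
def beer_lambert_transmission(number_density, cross_section, path_length=1.0):
    """Return the transmittance exp(-n * sigma * L)."""
    return np.exp(-number_density * cross_section * path_length)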
if __name__ == "__main__":
kwargs = {"dir" :"/Users/mac/Workspace/BioSig2/SEAS_Utils/common_utils/../../input/database/Simulation_Band",
"db_name" :"cross_section_Simulation.db",
"user" :"azariven",
"DEBUG" :False,
"REMOVE" :True,
"BACKUP" :False,
"OVERWRITE" :True}
cross_db = dbm.database(**kwargs)
cross_db.access_db()
molecule = "CS"
P = 0.0306
T = 275
numin = 400
numax = 30000
result = cross_db.c.execute("SELECT nu, coef FROM {} WHERE P={} AND T={} AND nu>={} AND nu<{} ORDER BY nu".format(molecule,P,T,numin,numax))
nu,xsec = np.array(result.fetchall()).T
pathl = 1
n = 10**25
tau = n*xsec*pathl
trans = np.e**(-tau)
plt.plot(nu,trans)
plt.show()
| gpl-3.0 |
ssaeger/scikit-learn | examples/cluster/plot_segmentation_toy.py | 91 | 3522 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (ie balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make the weights only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi
# partition of the graph.
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
elastic/examples | Exploring Public Datasets/nyc_restaurants/scripts/ingestRestaurantData.py | 3 | 5504 | # coding: utf-8
# In[ ]:
import pandas as pd
import elasticsearch
import json
import re
import certifi
# If you are using the Elastic cloud, or need https/ssl, toggle the below
# commented sections. Note that the Elastic cloud may be using port 9243
#
es = elasticsearch.Elasticsearch(
# ['host1'],
# http_auth=('myuser', 'mypassword'),
# port=443,
# use_ssl=True
)
# In this example, we use the [Google geocoding API](https://developers.google.com/maps/documentation/geocoding/) to translate addresses into geo-coordinates. Google imposes usage limits on the API. If you are using this script to index data, you may need to sign up for an API key to raise those limits.
# In[ ]:
from geopy.geocoders import GoogleV3
geolocator = GoogleV3()
# geolocator = GoogleV3(api_key=<your_google_api_key>)
# # Import Data
# Import restaurant inspection data into a Pandas dataframe
# In[ ]:
t = pd.read_csv('https://data.cityofnewyork.us/api/views/43nn-pn8j/rows.csv?accessType=DOWNLOAD', header=0, sep=',',
dtype={'PHONE': str, 'INSPECTION DATE': str});
# In[ ]:
## Helper Functions
from datetime import datetime
def str_to_iso(text):
if text != '':
for fmt in (['%m/%d/%Y']):
try:
# print(fmt)
# print(datetime.strptime(text, fmt))
return datetime.isoformat(datetime.strptime(text, fmt))
except ValueError:
# print(text)
pass
# raise ValueError('Changing date')
else:
return None
def getLatLon(row):
if row['Address'] != '':
location = geolocator.geocode(row['Address'], timeout=10000, sensor=False)
if location != None:
lat = location.latitude
lon = location.longitude
# print(lat,lon)
return [lon, lat]
elif row['Zipcode'] != '' or location != None:
location = geolocator.geocode(row['Zipcode'], timeout=10000, sensor=False)
if location != None:
lat = location.latitude
lon = location.longitude
# print(lat,lon)
return [lon, lat]
else:
return None
def getAddress(row):
if row['Building'] != '' and row['Street'] != '' and row['Boro'] != '':
x = row['Building'] + ' ' + row['Street'] + ' ' + row['Boro'] + ',NY'
x = re.sub(' +', ' ', x)
return x
else:
return ''
def combineCT(x):
return str(x['Inspection_Date'][0][0:10]) + '_' + str(x['Camis'])
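# Quick sanity check for the helpers above (illustrative, not part of the
# original notebook export): str_to_iso turns the source's MM/DD/YYYY strings
# into ISO-8601 timestamps that Elasticsearch can index as dates.
assert str_to_iso('12/30/2014') == '2014-12-30T00:00:00'
assert str_to_iso('') is None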
# # Data preprocessing
# In[ ]:
# process column names: remove spaces & use title casing
t.columns = map(str.title, t.columns)
t.columns = map(lambda x: x.replace(' ', '_'), t.columns)
# replace nan with ''
t.fillna('', inplace=True)
# Convert date to ISO format
t['Inspection_Date'] = t['Inspection_Date'].map(lambda x: str_to_iso(x))
t['Record_Date'] = t['Record_Date'].map(lambda x: str_to_iso(x))
t['Grade_Date'] = t['Grade_Date'].map(lambda x: str_to_iso(x))
# t['Inspection_Date'] = t['Inspection_Date'].map(lambda x: x.split('/'))
# Combine Street, Building and Boro information to create Address string
t['Address'] = t.apply(getAddress, axis=1)
# Create a dictionary of unique Addresses. We do this to avoid calling the Google geocoding api multiple times for the same address
# In[ ]:
addDict = t[['Address', 'Zipcode']].copy(deep=True)
addDict = addDict.drop_duplicates()
addDict['Coord'] = [None] * len(addDict)
# Look up the geo-coordinates for each unique address. This step can take a while because it calls the Google geocoding API once per unique address.
# In[ ]:
for item_id, item in addDict.iterrows():
if item_id % 100 == 0:
print(item_id)
if addDict['Coord'][item_id] == None:
addDict['Coord'][item_id] = getLatLon(item)
# print(addDict.loc[item_id]['Coord'])
# Save address dictionary to CSV
# addDict.to_csv('./dict_final.csv')
# In[ ]:
# Merge coordinates into original table
t1 = t.merge(addDict[['Address', 'Coord']])
# Keep only 1 value of score and grade per inspection
t2 = t1.copy(deep=True)
t2['raw_num'] = t2.index
t2['RI'] = t2.apply(combineCT, axis=1)
yy = t2.groupby('RI').first().reset_index()['raw_num']
t2['Unique_Score'] = None
t2['Unique_Score'].loc[yy.values] = t2['Score'].loc[yy.values]
t2['Unique_Grade'] = None
t2['Unique_Grade'].loc[yy.values] = t2['Grade'].loc[yy.values]
del (t2['RI'])
del (t2['raw_num'])
del (t2['Grade'])
del (t2['Score'])
t2.rename(columns={'Unique_Grade': 'Grade', 'Unique_Score': 'Score'}, inplace=True)
t2['Grade'].fillna('', inplace=True)
# In[ ]:
t2.iloc[1]
# # Index Data
# In[ ]:
### Create and configure Elasticsearch index
# Name of index and document type
index_name = 'nyc_restaurants';
doc_name = 'inspection'
# Delete the nyc_restaurants index if one already exists
if es.indices.exists(index_name):
es.indices.delete(index_name)
# Create the nyc_restaurants index
es.indices.create(index_name)
# In[ ]:
# Add mapping
with open('./inspection_mapping.json') as json_mapping:
d = json.load(json_mapping)
es.indices.put_mapping(index=index_name, doc_type=doc_name, body=d)
# Index data
for item_id, item in t2.iterrows():
if item_id % 1000 == 0:
print(item_id)
thisItem = item.to_dict()
# thisItem['Coord'] = getLatLon(thisItem)
thisDoc = json.dumps(thisItem);
# pprint.pprint(thisItem)
# write to elasticsearch
es.index(index=index_name, doc_type=doc_name, id=item_id, body=thisDoc)
# In[ ]:
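# Possible follow-up (sketch only, not executed by the original script): for a
# dataset of this size the bulk helper shipped with elasticsearch-py avoids one
# HTTP round trip per document. `chunk_size` here is an illustrative default.
def bulk_index_sketch(frame, chunk_size=500):
    from elasticsearch import helpers
    actions = ({'_index': index_name, '_type': doc_name, '_id': i,
                '_source': row.to_dict()} for i, row in frame.iterrows())
    helpers.bulk(es, actions, chunk_size=chunk_size)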
| apache-2.0 |
t2abdulg/SALib | SALib/plotting/morris.py | 2 | 4789 | '''
Created on 29 Jun 2015
@author: @willu47
This module provides the basic infrastructure for plotting charts for the
Method of Morris results
The procedures should build upon and return an axes instance.
Si = morris.analyze(problem, param_values, Y, conf_level=0.95, print_to_console=False, num_levels=10, grid_jump=5)
p = morris.horizontal_bar_plot(Si)
# set plot style etc.
fig, ax = plt.subplots(1, 1)
my_plotter(ax, data1, data2, {'marker':'x'})
p.show()
def my_plotter(ax, data1, data2, param_dict):
"""
A helper function to make a graph
Parameters
----------
ax : Axes
The axes to draw to
data1 : array
The x data
data2 : array
The y data
param_dict : dict
Dictionary of kwargs to pass to ax.plot
Returns
-------
out : list
list of artists added
"""
out = ax.plot(data1, data2, **param_dict)
return out
'''
import matplotlib.pyplot as plt
import numpy as np
def _sort_Si(Si, key, sortby='mu_star'):
return np.array([Si[key][x] for x in np.argsort(Si[sortby])])
def _sort_Si_by_index(Si, key, index):
return np.array([Si[key][x] for x in index])
def horizontal_bar_plot(ax, Si, param_dict={}, sortby='mu_star', unit=''):
'''
Updates a matplotlib axes instance with a horizontal bar plot
of mu_star, with error bars representing mu_star_conf
'''
assert sortby in ['mu_star', 'mu_star_conf', 'sigma', 'mu']
fig = ax.get_figure()
# Sort all the plotted elements by mu_star (or optionally another
# metric)
names_sorted = _sort_Si(Si, 'names', sortby)
mu_star_sorted = _sort_Si(Si, 'mu_star', sortby)
mu_star_conf_sorted = _sort_Si(Si, 'mu_star_conf', sortby)
# Plot horizontal barchart
y_pos = np.arange(len(mu_star_sorted))
plot_names = names_sorted
out = ax.barh(y_pos,
mu_star_sorted,
xerr=mu_star_conf_sorted,
align='center',
ecolor='black',
**param_dict)
ax.set_yticks(y_pos)
ax.set_yticklabels(plot_names)
ax.set_xlabel(r'$\mu^\star$' + unit)
return out
def covariance_plot(ax, Si, param_dict, unit=""):
'''
Plots mu* against sigma or the 95% confidence interval
'''
if Si['sigma'] is not None:
# sigma is not present if using morris groups
y = Si['sigma']
out = ax.scatter(Si['mu_star'], y, c=u'k', marker=u'o',
**param_dict)
ax.set_ylabel(r'$\sigma$')
ax.set_xlim(0,)
ax.set_ylim(0,)
x_axis_bounds = np.array(ax.get_xlim())
line1, = ax.plot(x_axis_bounds, x_axis_bounds, 'k-')
line2, = ax.plot(x_axis_bounds, 0.5 * x_axis_bounds, 'k--')
line3, = ax.plot(x_axis_bounds, 0.1 * x_axis_bounds, 'k-.')
ax.legend((line1, line2, line3), (r'$\sigma / \mu^{\star} = 1.0$',
r'$\sigma / \mu^{\star} = 0.5$',
r'$\sigma / \mu^{\star} = 0.1$'),
loc='upper left')
else:
y = Si['mu_star_conf']
out = ax.scatter(Si['mu_star'], y, c=u'k', marker=u'o',
**param_dict)
ax.set_ylabel(r'$95\% CI$')
ax.set_xlabel(r'$\mu^\star$ ' + unit)
return out
def sample_histograms(fig, input_sample, problem, param_dict={}):
'''
Plots a set of subplots of histograms of the input sample
'''
num_vars = problem['num_vars']
names = problem['names']
framing = 101 + (num_vars * 10)
# Find number of levels
num_levels = len(set(input_sample[:,1]))
out = []
for variable in range(num_vars):
ax = fig.add_subplot(framing + variable)
out.append(ax.hist(input_sample[:, variable],
bins=num_levels,
normed=False,
label=None,
**param_dict))
ax.set_title('%s' % (names[variable]))
ax.tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off') # labels along the bottom edge are off)
if variable > 0:
ax.tick_params(axis='y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
labelleft='off') # labels along the left edge are off)
return out
if __name__ == '__main__':
pass
| mit |
gotomypc/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
LiaoPan/scikit-learn | sklearn/tests/test_grid_search.py | 68 | 28778 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
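# Illustrative sketch (not one of the original tests): grid search relies only
# on duck typing, so MockClassifier above -- which merely implements
# fit/predict/score/get_params/set_params -- is enough for GridSearchCV. Ties
# on the score are broken by taking the first (smallest) parameter value.
def _duck_typed_grid_search_sketch():
    X_demo = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    y_demo = np.array([1, 1, 2, 2])
    search = GridSearchCV(MockClassifier(), {'foo_param': [1, 2, 3]})
    search.fit(X_demo, y_demo)
    assert search.best_estimator_.foo_param == 2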
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
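# i.e. 1 * 0.25 + (1/3) * 0.75 = 0.5 once each fold is weighted by its test-set size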
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
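# the full grid has 2 * 3 = 6 distinct settings, so drawing 7 samples
# without replacement is impossible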
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
tangyouze/tushare | tushare/datayes/master.py | 17 | 4457 | # -*- coding:utf-8 -*-
"""
DataYes (通联数据) data API
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Master():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def SecID(self, assetClass='', cnSpell='', partyID='', ticker='', field=''):
"""
Look up a security ID (the unique identifier of a security in the data
model) by institution partyID, security-name pinyin cnSpell or trading
code ticker; the ticker can also be retrieved from cnSpell. Basic listing
information for the security is returned as well, such as exchange,
listing status, trading currency and ISIN code.
"""
code, result = self.client.getData(vs.SECID%(assetClass, cnSpell, partyID, ticker, field))
return _ret_data(code, result)
def TradeCal(self, exchangeCD='', beginDate='', endDate='', field=''):
"""
Trading-calendar records stating whether each calendar date is a trading
day for the Shanghai Stock Exchange, Shenzhen Stock Exchange, China
interbank market, Dalian Commodity Exchange, Zhengzhou Commodity Exchange,
Shanghai Futures Exchange, China Financial Futures Exchange, Hong Kong
Exchange and others. For Shanghai and Shenzhen the records cover every
date since the exchanges were founded. Data are refreshed on the day a
holiday schedule is announced.
"""
code, result = self.client.getData(vs.TRADECAL%(exchangeCD, beginDate, endDate, field))
return _ret_data(code, result)
def Industry(self, industryVersion='', industryVersionCD='', industryLevel='', isNew='', field=''):
"""
Given a DataYes industry-classification code (e.g. 010303 for the 2014
Shenwan industry classification) or the name of a classification standard,
return the industry breakdown defined by that standard.
"""
code, result = self.client.getData(vs.INDUSTRY%(industryVersion, industryVersionCD,
industryLevel, isNew, field))
return _ret_data(code, result)
def SecTypeRel(self, secID='', ticker='', typeID='', field=''):
"""
Constituents of each security classification; the classifications
themselves can be obtained via getSecType.
"""
code, result = self.client.getData(vs.SECTYPEREL%(secID, ticker, typeID, field))
return _ret_data(code, result)
def EquInfo(self, ticker='', field=''):
"""
Match stock codes and names by pinyin or ticker. Covers all listed
Shanghai and Shenzhen stocks.
"""
code, result = self.client.getData(vs.EQUINFO%(ticker, field))
return _ret_data(code, result)
def SecTypeRegionRel(self, secID='', ticker='', typeID='', field=''):
"""
Regional classification of Shanghai/Shenzhen stocks, keyed on the
administrative region of the registered address.
"""
code, result = self.client.getData(vs.SECTYPEREGIONREL%(secID, ticker, typeID, field))
return _ret_data(code, result)
def SecType(self, field=''):
"""
List of security classifications. Top-level categories include
Shanghai/Shenzhen stocks, Hong Kong stocks, funds, bonds, futures and
options, each subdivided into finer types; all classifications can be
fetched in one call.
"""
code, result = self.client.getData(vs.SECTYPE%(field))
return _ret_data(code, result)
def SecTypeRegion(self, field=''):
"""
Chinese regional classification, based on administrative divisions.
"""
code, result = self.client.getData(vs.SECTYPEREGION%(field))
return _ret_data(code, result)
def SysCode(self, codeTypeID='', valueCD='', field=''):
"""
Code table for the enumerated output columns of the APIs, e.g. which
market each exchangeCD value returned by getSecID stands for; every
enumerated value can be looked up through this interface.
"""
code, result = self.client.getData(vs.SYSCODE%(codeTypeID, valueCD, field))
return _ret_data(code, result)
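# A minimal usage sketch (illustrative only; assumes a valid DataYes token has
# already been stored, e.g. via tushare.util.upass.set_token, and that sample
# parameter values such as exchangeCD='XSHG' or ticker='000001' follow the
# DataYes coding conventions):
#
# mt = Master()
# calendar = mt.TradeCal(exchangeCD='XSHG', beginDate='20150101',
#                        endDate='20150131')
# security = mt.SecID(ticker='000001')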
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
| bsd-3-clause |
richardotis/pycalphad | pycalphad/plot/triangular.py | 2 | 7405 | """
Register a ``'triangular'`` projection with matplotlib to plot diagrams on
triangular axes.
Users should not have to instantiate the TriangularAxes class directly.
Instead, the projection name can be passed as a keyword argument to
matplotlib.
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> plt.gca(projection='triangular')
>>> plt.scatter(np.random.random(10), np.random.random(10))
"""
from matplotlib.axes import Axes
from matplotlib.patches import Polygon
from matplotlib.ticker import NullLocator
from matplotlib.transforms import Affine2D, BboxTransformTo
from matplotlib.projections import register_projection
import matplotlib.spines as mspines
import matplotlib.axis as maxis
import numpy as np
class TriangularAxes(Axes):
"""
A custom class for triangular projections.
"""
name = 'triangular'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.set_aspect(1, adjustable='box', anchor='SW')
self.cla()
def _init_axis(self):
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
self._update_transScale()
def cla(self):
"""
Hard-code axes limits to be on [0, 1] for both axes.
Warning: Limits not on [0, 1] may lead to clipping issues!
"""
# Don't forget to call the base class
super().cla()
x_min = 0
y_min = 0
x_max = 1
y_max = 1
x_spacing = 0.1
y_spacing = 0.1
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
self.xaxis.set_ticks_position('bottom')
self.yaxis.set_ticks_position('left')
super().set_xlim(x_min, x_max)
super().set_ylim(y_min, y_max)
self.xaxis.set_ticks(np.arange(x_min, x_max+x_spacing, x_spacing))
self.yaxis.set_ticks(np.arange(y_min, y_max+y_spacing, y_spacing))
def _set_lim_and_transforms(self):
"""
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
# This code is based on matplotlib's example for a custom Hammer
# projection. See: https://matplotlib.org/gallery/misc/custom_projection.html#sphx-glr-gallery-misc-custom-projection-py
# This function makes heavy use of the Transform classes in
# ``lib/matplotlib/transforms.py.`` For more information, see
# the inline documentation there.
# Affine2D.from_values(a, b, c, d, e, f) constructs an affine
# transformation matrix of
# a c e
# b d f
# 0 0 1
# A useful reference for the different coordinate systems can be found
# in a table in the matplotlib transforms tutorial:
# https://matplotlib.org/tutorials/advanced/transforms_tutorial.html#transformations-tutorial
# The goal of this transformation is to get from the data space to axes
# space. We perform an affine transformation on the y-axis, i.e.
# transforming the y-axis from (0, 1) to (0.5, sqrt(3)/2).
self.transAffine = Affine2D.from_values(1., 0, 0.5, np.sqrt(3)/2., 0, 0)
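# As a worked check of the values above: with a=1, b=0, c=0.5,
# d=sqrt(3)/2 and e=f=0, a data point (x, y) maps to
# (x + 0.5*y, sqrt(3)/2 * y); (1, 0) stays at (1, 0) while (0, 1) lands
# at (0.5, ~0.866), so the data triangle (0,0)-(1,0)-(0,1) becomes an
# equilateral triangle in axes space.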
# Affine transformation along the dependent axis
self.transAffinedep = Affine2D.from_values(1., 0, -0.5, np.sqrt(3)/2., 0, 0)
# This is the transformation from axes space to display space.
self.transAxes = BboxTransformTo(self.bbox)
# The data transformation is the application of the affine
# transformation from data to axes space, then from axes to display
# space. The '+' operator applies these in order.
self.transData = self.transAffine + self.transAxes
# The main data transformation is set up. Now deal with gridlines and
# tick labels. For these, we want the same transform as the data, so we
# apply transData directly.
self._xaxis_transform = self.transData
self._xaxis_text1_transform = self.transData
self._xaxis_text2_transform = self.transData
self._yaxis_transform = self.transData
self._yaxis_text1_transform = self.transData
self._yaxis_text2_transform = self.transData
def get_xaxis_transform(self, which='grid'):
assert which in ['tick1', 'tick2', 'grid']
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return super().get_xaxis_text1_transform(pad)[0], 'top', 'center'
def get_xaxis_text2_transform(self, pad):
return super().get_xaxis_text2_transform(pad)[0], 'top', 'center'
def get_yaxis_transform(self, which='grid'):
assert which in ['tick1', 'tick2', 'grid']
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
return super().get_yaxis_text1_transform(pad)[0], 'center', 'right'
def get_yaxis_text2_transform(self, pad):
return super().get_yaxis_text2_transform(pad)[0], 'center', 'left'
def _gen_axes_spines(self):
# The dependent axis (right hand side) spine should be set to complete
# the triangle, i.e. the spine from (1, 0) to (1, 1) will be
# transformed to (1, 0) to (0.5, sqrt(3)/2).
dep_spine = mspines.Spine.linear_spine(self, 'right')
dep_spine.set_transform(self.transAffinedep + self.transAxes)
return {
'left': mspines.Spine.linear_spine(self, 'left'),
'bottom': mspines.Spine.linear_spine(self, 'bottom'),
'right': dep_spine,
}
def _gen_axes_patch(self):
"""
Override this method to define the shape that is used for the
background of the plot. It should be a subclass of Patch.
Any data and gridlines will be clipped to this shape.
"""
return Polygon([[0, 0], [0.5, np.sqrt(3)/2], [1, 0]], closed=True)
# Interactive panning and zooming is not supported with this projection,
# so we override all of the following methods to disable it.
def can_zoom(self):
"""
Return True if this axes supports the zoom box
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
def set_ylabel(self, ylabel, fontdict=None, labelpad=None, *, loc=None, **kwargs):
"""
Set the label for the y-axis. Default rotation=60 degrees.
Parameters
----------
ylabel : str
The label text.
labelpad : float, default: None
Spacing in points from the axes bounding box including ticks
and tick labels.
loc : {'bottom', 'center', 'top'}, default: :rc:`yaxis.labellocation`
The label position. This is a high-level alternative for passing
parameters *y* and *horizontalalignment*.
Other Parameters
----------------
**kwargs : `.Text` properties
`.Text` properties control the appearance of the label.
See Also
--------
text : Documents the properties supported by `.Text`.
"""
kwargs.setdefault('rotation', 60)
return super().set_ylabel(ylabel, fontdict, labelpad, loc=loc, **kwargs)
# Now register the projection with matplotlib so the user can select it.
register_projection(TriangularAxes)
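# A minimal usage sketch (illustrative only; the composition data and axis
# labels are made up for demonstration):
#
# import numpy as np
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='triangular')
# x = np.random.random(20)
# y = (1 - x) * np.random.random(20)  # keep points inside the triangle
# ax.scatter(x, y)
# ax.set_xlabel('X(B)')
# ax.set_ylabel('X(C)')  # drawn at the default 60 degree rotation
# plt.show()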
| mit |
voxlol/scikit-learn | sklearn/datasets/samples_generator.py | 45 | 56433 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import warnings
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
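Examples
--------
A minimal call with mostly default settings (shapes shown for illustration):
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=20, random_state=0)
>>> X.shape
(100, 20)
>>> y.shape
(100,)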
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=False,
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : bool, optional (default=False),
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
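# the cumulative class prior is used below for inverse-CDF sampling of
# classes via np.searchsorted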
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
lb = MultiLabelBinarizer()
Y = lb.fit([range(n_classes)]).transform(Y)
else:
warnings.warn('Support for the sequence of sequences multilabel '
'representation is being deprecated and replaced with '
'a sparse indicator matrix. '
'return_indicator will default to True from version '
'0.17.',
DeprecationWarning)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
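Examples
--------
A small illustrative call (shapes follow directly from the parameters):
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_samples=10, n_features=4, random_state=0)
>>> X.shape
(10, 4)
>>> y.shape
(10,)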
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
# label outer-circle points 0 and inner-circle points 1, matching the
# stacking order of X above
y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
               np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
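# after rescaling, the four columns span [0, 100], [40*pi, 560*pi],
# [0, 1] and [1, 11], matching the intervals documented above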
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
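# --- Editor's illustrative sketch (not part of the original module); the helper
# name and parameter values are arbitrary. Because u and v have orthonormal
# columns, the singular values of the returned matrix follow the documented
# bell-plus-tail profile; at i == 0 both terms are exp(0), so the largest
# singular value equals (1 - tail_strength) + tail_strength == 1.
def _example_make_low_rank_matrix():
    import numpy as np
    X = make_low_rank_matrix(n_samples=50, n_features=30, effective_rank=5,
                             tail_strength=0.1, random_state=0)
    s = np.linalg.svd(X, compute_uv=False)
    assert np.isclose(s[0], 1.0)
    return s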
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
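# --- Editor's illustrative sketch (not part of the original module); the helper
# name and parameter values are arbitrary. Each column of the code X carries
# exactly n_nonzero_coefs non-zero entries and the signal factors as Y = D X.
def _example_make_sparse_coded_signal():
    import numpy as np
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    assert Y.shape == (10, 5) and D.shape == (10, 8) and X.shape == (8, 5)
    assert np.all((X != 0).sum(axis=0) == 3)
    assert np.allclose(Y, np.dot(D, X))
    return Y, D, X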
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
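# --- Editor's illustrative sketch (not part of the original module); the helper
# name and parameter values are arbitrary and the tolerance is a loose
# statistical bound, not an exact property. An ordinary least-squares fit
# recovers coefficients close to the documented values (1, 2, -2, -1.5) on the
# first four features and roughly zero elsewhere.
def _example_make_sparse_uncorrelated():
    import numpy as np
    X, y = make_sparse_uncorrelated(n_samples=5000, n_features=10,
                                    random_state=0)
    coef, _, _, _ = np.linalg.lstsq(X, y, rcond=None)
    assert np.allclose(coef[:4], [1, 2, -2, -1.5], atol=0.2)
    return coef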
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
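# --- Editor's illustrative sketch (not part of the original module); the helper
# name and dimension are arbitrary. The construction above amounts to
# U * diag(1 + rand) * U.T, so the result is symmetric with eigenvalues in
# (1, 2) and hence positive definite.
def _example_make_spd_matrix():
    import numpy as np
    A = make_spd_matrix(n_dim=4, random_state=0)
    assert np.allclose(A, A.T)
    assert np.all(np.linalg.eigvalsh(A) > 0)
    return A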
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
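# --- Editor's illustrative sketch (not part of the original module); the helper
# name and parameter values are arbitrary. The Cholesky-like factor chol has a
# unit-magnitude diagonal and is therefore nonsingular, so prec = chol.T * chol
# is positive definite, and a large alpha keeps most entries at exactly zero.
def _example_make_sparse_spd_matrix():
    import numpy as np
    prec = make_sparse_spd_matrix(dim=10, alpha=0.98, smallest_coef=0.1,
                                  largest_coef=0.9, random_state=0)
    assert np.allclose(prec, prec.T)
    assert np.linalg.eigvalsh(prec).min() > 0
    return np.mean(prec == 0)  # fraction of exact zeros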
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
        The number of sample points on the Swiss Roll.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
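# --- Editor's illustrative sketch (not part of the original module); the helper
# name and parameter values are arbitrary. The univariate parameter t drives
# the roll: t lies in [1.5 * pi, 4.5 * pi) and X stacks (t cos t, 21 * U(0, 1),
# t sin t) plus optional Gaussian noise.
def _example_make_swiss_roll():
    import numpy as np
    X, t = make_swiss_roll(n_samples=300, noise=0.05, random_state=0)
    assert X.shape == (300, 3) and t.shape == (300,)
    assert t.min() >= 1.5 * np.pi and t.max() < 4.5 * np.pi
    return X, t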
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
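# --- Editor's illustrative sketch (not part of the original module); the helper
# name and parameter values are arbitrary. Because class membership is defined
# by quantiles of the distance to the mean, the classes come out equally
# populated whenever n_samples is a multiple of n_classes.
def _example_make_gaussian_quantiles():
    import numpy as np
    X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
                                   random_state=0)
    assert X.shape == (90, 2)
    assert np.bincount(y).tolist() == [30, 30, 30]
    return X, y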
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
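# --- Editor's illustrative sketch (not part of the original module); the helper
# name and parameter values are arbitrary. With noise == 0 every bicluster is a
# constant block: rows[i] and cols[i] are boolean masks that select it even
# after shuffling.
def _example_make_biclusters():
    import numpy as np
    data, rows, cols = make_biclusters(shape=(20, 20), n_clusters=4,
                                       noise=0.0, random_state=0)
    assert rows.shape == (4, 20) and cols.shape == (4, 20)
    block = data[rows[1]][:, cols[1]]
    assert np.allclose(block, block.flat[0])
    return data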
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
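# --- Editor's illustrative sketch (not part of the original module); the helper
# name and parameter values are arbitrary. With (3, 2) row/column clusters the
# checkerboard has 3 * 2 = 6 blocks; rows[i] together with cols[i] selects the
# i-th block, which is constant when noise == 0.
def _example_make_checkerboard():
    import numpy as np
    data, rows, cols = make_checkerboard(shape=(30, 30), n_clusters=(3, 2),
                                         noise=0.0, random_state=0)
    assert rows.shape == (6, 30) and cols.shape == (6, 30)
    block = data[rows[0]][:, cols[0]]
    assert np.allclose(block, block.flat[0])
    return data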
| bsd-3-clause |
annahs/atmos_research | AL_PAPI_parser.py | 1 | 2020 | import sys
import os
from datetime import datetime
from pprint import pprint
import pickle
from datetime import timedelta
import calendar
from matplotlib import dates
import matplotlib.pyplot as plt
import numpy as np
######get data
list = []
data_dir = 'F:/Alert/2013/Reduced/' #Alert data is in UTC - see email from Dan Veber
os.chdir(data_dir)
for directory in os.listdir(data_dir):
if os.path.isdir(directory) == True and directory.startswith('20'):
folder_date = datetime.strptime(directory, '%Y%m%d')
folder_path = os.path.join(data_dir, directory)
os.chdir(folder_path)
if datetime(2013,10,1) <= folder_date < datetime(2014,1,1) :
for file in os.listdir('.'):
if file.endswith('OutputWaves.dat'):
print file
with open(file, 'r') as f:
temp = f.read().splitlines()
first_line = True
for line in temp:
if first_line == True:
first_line = False
continue
newline = line.split()
raw_time = (float(newline[0]) + 3600/2) + calendar.timegm(folder_date.utctimetuple())
date_time = datetime.utcfromtimestamp(raw_time)
try:
incand_conc = float(newline[8])
if incand_conc == 0:
incand_conc = np.nan
tot_incand_conc = float(newline[9])
except:
incand_conc = np.nan
tot_incand_conc = np.nan
ratio= tot_incand_conc/incand_conc
list.append([date_time,ratio])
os.chdir(data_dir)
dates_plot = [dates.date2num(row[0]) for row in list]
ratios = [row[1] for row in list]
mean_ratio = np.nanmean(ratios)
hfmt = dates.DateFormatter('%Y%m%d')
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.xaxis.set_major_formatter(hfmt)
ax1.plot(dates_plot,ratios,'-bo')
ax1.set_ylim(0,6)
ax1.set_ylabel('PAPI Total incandescent mass/Incandescent mass\n Oct-Dec 2013')
ax1.set_xlabel('Date')
plt.axhline(y=mean_ratio,color='r')
ax1.text(0.7, 0.9,'mean ratio: ' + str(round(mean_ratio,3)), fontsize = 16, transform=ax1.transAxes)
plt.show() | mit |
justincassidy/scikit-learn | sklearn/metrics/tests/test_ranking.py | 127 | 40813 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
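# --- Editor's illustrative sketch (not part of the original test file); the
# helper name and toy values are arbitrary. On this hand-checkable case the
# positives outrank the negatives in 3 of the 4 (positive, negative) pairs, so
# _auc gives 0.75, and the relevant hits sit at ranks 1 and 3, so
# _average_precision gives (1/1 + 2/3) / 2.
def _example_reference_metrics():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    assert_almost_equal(_auc(y_true, y_score), 0.75)
    assert_almost_equal(_average_precision(y_true, y_score),
                        (1.0 + 2.0 / 3.0) / 2)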
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at
    # 1, even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small example that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works for various setups:
    # basic check with increasing label space size and decreasing scores
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account:
        # e.g. two labels tied at rank 1 both get rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels with a better rank
            # (i.e. a smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
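# --- Editor's illustrative sketch (not part of the original test file); the
# helper name is arbitrary and the toy values mirror two cases from
# check_lrap_toy. The naive implementation above should agree with the library
# function: the per-sample scores are (1 + 2/3) / 2 and 1/2, and the result is
# their mean.
def _example_my_lrap():
    y_true = np.array([[1, 0, 1], [0, 1, 0]])
    y_score = np.array([[0.75, 0.5, 0.25], [0.25, 0.5, 0.75]])
    expected = ((1 + 2 / 3) / 2 + 1 / 2) / 2
    assert_almost_equal(_my_lrap(y_true, y_score), expected)
    assert_almost_equal(
        label_ranking_average_precision_score(y_true, y_score), expected)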
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
google/brain-tokyo-workshop | AttentionAgent/solutions/torch_solutions.py | 1 | 16033 | import abc
import gin
import numpy as np
import os
import solutions.abc_solution
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
class BaseTorchSolution(solutions.abc_solution.BaseSolution):
"""Base class for all Torch solutions."""
def __init__(self):
self._layers = []
def get_output(self, inputs, update_filter=False):
torch.set_num_threads(1)
with torch.no_grad():
return self._get_output(inputs, update_filter)
@abc.abstractmethod
def _get_output(self, inputs, update_filter):
raise NotImplementedError()
def get_params(self):
params = []
for layer in self._layers:
weight_dict = layer.state_dict()
for k in sorted(weight_dict.keys()):
params.append(weight_dict[k].numpy().copy().ravel())
return np.concatenate(params)
def set_params(self, params):
offset = 0
for i, layer in enumerate(self._layers):
weights_to_set = {}
weight_dict = layer.state_dict()
for k in sorted(weight_dict.keys()):
weight = weight_dict[k].numpy()
weight_size = weight.size
weights_to_set[k] = torch.from_numpy(
params[offset:(offset + weight_size)].reshape(weight.shape))
offset += weight_size
self._layers[i].load_state_dict(state_dict=weights_to_set)
def get_params_from_layer(self, layer_index):
params = []
layer = self._layers[layer_index]
weight_dict = layer.state_dict()
for k in sorted(weight_dict.keys()):
params.append(weight_dict[k].numpy().copy().ravel())
return np.concatenate(params)
def set_params_to_layer(self, params, layer_index):
weights_to_set = {}
weight_dict = self._layers[layer_index].state_dict()
offset = 0
for k in sorted(weight_dict.keys()):
weight = weight_dict[k].numpy()
weight_size = weight.size
weights_to_set[k] = torch.from_numpy(
params[offset:(offset + weight_size)].reshape(weight.shape))
offset += weight_size
self._layers[layer_index].load_state_dict(state_dict=weights_to_set)
def get_num_params_per_layer(self):
num_params_per_layer = []
for layer in self._layers:
weight_dict = layer.state_dict()
num_params = 0
for k in sorted(weight_dict.keys()):
weights = weight_dict[k].numpy()
num_params += weights.size
num_params_per_layer.append(num_params)
return num_params_per_layer
def _save_to_file(self, filename):
params = self.get_params()
np.savez(filename, params=params)
def save(self, log_dir, iter_count, best_so_far):
filename = os.path.join(log_dir, 'model_{}.npz'.format(iter_count))
self._save_to_file(filename=filename)
if best_so_far:
filename = os.path.join(log_dir, 'best_model.npz')
self._save_to_file(filename=filename)
def load(self, filename):
with np.load(filename) as data:
params = data['params']
self.set_params(params)
def reset(self):
raise NotImplementedError()
@property
def layers(self):
return self._layers
class SelfAttention(nn.Module):
"""A simple self-attention solution."""
def __init__(self, data_dim, dim_q):
super(SelfAttention, self).__init__()
self._layers = []
self._fc_q = nn.Linear(data_dim, dim_q)
self._layers.append(self._fc_q)
self._fc_k = nn.Linear(data_dim, dim_q)
self._layers.append(self._fc_k)
def forward(self, input_data):
# Expect input_data to be of shape (b, t, k).
b, t, k = input_data.size()
# Linear transforms.
queries = self._fc_q(input=input_data) # (b, t, q)
keys = self._fc_k(input=input_data) # (b, t, q)
# Attention matrix.
dot = torch.bmm(queries, keys.transpose(1, 2)) # (b, t, t)
scaled_dot = torch.div(dot, torch.sqrt(torch.tensor(k).float()))
return scaled_dot
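    # Shape sketch (illustrative numbers): an input of shape (1, 529, k), i.e.
    # 529 flattened patches of dimension k, yields queries and keys of shape
    # (1, 529, dim_q) and a (1, 529, 529) patch-to-patch matrix, divided by sqrt(k).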
@property
def layers(self):
return self._layers
class FCStack(nn.Module):
"""Fully connected layers."""
def __init__(self, input_dim, num_units, activation, output_dim):
super(FCStack, self).__init__()
self._activation = activation
self._layers = []
dim_in = input_dim
for i, n in enumerate(num_units):
layer = nn.Linear(dim_in, n)
# layer.weight.data.fill_(0.0)
# layer.bias.data.fill_(0.0)
self._layers.append(layer)
setattr(self, '_fc{}'.format(i + 1), layer)
dim_in = n
output_layer = nn.Linear(dim_in, output_dim)
# output_layer.weight.data.fill_(0.0)
# output_layer.bias.data.fill_(0.0)
self._layers.append(output_layer)
@property
def layers(self):
return self._layers
def forward(self, input_data):
x_input = input_data
for layer in self._layers[:-1]:
x_output = layer(x_input)
if self._activation == 'tanh':
x_input = torch.tanh(x_output)
elif self._activation == 'elu':
x_input = F.elu(x_output)
else:
x_input = F.relu(x_output)
x_output = self._layers[-1](x_input)
return x_output
class LSTMStack(nn.Module):
"""LSTM layers."""
def __init__(self, input_dim, num_units, output_dim):
super(LSTMStack, self).__init__()
self._layers = []
self._hidden_layers = len(num_units) if len(num_units) else 1
self._hidden_size = num_units[0] if len(num_units) else output_dim
self._hidden = (
torch.zeros((self._hidden_layers, 1, self._hidden_size)),
torch.zeros((self._hidden_layers, 1, self._hidden_size)),
)
if len(num_units):
self._lstm = nn.LSTM(
input_size=input_dim,
hidden_size=self._hidden_size,
num_layers=self._hidden_layers,
)
self._layers.append(self._lstm)
fc = nn.Linear(
in_features=self._hidden_size,
out_features=output_dim,
)
self._layers.append(fc)
else:
self._lstm = nn.LSTMCell(
input_size=input_dim,
hidden_size=self._hidden_size,
)
self._layers.append(self._lstm)
@property
def layers(self):
return self._layers
def forward(self, input_data):
x_input = input_data
x_output, self._hidden = self._layers[0](
x_input.view(1, 1, -1), self._hidden)
x_output = torch.flatten(x_output, start_dim=0, end_dim=-1)
if len(self._layers) > 1:
x_output = self._layers[-1](x_output)
return x_output
def reset(self):
self._hidden = (
torch.zeros((self._hidden_layers, 1, self._hidden_size)),
torch.zeros((self._hidden_layers, 1, self._hidden_size)),
)
@gin.configurable
class MLPSolution(BaseTorchSolution):
"""Multi-layer perception."""
def __init__(self,
input_dim,
num_hiddens,
activation,
output_dim,
output_activation,
use_lstm,
l2_coefficient):
super(MLPSolution, self).__init__()
self._use_lstm = use_lstm
self._output_dim = output_dim
self._output_activation = output_activation
if 'roulette' in self._output_activation:
assert self._output_dim == 1
self._n_grid = int(self._output_activation.split('_')[-1])
self._theta_per_grid = 2 * np.pi / self._n_grid
self._l2_coefficient = abs(l2_coefficient)
if self._use_lstm:
self._fc_stack = LSTMStack(
input_dim=input_dim,
output_dim=output_dim,
num_units=num_hiddens,
)
else:
self._fc_stack = FCStack(
input_dim=input_dim,
output_dim=output_dim,
num_units=num_hiddens,
activation=activation,
)
self._layers = self._fc_stack.layers
print('Number of parameters: {}'.format(
self.get_num_params_per_layer()))
def _get_output(self, inputs, update_filter=False):
if not isinstance(inputs, torch.Tensor):
inputs = torch.from_numpy(inputs).float()
fc_output = self._fc_stack(inputs)
if self._output_activation == 'tanh':
output = torch.tanh(fc_output).squeeze().numpy()
elif self._output_activation == 'softmax':
output = F.softmax(fc_output, dim=-1).squeeze().numpy()
else:
output = fc_output.squeeze().numpy()
return output
def reset(self):
if hasattr(self._fc_stack, 'reset'):
self._fc_stack.reset()
print('hidden reset.')
@gin.configurable
class VisionTaskSolution(BaseTorchSolution):
"""A general solution for vision based tasks."""
def __init__(self,
image_size,
query_dim,
output_dim,
output_activation,
num_hiddens,
l2_coefficient,
patch_size,
patch_stride,
top_k,
data_dim,
activation,
normalize_positions=False,
use_lstm_controller=False,
show_overplot=False):
super(VisionTaskSolution, self).__init__()
self._image_size = image_size
self._patch_size = patch_size
self._patch_stride = patch_stride
self._top_k = top_k
self._l2_coefficient = l2_coefficient
self._show_overplot = show_overplot
self._normalize_positions = normalize_positions
self._screen_dir = None
self._img_ix = 1
self._raw_importances = []
self._transform = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((image_size, image_size)),
transforms.ToTensor(),
])
n = int((image_size - patch_size) / patch_stride + 1)
offset = self._patch_size // 2
patch_centers = []
for i in range(n):
patch_center_row = offset + i * patch_stride
for j in range(n):
patch_center_col = offset + j * patch_stride
patch_centers.append([patch_center_row, patch_center_col])
self._patch_centers = torch.tensor(patch_centers).float()
num_patches = n ** 2
print('num_patches = {}'.format(num_patches))
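        # Illustrative numbers (assuming e.g. image_size=96, patch_size=7,
        # patch_stride=4): n = int((96 - 7) / 4 + 1) = 23, so 23 * 23 = 529
        # candidate patches, whose centers were collected above.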
self._attention = SelfAttention(
data_dim=data_dim * self._patch_size ** 2,
dim_q=query_dim,
)
self._layers.extend(self._attention.layers)
self._mlp_solution = MLPSolution(
input_dim=self._top_k * 2,
num_hiddens=num_hiddens,
activation=activation,
output_dim=output_dim,
output_activation=output_activation,
l2_coefficient=l2_coefficient,
use_lstm=use_lstm_controller,
)
self._layers.extend(self._mlp_solution.layers)
print('Number of parameters: {}'.format(
self.get_num_params_per_layer()))
def _get_output(self, inputs, update_filter):
# ob.shape = (h, w, c)
ob = self._transform(inputs).permute(1, 2, 0)
# print(ob.shape)
h, w, c = ob.size()
patches = ob.unfold(
0, self._patch_size, self._patch_stride).permute(0, 3, 1, 2)
patches = patches.unfold(
2, self._patch_size, self._patch_stride).permute(0, 2, 1, 4, 3)
patches = patches.reshape((-1, self._patch_size, self._patch_size, c))
# flattened_patches.shape = (1, n, p * p * c)
flattened_patches = patches.reshape(
(1, -1, c * self._patch_size ** 2))
# attention_matrix.shape = (1, n, n)
attention_matrix = self._attention(flattened_patches)
# patch_importance_matrix.shape = (n, n)
patch_importance_matrix = torch.softmax(
attention_matrix.squeeze(), dim=-1)
# patch_importance.shape = (n,)
patch_importance = patch_importance_matrix.sum(dim=0)
# extract top k important patches
ix = torch.argsort(patch_importance, descending=True)
top_k_ix = ix[:self._top_k]
centers = self._patch_centers[top_k_ix]
# Overplot.
if self._show_overplot:
task_image = ob.numpy().copy()
patch_importance_copy = patch_importance.numpy().copy()
import cv2
if self._screen_dir is not None:
# Save the original screen.
img_filepath = os.path.join(
self._screen_dir, 'orig_{0:04d}.png'.format(self._img_ix))
cv2.imwrite(img_filepath, inputs[:, :, ::-1])
# Save the scaled screen.
img_filepath = os.path.join(
self._screen_dir, 'scaled_{0:04d}.png'.format(self._img_ix))
cv2.imwrite(
img_filepath,
(task_image * 255).astype(np.uint8)[:, :, ::-1]
)
# Save importance vectors.
dd = {
'step': self._img_ix,
'importance': patch_importance_copy.tolist(),
}
self._raw_importances.append(dd)
import pandas as pd
if self._img_ix % 20 == 0:
csv_path = os.path.join(self._screen_dir, 'importances.csv')
pd.DataFrame(self._raw_importances).to_csv(
csv_path, index=False
)
white_patch = np.ones(
(self._patch_size, self._patch_size, 3))
half_patch_size = self._patch_size // 2
for i, center in enumerate(centers):
row_ss = int(center[0]) - half_patch_size
row_ee = int(center[0]) + half_patch_size + 1
col_ss = int(center[1]) - half_patch_size
col_ee = int(center[1]) + half_patch_size + 1
ratio = 1.0 * i / self._top_k
task_image[row_ss:row_ee, col_ss:col_ee] = (
ratio * task_image[row_ss:row_ee, col_ss:col_ee] +
(1 - ratio) * white_patch)
task_image = cv2.resize(
task_image, (task_image.shape[0] * 5, task_image.shape[1] * 5))
cv2.imshow('Overplotting', task_image[:, :, [2, 1, 0]])
cv2.waitKey(1)
if self._screen_dir is not None:
# Save the scaled screen.
img_filepath = os.path.join(
self._screen_dir, 'att_{0:04d}.png'.format(self._img_ix))
cv2.imwrite(
img_filepath,
(task_image * 255).astype(np.uint8)[:, :, ::-1]
)
self._img_ix += 1
centers = centers.flatten(0, -1)
if self._normalize_positions:
centers = centers / self._image_size
return self._mlp_solution.get_output(centers)
def reset(self):
self._selected_patch_centers = []
self._value_network_input_images = []
self._accumulated_gradients = None
self._mlp_solution.reset()
self._img_ix = 1
self._raw_importances = []
def set_log_dir(self, folder):
self._screen_dir = folder
if not os.path.exists(self._screen_dir):
os.makedirs(self._screen_dir)
| apache-2.0 |
miketrumpis/machine_learning_project | scripts/sparse_classify.py | 1 | 2469 | import random
import numpy as np
import matplotlib.pyplot as pp
import recog.dict.facedicts as facedicts
import recog.opt.shrinkers as shrinkers
import recog.opt.salsa as salsa
import recog.faces.classify as classify
N = 2000
shape = (192,168)
#shape = (12,10)
## trn, tst = facedicts.FacesDictionary.pair_from_saved(
## 'simple_faces', 0.5, 0.5, resize=(16,16)
## )
## fx = None
## trn, tst = facedicts.EigenFaces.pair_from_saved(
## 'simple_faces', 0.5, 0.5, klass2=facedicts.FacesDictionary,
## m=120, skip=2
## )
## fx = 'eig'
trn, tst = facedicts.EigenFaces2.pair_from_saved(
'simple_faces', 0.5, 0.5, klass2=facedicts.FacesDictionary,
m=200, skip=0
)
fx = 'eig'
## trn, tst = facedicts.RandomFaces.pair_from_saved(
## 'simple_faces', 0.4, 0.6, klass2=facedicts.FacesDictionary,
## m=100
## )
## fx = 'rand'
## trn, tst = facedicts.DiffusionFaces.pair_from_saved(
## 'downsamp_yale_simple.npz', 0.4, 0.6,
## klass2=facedicts.FacesDictionary, m=10
## )
trn.learn_class_dicts(20, 15)
m, n = trn.frame.shape
# choose N test faces to classify
N = min(N, tst.frame.shape[1])
r_cols = np.array(random.sample(xrange(tst.frame.shape[1]), N))
tst_cols = tst.frame[:,r_cols]
tst_cols -= np.mean(tst_cols, axis=0)
# feature transform if necessary
if fx=='eig':
tst_cols = np.dot(trn.eigenfaces.T, (tst_cols - trn.avg_face[:,None]))
if fx=='rand':
tst_cols = np.dot(trn.randomfaces.T, tst_cols)
nrm = np.sqrt(np.sum(tst_cols**2, axis=0))
tst_cols /= nrm
tst_classes = [tst.column_to_class[r_col] for r_col in r_cols]
# start with the MMSE reconstructions
r = np.linalg.lstsq(trn.frame, tst_cols)
mmse_x = r[0]
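# (np.linalg.lstsq returns (solution, residual sums, rank, singular values);
# r[0] is the dense least-squares coefficient matrix and r[1] feeds the
# commented-out eps estimate below.)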
mmse_resids = trn.compute_residuals(mmse_x, tst_cols)
mmse_classes = [min(c)[1] for c in mmse_resids]
mmse_err = [ int( tc != xc )
for (tc, xc) in zip(tst_classes, mmse_classes) ]
mmse_SCI = trn.SCI(mmse_x)
# allow a little wiggle room for working in the nullspace of A
#eps = 1.25 * np.sqrt(np.max(r[1]))
## eps = 1e-1
## x = classify.classify_faces_dense_err(
## trn, tst_cols, eps, mu=10.0, x0=mmse_x, n_iter=100, rtol=1e-4
## )
tau = 10.
x = classify.classify_faces_dense_err_qreg(
trn, tst_cols, tau, x0=mmse_x, n_iter=100, rtol=1e-5
)
sparse_resids = trn.compute_residuals(x, tst_cols)
sparse_classes = [min(c)[1] for c in sparse_resids]
sparse_err = [ int( tc != xc )
for (tc, xc) in zip(tst_classes, sparse_classes) ]
sparse_SCI = trn.SCI(x)
| bsd-2-clause |
ML-KULeuven/socceraction | socceraction/spadl/opta.py | 1 | 60410 | # -*- coding: utf-8 -*-
"""Opta event stream data to SPADL converter."""
import copy
import glob
import json # type: ignore
import os
import re
import warnings
from abc import ABC
from datetime import datetime, timedelta
from typing import Any, Dict, List, Mapping, Optional, Tuple, Type
import pandas as pd # type: ignore
import pandera as pa
import unidecode # type: ignore
from lxml import objectify
from pandera.typing import DataFrame, DateTime, Series
from . import config as spadlconfig
from .base import (
CompetitionSchema,
EventDataLoader,
EventSchema,
GameSchema,
MissingDataError,
PlayerSchema,
TeamSchema,
_add_dribbles,
_fix_clearances,
_fix_direction_of_play,
)
__all__ = [
'OptaLoader',
'convert_to_actions',
'OptaCompetitionSchema',
'OptaGameSchema',
'OptaPlayerSchema',
'OptaTeamSchema',
'OptaEventSchema',
]
class OptaCompetitionSchema(CompetitionSchema):
"""Definition of a dataframe containing a list of competitions and seasons."""
class OptaGameSchema(GameSchema):
"""Definition of a dataframe containing a list of games."""
venue: Series[str] = pa.Field(nullable=True)
referee_id: Series[int] = pa.Field(nullable=True)
attendance: Series[int] = pa.Field(nullable=True)
duration: Series[int]
home_score: Series[int]
away_score: Series[int]
class OptaPlayerSchema(PlayerSchema):
"""Definition of a dataframe containing the list of teams of a game."""
firstname: Optional[Series[str]]
lastname: Optional[Series[str]]
nickname: Optional[Series[str]] = pa.Field(nullable=True)
starting_position_id: Series[int]
starting_position_name: Series[str]
height: Optional[Series[float]]
weight: Optional[Series[float]]
age: Optional[Series[int]]
class OptaTeamSchema(TeamSchema):
"""Definition of a dataframe containing the list of players of a game."""
class OptaEventSchema(EventSchema):
"""Definition of a dataframe containing event stream data of a game."""
timestamp: Series[DateTime]
minute: Series[int]
second: Series[int] = pa.Field(ge=0, le=59)
outcome: Series[bool]
start_x: Series[float] = pa.Field(nullable=True)
start_y: Series[float] = pa.Field(nullable=True)
end_x: Series[float] = pa.Field(nullable=True)
end_y: Series[float] = pa.Field(nullable=True)
assist: Series[bool] = pa.Field(nullable=True)
keypass: Series[bool] = pa.Field(nullable=True)
qualifiers: Series[object]
def _deepupdate(target: Dict[Any, Any], src: Dict[Any, Any]) -> None:
"""Deep update target dict with src.
For each k,v in src: if k doesn't exist in target, it is deep copied from
src to target. Otherwise, if v is a list, target[k] is extended with
src[k]. If v is a set, target[k] is updated with v, If v is a dict,
recursively deep-update it.
Examples
--------
>>> t = {'name': 'Ferry', 'hobbies': ['programming', 'sci-fi']}
    >>> _deepupdate(t, {'hobbies': ['gaming']})
>>> print(t)
{'name': 'Ferry', 'hobbies': ['programming', 'sci-fi', 'gaming']}
"""
for k, v in src.items():
if isinstance(v, list):
if k not in target:
target[k] = copy.deepcopy(v)
else:
target[k].extend(v)
elif isinstance(v, dict):
if k not in target:
target[k] = copy.deepcopy(v)
else:
_deepupdate(target[k], v)
elif isinstance(v, set):
if k not in target:
target[k] = v.copy()
else:
target[k].update(v.copy())
else:
target[k] = copy.copy(v)
def _extract_ids_from_path(path: str, pattern: str) -> Dict[str, int]:
regex = re.compile(
'.+?'
+ re.escape(pattern)
.replace(r'\{competition_id\}', r'(?P<competition_id>\d+)')
.replace(r'\{season_id\}', r'(?P<season_id>\d+)')
.replace(r'\{game_id\}', r'(?P<game_id>\d+)')
)
m = re.match(regex, path)
if m is None:
raise ValueError('The filepath {} does not match the format {}.'.format(path, pattern))
ids = m.groupdict()
return {k: int(v) for k, v in ids.items()}
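# Illustrative example (hypothetical path): with the pattern
# 'f24-{competition_id}-{season_id}-{game_id}.xml', a path such as
# 'data/f24-8-2017-918893.xml' is expected to yield
# {'competition_id': 8, 'season_id': 2017, 'game_id': 918893}.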
class OptaParser(ABC):
"""Extract data from an Opta data stream.
Parameters
----------
path : str
Path of the data file.
"""
def __init__(self, path: str, *args: Any, **kwargs: Any):
pass
def extract_competitions(self) -> Dict[int, Dict[str, Any]]:
return {}
def extract_games(self) -> Dict[int, Dict[str, Any]]:
return {}
def extract_teams(self) -> Dict[int, Dict[str, Any]]:
return {}
def extract_players(self) -> Dict[int, Dict[str, Any]]:
return {}
def extract_events(self) -> Dict[int, Dict[str, Any]]:
return {}
class OptaLoader(EventDataLoader):
"""
Load Opta data from a local folder.
Parameters
----------
root : str
Root-path of the data.
feeds : dict
Glob pattern for each feed that should be parsed. For example::
{
'f7': "f7-{competition_id}-{season_id}-{game_id}.xml",
'f24': "f24-{competition_id}-{season_id}-{game_id}.xml"
}
If you use JSON files obtained from `WhoScored <whoscored.com>`__ use::
{
'whoscored': "{competition_id}-{season_id}/{game_id}.json",
}
parser : str or dict
Either 'xml', 'json', 'whoscored' or your custom parser for each feed.
The default xml parser supports F7 and F24 feeds; the default json
parser supports F1, F9 and F24 feeds. Custom parsers can be specified
as::
{
'feed1_name': Feed1Parser
'feed2_name': Feed2Parser
}
where Feed1Parser and Feed2Parser are classes implementing
:class:`~socceraction.spadl.opta.OptaParser` and 'feed1_name' and
'feed2_name' correspond to the keys in 'feeds'.
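    Examples
    --------
    A minimal, illustrative setup (the directory layout and the IDs below are
    hypothetical, not taken from any real dataset)::
        loader = OptaLoader(
            root='data/opta',
            feeds={
                'f7': 'f7-{competition_id}-{season_id}-{game_id}.xml',
                'f24': 'f24-{competition_id}-{season_id}-{game_id}.xml',
            },
            parser='xml',
        )
        df_games = loader.games(competition_id=8, season_id=2017)
        df_events = loader.events(game_id=918893)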
"""
def __init__(self, root: str, feeds: Dict[str, str], parser: Mapping[str, Type[OptaParser]]):
self.root = root
if parser == 'json':
self.parsers = self._get_parsers_for_feeds(_jsonparsers, feeds)
elif parser == 'xml':
self.parsers = self._get_parsers_for_feeds(_xmlparsers, feeds)
elif parser == 'whoscored':
self.parsers = self._get_parsers_for_feeds(_whoscoredparsers, feeds)
else:
self.parsers = self._get_parsers_for_feeds(parser, feeds)
self.feeds = feeds
def _get_parsers_for_feeds(
self, available_parsers: Mapping[str, Type[OptaParser]], feeds: Dict[str, str]
) -> Mapping[str, Type[OptaParser]]:
"""Select the appropriate parser for each feed.
Parameters
----------
available_parsers : dict(str, OptaParser)
Dictionary with all available parsers.
feeds : dict(str, str)
All feeds that should be parsed.
Returns
-------
dict(str, OptaParser)
A mapping between all feeds that should be parsed and the
corresponding parser class.
Warns
-----
Raises a warning if there is no parser available for any of the
provided feeds.
"""
parsers = {}
for feed in feeds:
if feed in available_parsers:
parsers[feed] = available_parsers[feed]
else:
warnings.warn(
'No parser available for {} feeds. This feed is ignored.'.format(feed)
)
return parsers
def competitions(self) -> DataFrame[OptaCompetitionSchema]:
"""Return a dataframe with all available competitions and seasons.
Returns
-------
pd.DataFrame
A dataframe containing all available competitions and seasons. See
:class:`~socceraction.spadl.opta.OptaCompetitionSchema` for the schema.
"""
data: Dict[int, Dict[str, Any]] = {}
for feed, feed_pattern in self.feeds.items():
glob_pattern = feed_pattern.format(competition_id='*', season_id='*', game_id='*')
feed_files = glob.glob(os.path.join(self.root, glob_pattern))
for ffp in feed_files:
ids = _extract_ids_from_path(ffp, feed_pattern)
parser = self.parsers[feed](ffp, **ids)
_deepupdate(data, parser.extract_competitions())
return pd.DataFrame(list(data.values()))
def games(self, competition_id: int, season_id: int) -> DataFrame[OptaGameSchema]:
"""Return a dataframe with all available games in a season.
Parameters
----------
competition_id : int
The ID of the competition.
season_id : int
The ID of the season.
Returns
-------
pd.DataFrame
A dataframe containing all available games. See
:class:`~socceraction.spadl.opta.OptaGameSchema` for the schema.
"""
data: Dict[int, Dict[str, Any]] = {}
for feed, feed_pattern in self.feeds.items():
glob_pattern = feed_pattern.format(
competition_id=competition_id, season_id=season_id, game_id='*'
)
feed_files = glob.glob(os.path.join(self.root, glob_pattern))
for ffp in feed_files:
try:
ids = _extract_ids_from_path(ffp, feed_pattern)
parser = self.parsers[feed](ffp, **ids)
_deepupdate(data, parser.extract_games())
except Exception:
warnings.warn('Could not parse {}'.format(ffp))
return pd.DataFrame(list(data.values()))
def teams(self, game_id: int) -> DataFrame[OptaTeamSchema]:
"""Return a dataframe with both teams that participated in a game.
Parameters
----------
game_id : int
The ID of the game.
Returns
-------
pd.DataFrame
A dataframe containing both teams. See
:class:`~socceraction.spadl.opta.OptaTeamSchema` for the schema.
"""
data: Dict[int, Dict[str, Any]] = {}
for feed, feed_pattern in self.feeds.items():
glob_pattern = feed_pattern.format(competition_id='*', season_id='*', game_id=game_id)
feed_files = glob.glob(os.path.join(self.root, glob_pattern))
for ffp in feed_files:
ids = _extract_ids_from_path(ffp, feed_pattern)
parser = self.parsers[feed](ffp, **ids)
_deepupdate(data, parser.extract_teams())
return pd.DataFrame(list(data.values()))
def players(self, game_id: int) -> DataFrame[OptaPlayerSchema]:
"""Return a dataframe with all players that participated in a game.
Parameters
----------
game_id : int
The ID of the game.
Returns
-------
pd.DataFrame
A dataframe containing all players. See
:class:`~socceraction.spadl.opta.OptaPlayerSchema` for the schema.
"""
data: Dict[int, Dict[str, Any]] = {}
for feed, feed_pattern in self.feeds.items():
glob_pattern = feed_pattern.format(competition_id='*', season_id='*', game_id=game_id)
feed_files = glob.glob(os.path.join(self.root, glob_pattern))
for ffp in feed_files:
ids = _extract_ids_from_path(ffp, feed_pattern)
parser = self.parsers[feed](ffp, **ids)
_deepupdate(data, parser.extract_players())
df_players = pd.DataFrame(list(data.values()))
df_players['game_id'] = game_id
return df_players
def events(self, game_id: int) -> DataFrame[OptaEventSchema]:
"""Return a dataframe with the event stream of a game.
Parameters
----------
game_id : int
The ID of the game.
Returns
-------
pd.DataFrame
A dataframe containing the event stream. See
:class:`~socceraction.spadl.opta.OptaEventSchema` for the schema.
"""
data: Dict[int, Dict[str, Any]] = {}
for feed, feed_pattern in self.feeds.items():
glob_pattern = feed_pattern.format(competition_id='*', season_id='*', game_id=game_id)
feed_files = glob.glob(os.path.join(self.root, glob_pattern))
for ffp in feed_files:
ids = _extract_ids_from_path(ffp, feed_pattern)
parser = self.parsers[feed](ffp, **ids)
_deepupdate(data, parser.extract_events())
events = (
pd.DataFrame(list(data.values()))
.merge(_eventtypesdf, on='type_id', how='left')
.sort_values(['game_id', 'period_id', 'minute', 'second', 'timestamp'])
.reset_index(drop=True)
)
return events
class OptaJSONParser(OptaParser):
"""Extract data from an Opta JSON data stream.
Parameters
----------
path : str
Path of the data file.
"""
def __init__(self, path: str, *args: Any, **kwargs: Any):
with open(path, 'rt', encoding='utf-8') as fh:
self.root = json.load(fh)
class OptaXMLParser(OptaParser):
"""Extract data from an Opta XML data stream.
Parameters
----------
path : str
Path of the data file.
"""
def __init__(self, path: str, *args: Any, **kwargs: Any):
with open(path, 'rb') as fh:
self.root = objectify.fromstring(fh.read())
class _F1JSONParser(OptaJSONParser):
def get_feed(self) -> Dict[str, Any]:
for node in self.root:
if 'OptaFeed' in node['data'].keys():
return node
raise MissingDataError
def get_doc(self) -> Dict[str, Any]:
f1 = self.get_feed()
data = assertget(f1, 'data')
optafeed = assertget(data, 'OptaFeed')
optadocument = assertget(optafeed, 'OptaDocument')
return optadocument
def extract_competitions(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
attr = assertget(optadocument, '@attributes')
competition_id = int(assertget(attr, 'competition_id'))
competition = dict(
season_id=int(assertget(attr, 'season_id')),
season_name=str(assertget(attr, 'season_id')),
competition_id=competition_id,
competition_name=assertget(attr, 'competition_name'),
)
return {competition_id: competition}
def extract_games(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
attr = assertget(optadocument, '@attributes')
matchdata = assertget(optadocument, 'MatchData')
matches = {}
for match in matchdata:
match_dict: Dict[str, Any] = {}
match_dict['competition_id'] = int(assertget(attr, 'competition_id'))
match_dict['season_id'] = int(assertget(attr, 'season_id'))
matchattr = assertget(match, '@attributes')
match_dict['game_id'] = int(assertget(matchattr, 'uID')[1:])
matchinfo = assertget(match, 'MatchInfo')
matchinfoattr = assertget(matchinfo, '@attributes')
match_dict['game_day'] = int(assertget(matchinfoattr, 'MatchDay'))
match_dict['venue'] = str(assertget(matchinfoattr, 'Venue_id'))
match_dict['game_date'] = datetime.strptime(
assertget(matchinfo, 'Date'), '%Y-%m-%d %H:%M:%S'
)
teamdata = assertget(match, 'TeamData')
for team in teamdata:
teamattr = assertget(team, '@attributes')
side = assertget(teamattr, 'Side')
teamid = assertget(teamattr, 'TeamRef')
if side == 'Home':
match_dict['home_team_id'] = int(teamid[1:])
else:
match_dict['away_team_id'] = int(teamid[1:])
matches[match_dict['game_id']] = match_dict
return matches
class _F9JSONParser(OptaJSONParser):
def get_feed(self) -> Dict[str, Any]:
for node in self.root:
if 'OptaFeed' in node['data'].keys():
return node
raise MissingDataError
def get_doc(self) -> Dict[str, Any]:
f9 = self.get_feed()
data = assertget(f9, 'data')
optafeed = assertget(data, 'OptaFeed')
optadocument = assertget(optafeed, 'OptaDocument')[0]
return optadocument
def extract_games(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
attr = assertget(optadocument, '@attributes')
venue = assertget(optadocument, 'Venue')
matchdata = assertget(optadocument, 'MatchData')
matchofficial = assertget(matchdata, 'MatchOfficial')
matchinfo = assertget(matchdata, 'MatchInfo')
stat = assertget(matchdata, 'Stat')
assert stat['@attributes']['Type'] == 'match_time'
teamdata = assertget(matchdata, 'TeamData')
scores = {}
for t in teamdata:
scores[t['@attributes']['Side']] = t['@attributes']['Score']
game_id = int(assertget(attr, 'uID')[1:])
game_dict = {
game_id: dict(
game_id=game_id,
venue=str(
venue['@attributes']['uID']
), # The venue's name is not included in this stream
referee_id=int(matchofficial['@attributes']['uID'].replace('o', '')),
game_date=datetime.strptime(
assertget(matchinfo, 'Date'), '%Y%m%dT%H%M%S%z'
).replace(tzinfo=None),
attendance=int(matchinfo.get('Attendance', 0)),
duration=int(stat['@value']),
home_score=int(scores['Home']),
away_score=int(scores['Away']),
)
}
return game_dict
def extract_teams(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
root_teams = assertget(optadocument, 'Team')
teams = {}
for team in root_teams:
if 'id' in team.keys():
nameobj = team.get('nameObj')
team_id = int(team['id'])
team = dict(
team_id=team_id,
team_name=nameobj.get('name'),
)
for f in ['team_name']:
team[f] = unidecode.unidecode(team[f]) if f in team else team[f]
teams[team_id] = team
return teams
def extract_players(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
root_teams = assertget(optadocument, 'Team')
lineups = self.extract_lineups()
players = {}
for team in root_teams:
team_id = int(team['@attributes']['uID'].replace('t', ''))
for player in team['Player']:
player_id = int(player['@attributes']['uID'].replace('p', ''))
assert 'nameObj' in player['PersonName']
nameobj = player['PersonName']['nameObj']
if not nameobj.get('is_unknown'):
player = dict(
team_id=team_id,
player_id=player_id,
firstname=nameobj.get('first').strip() or None,
lastname=nameobj.get('last').strip() or None,
player_name=nameobj.get('full').strip() or None,
nickname=nameobj.get('known') or nameobj.get('full').strip() or None,
)
if player_id in lineups[team_id]['players']:
player = dict(
**player,
jersey_number=lineups[team_id]['players'][player_id]['jersey_number'],
starting_position_name=lineups[team_id]['players'][player_id][
'starting_position_name'
],
starting_position_id=lineups[team_id]['players'][player_id][
'starting_position_id'
],
is_starter=lineups[team_id]['players'][player_id]['is_starter'],
minutes_played=lineups[team_id]['players'][player_id][
'minutes_played'
],
)
for f in ['firstname', 'lastname', 'player_name', 'nickname']:
if player[f]:
player[f] = unidecode.unidecode(player[f])
players[player_id] = player
return players
def extract_referee(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
try:
rootf9 = optadocument['MatchData']['MatchOfficial']
except KeyError:
raise MissingDataError
name = rootf9['OfficialName']
nameobj = name['nameObj']
referee_id = int(rootf9['@attributes']['uID'].replace('o', ''))
referee = dict(
referee_id=referee_id,
referee_firstname=name.get('First') or nameobj.get('first'),
referee_lastname=name.get('Last') or nameobj.get('last'),
)
for f in ['referee_firstname', 'referee_lastname']:
if referee[f]:
referee[f] = unidecode.unidecode(referee[f])
return {referee_id: referee}
def extract_teamgamestats(self) -> List[Dict[str, Any]]:
optadocument = self.get_doc()
attr = assertget(optadocument, '@attributes')
game_id = int(assertget(attr, 'uID')[1:])
try:
rootf9 = optadocument['MatchData']['TeamData']
except KeyError:
raise MissingDataError
teams_gamestats = []
for team in rootf9:
attr = team['@attributes']
statsdict = {stat['@attributes']['Type']: stat['@value'] for stat in team['Stat']}
team_gamestats = dict(
game_id=game_id,
team_id=int(attr['TeamRef'].replace('t', '')),
side=attr['Side'],
score=attr['Score'],
shootout_score=attr['ShootOutScore'],
**statsdict,
)
teams_gamestats.append(team_gamestats)
return teams_gamestats
def extract_lineups(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
attr = assertget(optadocument, '@attributes')
try:
rootf9 = optadocument['MatchData']['TeamData']
except KeyError:
raise MissingDataError
matchstats = optadocument['MatchData']['Stat']
matchstats = [matchstats] if isinstance(matchstats, dict) else matchstats
matchstatsdict = {stat['@attributes']['Type']: stat['@value'] for stat in matchstats}
lineups: Dict[int, Dict[str, Any]] = {}
for team in rootf9:
# lineup attributes
team_id = int(team['@attributes']['TeamRef'].replace('t', ''))
lineups[team_id] = dict(players=dict())
# substitutes
subst = [s['@attributes'] for s in team['Substitution']]
for player in team['PlayerLineUp']['MatchPlayer']:
attr = player['@attributes']
player_id = int(attr['PlayerRef'].replace('p', ''))
playerstatsdict = {
stat['@attributes']['Type']: stat['@value'] for stat in player['Stat']
}
sub_on = next(
(
item['Time']
for item in subst
if 'Retired' not in item and item['SubOn'] == f'p{player_id}'
),
matchstatsdict['match_time'] if attr['Status'] == 'Sub' else 0,
)
sub_off = next(
(item['Time'] for item in subst if item['SubOff'] == f'p{player_id}'),
matchstatsdict['match_time'],
)
minutes_played = sub_off - sub_on
lineups[team_id]['players'][player_id] = dict(
jersey_number=attr['ShirtNumber'],
starting_position_name=attr['Position'],
starting_position_id=attr['position_id'],
is_starter=attr['Status'] == 'Start',
minutes_played=minutes_played,
**playerstatsdict,
)
return lineups
class _F24JSONParser(OptaJSONParser):
def get_feed(self) -> Dict[str, Any]:
for node in self.root:
if 'Games' in node['data'].keys():
return node
raise MissingDataError
def extract_games(self) -> Dict[int, Dict[str, Any]]:
f24 = self.get_feed()
data = assertget(f24, 'data')
games = assertget(data, 'Games')
game = assertget(games, 'Game')
attr = assertget(game, '@attributes')
game_id = int(assertget(attr, 'id'))
game_dict = {
game_id: dict(
competition_id=int(assertget(attr, 'competition_id')),
game_id=game_id,
season_id=int(assertget(attr, 'season_id')),
game_day=int(assertget(attr, 'matchday')),
home_team_id=int(assertget(attr, 'home_team_id')),
away_team_id=int(assertget(attr, 'away_team_id')),
)
}
return game_dict
def extract_events(self) -> Dict[int, Dict[str, Any]]:
f24 = self.get_feed()
data = assertget(f24, 'data')
games = assertget(data, 'Games')
game = assertget(games, 'Game')
game_attr = assertget(game, '@attributes')
game_id = int(assertget(game_attr, 'id'))
events = {}
for element in assertget(game, 'Event'):
attr = element['@attributes']
timestamp = attr['TimeStamp'].get('locale') if attr.get('TimeStamp') else None
timestamp = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
qualifiers = {
int(q['@attributes']['qualifier_id']): q['@attributes']['value']
for q in element.get('Q', [])
}
start_x = float(assertget(attr, 'x'))
start_y = float(assertget(attr, 'y'))
end_x = _get_end_x(qualifiers)
end_y = _get_end_y(qualifiers)
if end_x is None:
end_x = start_x
if end_y is None:
end_y = start_y
event_id = int(assertget(attr, 'event_id'))
event = dict(
game_id=game_id,
event_id=event_id,
type_id=int(assertget(attr, 'type_id')),
period_id=int(assertget(attr, 'period_id')),
minute=int(assertget(attr, 'min')),
second=int(assertget(attr, 'sec')),
timestamp=timestamp,
player_id=int(assertget(attr, 'player_id')),
team_id=int(assertget(attr, 'team_id')),
outcome=bool(int(attr.get('outcome', 1))),
start_x=start_x,
start_y=start_y,
end_x=end_x,
end_y=end_y,
assist=bool(int(attr.get('assist', 0))),
keypass=bool(int(attr.get('keypass', 0))),
qualifiers=qualifiers,
)
events[event_id] = event
return events
class _F7XMLParser(OptaXMLParser):
def get_doc(self) -> Type[objectify.ObjectifiedElement]:
optadocument = self.root.find('SoccerDocument')
return optadocument
def extract_competitions(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
competition = optadocument.Competition
stats = {}
for stat in competition.find('Stat'):
stats[stat.attrib['Type']] = stat.text
competition_id = int(competition.attrib['uID'][1:])
competition_dict = dict(
competition_id=competition_id,
season_id=int(assertget(stats, 'season_id')),
season_name=assertget(stats, 'season_name'),
competition_name=competition.Name.text,
)
return {competition_id: competition_dict}
def extract_games(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
match_info = optadocument.MatchData.MatchInfo
game_id = int(optadocument.attrib['uID'][1:])
stats = {}
for stat in optadocument.MatchData.find('Stat'):
stats[stat.attrib['Type']] = stat.text
game_dict = dict(
game_id=game_id,
venue=optadocument.Venue.Name.text,
referee_id=int(optadocument.MatchData.MatchOfficial.attrib['uID'][1:]),
game_date=datetime.strptime(match_info.Date.text, '%Y%m%dT%H%M%S%z').replace(
tzinfo=None
),
attendance=int(match_info.Attendance),
duration=int(stats['match_time']),
)
return {game_id: game_dict}
def extract_teams(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
team_elms = list(optadocument.iterchildren('Team'))
teams = {}
for team_elm in team_elms:
team_id = int(assertget(team_elm.attrib, 'uID')[1:])
team = dict(
team_id=team_id,
team_name=team_elm.Name.text,
)
teams[team_id] = team
return teams
def extract_lineups(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
stats = {}
for stat in optadocument.MatchData.find('Stat'):
stats[stat.attrib['Type']] = stat.text
lineup_elms = optadocument.MatchData.iterchildren('TeamData')
lineups = {}
for team_elm in lineup_elms:
# lineup attributes
team_id = int(team_elm.attrib['TeamRef'][1:])
lineups[team_id] = dict(
formation=team_elm.attrib['Formation'],
score=int(team_elm.attrib['Score']),
side=team_elm.attrib['Side'],
players=dict(),
)
# substitutes
subst_elms = team_elm.iterchildren('Substitution')
subst = [subst_elm.attrib for subst_elm in subst_elms]
# players
player_elms = team_elm.PlayerLineUp.iterchildren('MatchPlayer')
for player_elm in player_elms:
player_id = int(player_elm.attrib['PlayerRef'][1:])
sub_on = int(
next(
(
item['Time']
for item in subst
if 'Retired' not in item and item['SubOn'] == f'p{player_id}'
),
stats['match_time'] if player_elm.attrib['Status'] == 'Sub' else 0,
)
)
sub_off = int(
next(
(item['Time'] for item in subst if item['SubOff'] == f'p{player_id}'),
stats['match_time'],
)
)
minutes_played = sub_off - sub_on
lineups[team_id]['players'][player_id] = dict(
starting_position_id=int(player_elm.attrib['Formation_Place']),
starting_position_name=player_elm.attrib['Position'],
jersey_number=int(player_elm.attrib['ShirtNumber']),
is_starter=int(player_elm.attrib['Formation_Place']) != 0,
minutes_played=minutes_played,
)
return lineups
def extract_players(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
lineups = self.extract_lineups()
team_elms = list(optadocument.iterchildren('Team'))
players = {}
for team_elm in team_elms:
team_id = int(team_elm.attrib['uID'][1:])
for player_elm in team_elm.iterchildren('Player'):
player_id = int(player_elm.attrib['uID'][1:])
firstname = str(player_elm.find('PersonName').find('First'))
lastname = str(player_elm.find('PersonName').find('Last'))
nickname = str(player_elm.find('PersonName').find('Known'))
player = dict(
team_id=team_id,
player_id=player_id,
player_name=' '.join([firstname, lastname]),
firstname=firstname,
lastname=lastname,
nickname=nickname,
**lineups[team_id]['players'][player_id],
)
players[player_id] = player
return players
class _F24XMLParser(OptaXMLParser):
def get_doc(self) -> Type[objectify.ObjectifiedElement]:
return self.root
def extract_games(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
game_elem = optadocument.find('Game')
attr = game_elem.attrib
game_id = int(assertget(attr, 'id'))
game_dict = dict(
game_id=game_id,
competition_id=int(assertget(attr, 'competition_id')),
season_id=int(assertget(attr, 'season_id')),
game_day=int(assertget(attr, 'matchday')),
game_date=datetime.strptime(assertget(attr, 'game_date'), '%Y-%m-%dT%H:%M:%S'),
home_team_id=int(assertget(attr, 'home_team_id')),
home_score=int(assertget(attr, 'home_score')),
away_team_id=int(assertget(attr, 'away_team_id')),
away_score=int(assertget(attr, 'away_score')),
)
return {game_id: game_dict}
def extract_events(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
game_elm = optadocument.find('Game')
attr = game_elm.attrib
game_id = int(assertget(attr, 'id'))
events = {}
for event_elm in game_elm.iterchildren('Event'):
attr = dict(event_elm.attrib)
event_id = int(attr['id'])
qualifiers = {
int(qualifier_elm.attrib['qualifier_id']): qualifier_elm.attrib.get('value')
for qualifier_elm in event_elm.iterchildren('Q')
}
start_x = float(assertget(attr, 'x'))
start_y = float(assertget(attr, 'y'))
end_x = _get_end_x(qualifiers)
end_y = _get_end_y(qualifiers)
if end_x is None:
end_x = start_x
if end_y is None:
end_y = start_y
event = dict(
game_id=game_id,
event_id=event_id,
type_id=int(assertget(attr, 'type_id')),
period_id=int(assertget(attr, 'period_id')),
minute=int(assertget(attr, 'min')),
second=int(assertget(attr, 'sec')),
timestamp=datetime.strptime(assertget(attr, 'timestamp'), '%Y-%m-%dT%H:%M:%S.%f'),
player_id=int(attr.get('player_id', 0)),
team_id=int(assertget(attr, 'team_id')),
outcome=bool(int(attr.get('outcome', 1))),
start_x=start_x,
start_y=start_y,
end_x=end_x,
end_y=end_y,
assist=bool(int(attr.get('assist', 0))),
keypass=bool(int(attr.get('keypass', 0))),
qualifiers=qualifiers,
)
events[event_id] = event
return events
class _WhoScoredParser(OptaParser):
"""Extract data from a JSON data stream scraped from WhoScored.
Parameters
----------
path : str
Path of the data file.
competition_id : int
ID of the competition to which the provided data file belongs. If
None, this information is extracted from a field 'competition_id' in
the JSON.
season_id : int
ID of the season to which the provided data file belongs. If None,
this information is extracted from a field 'season_id' in the JSON.
game_id : int
ID of the game to which the provided data file belongs. If None, this
information is extracted from a field 'game_id' in the JSON.
"""
def __init__( # noqa: C901
self,
path: str,
competition_id: Optional[int] = None,
season_id: Optional[int] = None,
game_id: Optional[int] = None,
*args: Any,
**kwargs: Any,
):
with open(path, 'rt', encoding='utf-8') as fh:
self.root = json.load(fh)
self.position_mapping = lambda formation, x, y: 'Unknown'
if competition_id is None:
try:
competition_id = int(assertget(self.root, 'competition_id'))
except AssertionError:
raise MissingDataError(
"""Could not determine the competition id. Add it to the
file path or include a field 'competition_id' in the
JSON."""
)
self.competition_id = competition_id
if season_id is None:
try:
season_id = int(assertget(self.root, 'season_id'))
except AssertionError:
raise MissingDataError(
"""Could not determine the season id. Add it to the file
path or include a field 'season_id' in the JSON."""
)
self.season_id = season_id
if game_id is None:
try:
game_id = int(assertget(self.root, 'game_id'))
except AssertionError:
raise MissingDataError(
"""Could not determine the game id. Add it to the file
path or include a field 'game_id' in the JSON."""
)
self.game_id = game_id
def get_period_id(self, event: Dict[str, Any]) -> int:
period = assertget(event, 'period')
period_id = int(assertget(period, 'value'))
return period_id
def get_period_milliseconds(self, event: Dict[str, Any]) -> int:
period_minute_limits = assertget(self.root, 'periodMinuteLimits')
period_id = self.get_period_id(event)
if period_id == 16: # Pre-match
return 0
if period_id == 14: # Post-game
return 0
minute = int(assertget(event, 'minute'))
period_minute = minute
if period_id > 1:
period_minute = minute - period_minute_limits[str(period_id - 1)]
period_second = period_minute * 60 + int(event.get('second', 0))
return period_second * 1000
def extract_games(self) -> Dict[int, Dict[str, Any]]:
team_home = assertget(self.root, 'home')
team_away = assertget(self.root, 'away')
game_id = self.game_id
game_dict = dict(
game_id=game_id,
season_id=self.season_id,
competition_id=self.competition_id,
game_day=0, # TODO: not defined in the JSON object
game_date=datetime.strptime(
assertget(self.root, 'startTime'), '%Y-%m-%dT%H:%M:%S'
), # Dates are UTC
home_team_id=int(assertget(team_home, 'teamId')),
away_team_id=int(assertget(team_away, 'teamId')),
# is_regular=None, # TODO
# is_extra_time=None, # TODO
# is_penalties=None, # TODO
# is_golden_goal=None, # TODO
# is_silver_goal=None, # TODO
# Optional fields
home_score=int(assertget(assertget(self.root['home'], 'scores'), 'fulltime')),
away_score=int(assertget(assertget(self.root['away'], 'scores'), 'fulltime')),
attendance=int(self.root.get('attendance', 0)),
venue=str(self.root.get('venueName')),
referee_id=int(self.root.get('referee', {}).get('officialId', 0)),
duration=int(self.root.get('expandedMaxMinute')),
)
return {game_id: game_dict}
def extract_players(self) -> Dict[int, Dict[str, Any]]:
player_gamestats = self.extract_playergamestats()
game_id = self.game_id
players = {}
for team in [self.root['home'], self.root['away']]:
team_id = int(assertget(team, 'teamId'))
for p in team['players']:
player_id = int(assertget(p, 'playerId'))
player = dict(
game_id=game_id,
team_id=team_id,
player_id=int(assertget(p, 'playerId')),
is_starter=bool(p.get('isFirstEleven', False)),
player_name=str(assertget(p, 'name')),
age=int(p['age']),
# nation_code=None,
# line_code=str(assertget(p, "position")),
# preferred_foot=None,
# gender=None,
height=float(p.get('height', float('NaN'))),
weight=float(p.get('weight', float('NaN'))),
minutes_played=player_gamestats[player_id]['minutes_played'],
jersey_number=player_gamestats[player_id]['jersey_number'],
starting_position_id=0, # TODO
starting_position_name=player_gamestats[player_id]['position_code'],
)
for f in ['player_name']:
if player[f]:
player[f] = unidecode.unidecode(player[f])
players[player_id] = player
return players
def extract_substitutions(self) -> Dict[int, Dict[str, Any]]:
game_id = self.game_id
subs = {}
subonevents = [e for e in self.root['events'] if e['type'].get('value') == 19]
for e in subonevents:
sub_id = int(assertget(e, 'playerId'))
sub = dict(
game_id=game_id,
team_id=int(assertget(e, 'teamId')),
period_id=int(assertget(assertget(e, 'period'), 'value')),
period_milliseconds=self.get_period_milliseconds(e),
player_in_id=int(assertget(e, 'playerId')),
player_out_id=int(assertget(e, 'relatedPlayerId')),
)
subs[sub_id] = sub
return subs
def extract_positions(self) -> Dict[int, Dict[str, Any]]: # noqa: C901
game_id = self.game_id
positions = {}
for t in [self.root['home'], self.root['away']]:
team_id = int(assertget(t, 'teamId'))
for f in assertget(t, 'formations'):
fpositions = assertget(f, 'formationPositions')
playersIds = assertget(f, 'playerIds')
formation = assertget(f, 'formationName')
period_end_minutes = assertget(self.root, 'periodEndMinutes')
period_minute_limits = assertget(self.root, 'periodMinuteLimits')
start_minute = int(assertget(f, 'startMinuteExpanded'))
end_minute = int(assertget(f, 'endMinuteExpanded'))
for period_id in sorted(period_end_minutes.keys()):
if period_end_minutes[period_id] > start_minute:
break
period_id = int(period_id)
period_minute = start_minute
if period_id > 1:
period_minute = start_minute - period_minute_limits[str(period_id - 1)]
for i, p in enumerate(fpositions):
x = float(assertget(p, 'vertical'))
y = float(assertget(p, 'horizontal'))
try:
position_code = self.position_mapping(formation, x, y)
except KeyError:
position_code = 'Unknown'
pos = dict(
game_id=game_id,
team_id=team_id,
period_id=period_id,
period_milliseconds=(period_minute * 60 * 1000),
start_milliseconds=(start_minute * 60 * 1000),
end_milliseconds=(end_minute * 60 * 1000),
formation_scheme=formation,
player_id=int(playersIds[i]),
player_position=position_code,
player_position_x=x,
player_position_y=y,
)
positions[team_id] = pos
return positions
def extract_teams(self) -> Dict[int, Dict[str, Any]]:
teams = {}
for t in [self.root['home'], self.root['away']]:
team_id = int(assertget(t, 'teamId'))
team = dict(
team_id=team_id,
team_name=assertget(t, 'name'),
)
for f in ['team_name']:
if team[f]:
team[f] = unidecode.unidecode(team[f])
teams[team_id] = team
return teams
def extract_referee(self) -> Dict[int, Dict[str, Any]]:
if 'referee' not in self.root:
return {
                0: dict(referee_id=0, first_name='Unknown', last_name='Unknown', short_name='Unknown')
}
r = self.root['referee']
referee_id = int(assertget(r, 'officialId'))
referee = dict(
referee_id=referee_id,
first_name=r.get('firstName'),
last_name=r.get('lastName'),
short_name=r.get('name'),
)
for f in ['first_name', 'last_name', 'short_name']:
if referee[f]:
referee[f] = unidecode.unidecode(referee[f])
return {referee_id: referee}
def extract_teamgamestats(self) -> List[Dict[str, Any]]:
game_id = self.game_id
teams_gamestats = []
teams = [self.root['home'], self.root['away']]
for team in teams:
statsdict = {}
for name in team['stats']:
if isinstance(team['stats'][name], dict):
statsdict[camel_to_snake(name)] = sum(team['stats'][name].values())
scores = assertget(team, 'scores')
team_gamestats = dict(
game_id=game_id,
team_id=int(assertget(team, 'teamId')),
side=assertget(team, 'field'),
score=assertget(scores, 'fulltime'),
shootout_score=scores.get('penalty', 0),
**{k: statsdict[k] for k in statsdict if not k.endswith('Success')},
)
teams_gamestats.append(team_gamestats)
return teams_gamestats
def extract_playergamestats(self) -> Dict[int, Dict[str, Any]]: # noqa: C901
game_id = self.game_id
players_gamestats = {}
for team in [self.root['home'], self.root['away']]:
team_id = int(assertget(team, 'teamId'))
for player in team['players']:
statsdict = {
camel_to_snake(name): sum(stat.values())
for name, stat in player['stats'].items()
}
stats = [k for k in statsdict if not k.endswith('Success')]
player_id = int(assertget(player, 'playerId'))
p = dict(
game_id=game_id,
team_id=team_id,
player_id=player_id,
is_starter=bool(player.get('isFirstEleven', False)),
position_code=player.get('position', None),
# optional fields
jersey_number=int(player.get('shirtNo', 0)),
mvp=bool(player.get('isManOfTheMatch', False)),
**{k: statsdict[k] for k in stats},
)
if 'subbedInExpandedMinute' in player:
p['minute_start'] = player['subbedInExpandedMinute']
if 'subbedOutExpandedMinute' in player:
p['minute_end'] = player['subbedOutExpandedMinute']
# Did not play
p['minutes_played'] = 0
# Played the full game
if p['is_starter'] and 'minute_end' not in p:
p['minute_start'] = 0
p['minute_end'] = self.root['expandedMaxMinute']
p['minutes_played'] = self.root['expandedMaxMinute']
# Started but substituted out
elif p['is_starter'] and 'minute_end' in p:
p['minute_start'] = 0
p['minutes_played'] = p['minute_end']
                # Substituted in and played the remainder of the game
elif 'minute_start' in p and 'minute_end' not in p:
p['minute_end'] = self.root['expandedMaxMinute']
p['minutes_played'] = self.root['expandedMaxMinute'] - p['minute_start']
                # Substituted in and out
elif 'minute_start' in p and 'minute_end' in p:
p['minutes_played'] = p['minute_end'] - p['minute_start']
players_gamestats[player_id] = p
return players_gamestats
def extract_events(self) -> Dict[int, Dict[str, Any]]:
events = {}
game_id = self.game_id
time_start_str = str(assertget(self.root, 'startTime'))
time_start = datetime.strptime(time_start_str, '%Y-%m-%dT%H:%M:%S')
for attr in self.root['events']:
qualifiers = {}
qualifiers = {
int(q['type']['value']): q.get('value', True) for q in attr.get('qualifiers', [])
}
start_x = float(assertget(attr, 'x'))
start_y = float(assertget(attr, 'y'))
end_x = _get_end_x(qualifiers)
end_y = _get_end_y(qualifiers)
if end_x is None:
end_x = start_x
if end_y is None:
end_y = start_y
eventtype = attr.get('type', {})
period = attr.get('period', {})
outcome = attr.get('outcomeType', {'value': 1})
eventIdKey = 'eventId'
if 'id' in attr:
eventIdKey = 'id'
minute = int(assertget(attr, 'expandedMinute'))
second = int(attr.get('second', 0))
event_id = int(assertget(attr, eventIdKey))
event = dict(
game_id=game_id,
event_id=event_id,
type_id=int(assertget(eventtype, 'value')),
period_id=int(assertget(period, 'value')),
minute=minute,
second=second,
timestamp=(time_start + timedelta(seconds=(minute * 60 + second))),
player_id=int(attr.get('playerId', 0)),
team_id=int(assertget(attr, 'teamId')),
outcome=bool(int(outcome.get('value', 1))),
start_x=start_x,
start_y=start_y,
end_x=end_x,
end_y=end_y,
assist=bool(int(attr.get('assist', 0))),
keypass=bool(int(attr.get('keypass', 0))),
qualifiers=qualifiers,
)
events[event_id] = event
return events
_jsonparsers = {'f1': _F1JSONParser, 'f9': _F9JSONParser, 'f24': _F24JSONParser}
_xmlparsers = {'f7': _F7XMLParser, 'f24': _F24XMLParser}
_whoscoredparsers = {'whoscored': _WhoScoredParser}
def assertget(dictionary: Dict[str, Any], key: str) -> Any:
value = dictionary.get(key)
assert value is not None, 'KeyError: ' + key + ' not found in ' + str(dictionary)
return value
def camel_to_snake(name: str) -> str:
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
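# Illustrative: camel_to_snake('shotsOnTarget') -> 'shots_on_target'.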
def _get_end_x(qualifiers: Dict[int, Any]) -> Optional[float]:
try:
# pass
if 140 in qualifiers:
return float(qualifiers[140])
# blocked shot
if 146 in qualifiers:
return float(qualifiers[146])
# passed the goal line
if 102 in qualifiers:
return float(100)
return None
except ValueError:
return None
def _get_end_y(qualifiers: Dict[int, Any]) -> Optional[float]:
try:
# pass
if 141 in qualifiers:
return float(qualifiers[141])
# blocked shot
if 147 in qualifiers:
return float(qualifiers[147])
# passed the goal line
if 102 in qualifiers:
return float(qualifiers[102])
return None
except ValueError:
return None
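# Illustrative: for a completed pass with qualifiers {140: '88.2', 141: '45.3'},
# _get_end_x/_get_end_y return 88.2 and 45.3 (Opta pitch coordinates in 0-100).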
_eventtypesdf = pd.DataFrame(
[
(1, 'pass'),
(2, 'offside pass'),
(3, 'take on'),
(4, 'foul'),
(5, 'out'),
(6, 'corner awarded'),
(7, 'tackle'),
(8, 'interception'),
(9, 'turnover'),
(10, 'save'),
(11, 'claim'),
(12, 'clearance'),
(13, 'miss'),
(14, 'post'),
(15, 'attempt saved'),
(16, 'goal'),
(17, 'card'),
(18, 'player off'),
(19, 'player on'),
(20, 'player retired'),
(21, 'player returns'),
(22, 'player becomes goalkeeper'),
(23, 'goalkeeper becomes player'),
(24, 'condition change'),
(25, 'official change'),
(26, 'unknown26'),
(27, 'start delay'),
(28, 'end delay'),
(29, 'unknown29'),
(30, 'end'),
(31, 'unknown31'),
(32, 'start'),
(33, 'unknown33'),
(34, 'team set up'),
(35, 'player changed position'),
(36, 'player changed jersey number'),
(37, 'collection end'),
(38, 'temp_goal'),
(39, 'temp_attempt'),
(40, 'formation change'),
(41, 'punch'),
(42, 'good skill'),
(43, 'deleted event'),
(44, 'aerial'),
(45, 'challenge'),
(46, 'unknown46'),
(47, 'rescinded card'),
(48, 'unknown46'),
(49, 'ball recovery'),
(50, 'dispossessed'),
(51, 'error'),
(52, 'keeper pick-up'),
(53, 'cross not claimed'),
(54, 'smother'),
(55, 'offside provoked'),
(56, 'shield ball opp'),
(57, 'foul throw in'),
(58, 'penalty faced'),
(59, 'keeper sweeper'),
(60, 'chance missed'),
(61, 'ball touch'),
(62, 'unknown62'),
(63, 'temp_save'),
(64, 'resume'),
(65, 'contentious referee decision'),
(66, 'possession data'),
(67, '50/50'),
(68, 'referee drop ball'),
(69, 'failed to block'),
(70, 'injury time announcement'),
(71, 'coach setup'),
(72, 'caught offside'),
(73, 'other ball contact'),
(74, 'blocked pass'),
(75, 'delayed start'),
(76, 'early end'),
(77, 'player off pitch'),
],
columns=['type_id', 'type_name'],
)
def convert_to_actions(events: pd.DataFrame, home_team_id: int) -> pd.DataFrame:
"""
Convert Opta events to SPADL actions.
Parameters
----------
events : pd.DataFrame
DataFrame containing Opta events from a single game.
home_team_id : int
ID of the home team in the corresponding game.
Returns
-------
actions : pd.DataFrame
DataFrame with corresponding SPADL actions.
"""
actions = pd.DataFrame()
actions['game_id'] = events.game_id
actions['original_event_id'] = events.event_id.astype(object)
actions['period_id'] = events.period_id
actions['time_seconds'] = (
60 * events.minute
+ events.second
- ((events.period_id > 1) * 45 * 60)
- ((events.period_id > 2) * 45 * 60)
- ((events.period_id > 3) * 15 * 60)
- ((events.period_id > 4) * 15 * 60)
)
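    # Worked example: Opta minutes run on through the game, so an event at
    # minute 47 of period 2 gets time_seconds = 47 * 60 - 45 * 60 = 120,
    # i.e. the clock restarts at 0 for every period.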
actions['team_id'] = events.team_id
actions['player_id'] = events.player_id
for col in ['start_x', 'end_x']:
actions[col] = events[col] / 100 * spadlconfig.field_length
for col in ['start_y', 'end_y']:
actions[col] = events[col] / 100 * spadlconfig.field_width
actions['type_id'] = events[['type_name', 'outcome', 'qualifiers']].apply(_get_type_id, axis=1)
actions['result_id'] = events[['type_name', 'outcome', 'qualifiers']].apply(
_get_result_id, axis=1
)
actions['bodypart_id'] = events.qualifiers.apply(_get_bodypart_id)
actions = (
actions[actions.type_id != spadlconfig.actiontypes.index('non_action')]
.sort_values(['game_id', 'period_id', 'time_seconds'])
.reset_index(drop=True)
)
actions = _fix_owngoals(actions)
actions = _fix_direction_of_play(actions, home_team_id)
actions = _fix_clearances(actions)
actions['action_id'] = range(len(actions))
actions = _add_dribbles(actions)
for col in [c for c in actions.columns.values if c != 'original_event_id']:
if '_id' in col:
actions[col] = actions[col].astype(int)
return actions
def _get_bodypart_id(qualifiers: Dict[int, Any]) -> int:
if 15 in qualifiers:
b = 'head'
elif 21 in qualifiers:
b = 'other'
else:
b = 'foot'
return spadlconfig.bodyparts.index(b)
def _get_result_id(args: Tuple[str, bool, Dict[int, Any]]) -> int:
e, outcome, q = args
if e == 'offside pass':
r = 'offside' # offside
elif e == 'foul':
r = 'fail'
elif e in ['attempt saved', 'miss', 'post']:
r = 'fail'
elif e == 'goal':
if 28 in q:
r = 'owngoal' # own goal, x and y must be switched
else:
r = 'success'
elif e == 'ball touch':
r = 'fail'
elif outcome:
r = 'success'
else:
r = 'fail'
return spadlconfig.results.index(r)
def _get_type_id(args: Tuple[str, bool, Dict[int, Any]]) -> int: # noqa: C901
eventname, outcome, q = args
if eventname in ('pass', 'offside pass'):
cross = 2 in q
freekick = 5 in q
corner = 6 in q
throw_in = 107 in q
goalkick = 124 in q
if throw_in:
a = 'throw_in'
elif freekick and cross:
a = 'freekick_crossed'
elif freekick:
a = 'freekick_short'
elif corner and cross:
a = 'corner_crossed'
elif corner:
a = 'corner_short'
elif cross:
a = 'cross'
elif goalkick:
a = 'goalkick'
else:
a = 'pass'
elif eventname == 'take on':
a = 'take_on'
elif eventname == 'foul' and outcome is False:
a = 'foul'
elif eventname == 'tackle':
a = 'tackle'
elif eventname in ('interception', 'blocked pass'):
a = 'interception'
elif eventname in ['miss', 'post', 'attempt saved', 'goal']:
if 9 in q:
a = 'shot_penalty'
elif 26 in q:
a = 'shot_freekick'
else:
a = 'shot'
elif eventname == 'save':
a = 'keeper_save'
elif eventname == 'claim':
a = 'keeper_claim'
elif eventname == 'punch':
a = 'keeper_punch'
elif eventname == 'keeper pick-up':
a = 'keeper_pick_up'
elif eventname == 'clearance':
a = 'clearance'
elif eventname == 'ball touch' and outcome is False:
a = 'bad_touch'
else:
a = 'non_action'
return spadlconfig.actiontypes.index(a)
def _fix_owngoals(actions: pd.DataFrame) -> pd.DataFrame:
owngoals_idx = (actions.result_id == spadlconfig.results.index('owngoal')) & (
actions.type_id == spadlconfig.actiontypes.index('shot')
)
actions.loc[owngoals_idx, 'end_x'] = (
spadlconfig.field_length - actions[owngoals_idx].end_x.values
)
actions.loc[owngoals_idx, 'end_y'] = (
spadlconfig.field_width - actions[owngoals_idx].end_y.values
)
actions.loc[owngoals_idx, 'type_id'] = spadlconfig.actiontypes.index('bad_touch')
return actions
| mit |
btittelbach/pyhledger | examples_and_script_dependant_on_r3_account_structure/stats.py | 1 | 18587 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import sys
import codecs
import os, io
import sqlite3 as lite
import datetime
import dateutil.relativedelta
from collections import defaultdict
import csv
import subprocess
sqlite_db_=os.path.split(__file__)[0]+'/../Ledgers/members.sqlite'
hledger_ledgerpath_=os.path.split(__file__)[0]+'/../Ledgers/r3.ledger'
dateformat_ = "%Y-%m-%d"
dateformat_hledger_csvexport_ = "%Y/%m/%d"
dateformat_monthonly_ = "%Y-%m"
# unaccounted money in fraction of expected beverages revenue
unaccounted_money_fraction = [ (datetime.date(2013,1,22),-.0857),
#...
]
colorslist_=["r","b","g","y",'c',"m",'w',"burlywood","aquamarine","chartreuse","Coral","Brown","DarkCyan","DarkOrchid","DeepSkyBlue","ForestGreen","Gold","FloralWhite","Indigo","Khaki","GreenYellow","MediumVioletRed","Navy","Tomato","Maroon","Fuchsia","LightGoldenRodYellow"] * 20
def getVeryFirstMonth(con):
cur = con.cursor()
cur.execute("SELECT min(m_firstmonth) from membership")
strdate = cur.fetchone()[0]
rdate = datetime.datetime.strptime(strdate, dateformat_).date()
return normalizeDateToFirstInMonth(rdate)
def getNextMonth():
rdate = datetime.date.today() + dateutil.relativedelta.relativedelta(months=2)
rdate -= dateutil.relativedelta.relativedelta(days=rdate.day)
return rdate
def getMembersInMonth(con, month):
assert(isinstance(month,datetime.date))
cur = con.cursor()
cur.execute('SELECT count(*),sum(m_fee) from membership where m_firstmonth <= ? and (m_lastmonth is null or m_lastmonth is "" or m_lastmonth >= ?)', (month.isoformat(),month.isoformat()))
return cur.fetchone()
#return [r[0] for r in rows] # unpack singlevalue tuples: [(x,)] => [x]
### returns results from membership table in sqlite db
### @return [(p_id, m_firstmonth :: datetime.date, m_lastmonth :: datetime.date || None, m_fee: float), (...), (...), ...]
def getMemberships(con):
cur = con.cursor()
cur.execute('SELECT p_id, m_firstmonth, m_lastmonth, m_fee from membership order by m_firstmonth')
rows = cur.fetchall()
return [(r[0],datetime.datetime.strptime(r[1], dateformat_).date(), datetime.datetime.strptime(r[2], dateformat_).date() if not (r[2] is None or len(r[2]) < 1) else None, r[3]) for r in rows]
### Helperclass, basically a variant (float,float) tuple with a __str__ method
### used to count plus/minus members / membershipincome per date
class PlusMinusTuple():
def __init__(self):
self.plus=0
self.minus=0
def __str__(self):
s=[]
if self.plus:
s+=["+%d" % self.plus]
if self.minus:
s+=["%d" % self.minus]
return "\n".join(s)
### @return datetime.date with the day of given datetime.date set to 1
def normalizeDateToFirstInMonth(rdate):
if rdate.day != 1:
rdate += dateutil.relativedelta.relativedelta(days=1-rdate.day)
return rdate
### calculates for each month, the number of members who are new and who left
### respectively for each month the amount and decrease in membershipincome
### summing up the two values in a PlusMinusTuple would give the change for that month
### @return tuple (pm_dates,pm_fee_dates) where pm_dates is of type dict{ datetime.date : PlusMinusTuple }
def extractPlusMinusFromMemberships(membership):
pm_dates = defaultdict(lambda:PlusMinusTuple())
pm_fee_dates = defaultdict(lambda:PlusMinusTuple())
for p_id, fmonth, lmonth, m_fee in membership:
pm_dates[fmonth.strftime(dateformat_monthonly_)].plus += 1
pm_fee_dates[fmonth.strftime(dateformat_monthonly_)].plus += m_fee
if lmonth:
lmonth = normalizeDateToFirstInMonth(lmonth) + dateutil.relativedelta.relativedelta(months=1)
pm_dates[lmonth.strftime(dateformat_monthonly_)].minus -= 1
pm_fee_dates[lmonth.strftime(dateformat_monthonly_)].minus -= m_fee
return (pm_dates, pm_fee_dates)
### @return dict[ p_id :(p_nick,p_name) ] for all members
def getMemberInfos(con):
cur = con.cursor()
cur.execute('SELECT p_id, p_nick, p_name from membership left join persons using (p_id) order by p_id')
rows = cur.fetchall()
return dict([(a,(b,c)) for a,b,c in rows])
### for each month between start of data and now,
### the function returns the number of members and the theoretical amount of membershipincome from membershipfees
### @return [(datetime.date, (<number of members in month>:int, <membershipincome in month>:float))]
def getMembersTimeData(con):
cdate = getVeryFirstMonth(con)
enddate = getNextMonth()
rv = []
while cdate < enddate:
rv.append((cdate, getMembersInMonth(con,cdate)))
cdate += dateutil.relativedelta.relativedelta(months=1)
return rv
def graphMembersOverTimeWithPlusMinusText(membertimedata, memberinfos, membership):
ydates, yvalues = zip(*membertimedata)
ydates = list(ydates)
#yvalues = map(lambda x:x[0],yvalues)
plotlabel = [u"Number of members over time","Membership Income over time"]
plt.plot(ydates, yvalues, 'o',linewidth=2, markevery=1)
plt.ylabel("#Members")
plt.xlabel("Month")
plt.grid(True)
plt.legend(plotlabel,loc='upper left')
plt.twiny()
plt.ylabel("Euro")
#plt.title(plotlabel)
## label with +x-y members per month
membersinmonth = dict(membertimedata)
#print "\n".join([ "%s:%s" % (x[0],str(x[1])) for x in extractPlusMinusFromMemberships(membership).items()])
pm_dates, pm_fee_dates = extractPlusMinusFromMemberships(membership)
for astrdate, tpl in pm_dates.items():
adate = datetime.datetime.strptime(astrdate, dateformat_monthonly_).date()
assert(adate.day==1)
if adate in membersinmonth:
xy = (adate, membersinmonth[adate][0])
xytext = (xy[0], xy[1]+1)
plt.annotate(str(tpl), xy=xy, xytext=xytext,arrowprops=dict(facecolor='gray', shrink=0.5))
for astrdate, tpl in pm_fee_dates.items():
adate = datetime.datetime.strptime(astrdate, dateformat_monthonly_).date()
assert(adate.day==1)
if adate in membersinmonth:
xy = (adate, membersinmonth[adate][1])
xytext = (xy[0], xy[1]+30)
plt.annotate(str(tpl), xy=xy, xytext=xytext,arrowprops=dict(facecolor='gray', shrink=0.5))
plt.subplots_adjust(left=0.06, bottom=0.05, right=0.99, top=0.95)
def graphMembershipIncomeOverTime(membertimedata, memberinfos, membership):
ydates, yvalues = zip(*membertimedata)
ydates = list(ydates)
yvalues = map(lambda x:x[1],yvalues)
plotlabel = u"Membership Income over time"
plt.plot(ydates, yvalues, '^g',linewidth=2, markevery=1)
plt.ylabel("Euro")
plt.xlabel("Month")
plt.grid(True)
plt.title(plotlabel)
## label with +x-y members per month
membersinmonth = dict(membertimedata)
#print "\n".join([ "%s:%s" % (x[0],str(x[1])) for x in extractPlusMinusFromMemberships(membership).items()])
pm_dates, pm_fee_dates = extractPlusMinusFromMemberships(membership)
for astrdate, tpl in pm_fee_dates.items():
adate = datetime.datetime.strptime(astrdate, dateformat_monthonly_).date()- dateutil.relativedelta.relativedelta(months=1)
assert(adate.day==1)
if adate in membersinmonth:
plt.vlines(adate + dateutil.relativedelta.relativedelta(days=11),tpl.minus+membersinmonth[adate][1],tpl.plus+membersinmonth[adate][1])
plt.hlines(membersinmonth[adate][1], adate + dateutil.relativedelta.relativedelta(days=10),adate + dateutil.relativedelta.relativedelta(days=12))
xstart,xend = plt.xlim()
locs, labels = plt.xticks(np.arange(xstart,xend,61))
plt.gca().xaxis.set_major_formatter(plt.matplotlib.dates.DateFormatter(dateformat_monthonly_, tz=None))
plt.setp(labels, rotation=80)
plt.subplots_adjust(left=0.06, bottom=0.08, right=0.99, top=0.95)
def graphMembersOverTime(membertimedata, memberinfos, membership):
ydates, yvalues = zip(*membertimedata)
ydates = list(ydates)
yvalues = map(lambda x:x[0],yvalues)
plotlabel = u"Number of members over time"
plt.plot(ydates, yvalues, '^',linewidth=2, markevery=1)
plt.ylabel("Members")
plt.ylim(ymin=0)
plt.xlabel("Month")
plt.grid(True)
plt.title(plotlabel)
## label with +x-y members per month
membersinmonth = dict(membertimedata)
#print "\n".join([ "%s:%s" % (x[0],str(x[1])) for x in extractPlusMinusFromMemberships(membership).items()])
pm_dates, pm_fee_dates = extractPlusMinusFromMemberships(membership)
for astrdate, tpl in pm_dates.items():
adate = datetime.datetime.strptime(astrdate, dateformat_monthonly_).date()- dateutil.relativedelta.relativedelta(months=1)
assert(adate.day==1)
if adate in membersinmonth:
plt.vlines(adate + dateutil.relativedelta.relativedelta(days=11),tpl.minus+membersinmonth[adate][0],tpl.plus+membersinmonth[adate][0])
plt.hlines(membersinmonth[adate][0], adate + dateutil.relativedelta.relativedelta(days=10),adate + dateutil.relativedelta.relativedelta(days=12))
plt.subplots_adjust(left=0.06, bottom=0.05, right=0.99, top=0.95)
def graphMembershipdurationsPerPersonOverTime(membership, memberinfos):
colorslist = list(colorslist_)
membercolor = defaultdict(lambda: colorslist.pop(0))
plotlabel = u"Members over Time"
legendhandles={}
duration = None
membernames_inorder = [None]*(max(memberinfos.keys())+1)
for p_id, fmonth, lmonth, m_fee in membership:
if fmonth > datetime.date.today():
continue
xpos = fmonth
if lmonth is None:
duration = getNextMonth() - fmonth
else:
duration = lmonth - fmonth
legendhandles[memberinfos[p_id][0]] = plt.barh([p_id],[duration.days],left=xpos,color=membercolor[p_id])
membernames_inorder[p_id] = memberinfos[p_id][0]
plt.ylabel("Member")
plt.xlabel("Month")
## set xaxis maximum to 2 months from today, so we have some space between yaxis and bars
plt.xlim(xmax=datetime.date.today() + dateutil.relativedelta.relativedelta(months=2))
## fill the yaxis description with the membernames
plt.yticks(memberinfos.keys(),[nick for nick, name in memberinfos.values()])
## put yaxis labelin on the right as well as on the left
plt.tick_params(labelright=True)
## show a grid so we can more easily connect bars to membernames
plt.grid(True)
plt.title(plotlabel)
## show dates on xaxis in 61 day intervals
xstart,xend = plt.xlim()
locs, labels = plt.xticks(np.arange(xstart,xend,61))
## format dates as %Y-%m
plt.gca().xaxis.set_major_formatter(plt.matplotlib.dates.DateFormatter(dateformat_monthonly_, tz=None))
## rotate dates on xaxis for better fit
plt.setp(labels, rotation=80)
## make graph use more space (good if windows is maximized)
plt.subplots_adjust(left=0.08, bottom=0.08, right=0.92, top=0.95)
def graphMembershipdurationsPerPersonOverTime2(membership, memberinfos):
plotlabel = u"Members over Time"
colorslist = list(colorslist_)
membercolor = defaultdict(lambda: colorslist.pop(0))
membership2 = defaultdict(list)
    for p_id, fmonth, lmonth, m_fee in membership:
        duration = None
        if lmonth is None:
            duration = getNextMonth() - fmonth
        else:
            duration = lmonth - fmonth
        ## store (start date, duration in days) tuples for broken_barh below
        membership2[p_id].append((fmonth, duration.days))
    for p_id, xrng in membership2.items():
        plt.broken_barh(xrng, (p_id, 1), label=memberinfos[p_id][0], color=membercolor[p_id])
    plt.ylabel("Member")
    plt.xlabel("Month")
    plt.legend(loc='upper left')
plt.title(plotlabel)
def graphUnaccountedMoney(ucmoney):
plotlabel = u"Unaccounted-for money in cash register"
plus_ucmoney = filter(lambda (x,y): y>=0,ucmoney)
minus_ucmoney = filter(lambda (x,y): y<0,ucmoney)
plus_x,plus_y = zip(*plus_ucmoney)
minus_x,minus_y = zip(*minus_ucmoney)
#plt.stem(x,y)
plt.bar(plus_x,plus_y,width=15,color="OliveDrab",edgecolor="k")
plt.bar(minus_x,minus_y,width=15,color="Crimson",edgecolor="k")
plt.ylabel("% EUR of expected income")
plt.xlabel("Date")
plt.grid(True)
## draw a line at y=0 (i.e. xaxis line)
plt.axhline(0, color='black')
plt.title(plotlabel)
### csv reader workaround, see the Python csv module documentation
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
dialect=dialect, **kwargs)
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
yield [unicode(cell, 'utf-8') for cell in row]
### csv reader workaround, see the Python csv module documentation
def utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield line.encode('utf-8')
### query hledger in 'register' mode with given parameters and makes hledger output in csv-format
### the csv output is then being parsed and inserted into
### @return dict[accountname : string][day/week/month/quarter : datetime.time] = amount : float
def getHLedger(hledger_filter):
assert(isinstance(hledger_filter,list))
stdout = subprocess.Popen(['/home/bernhard/.cabal/bin/hledger', '-f', hledger_ledgerpath_,"register", '-O', 'csv','-o','-'] + hledger_filter, stdout=subprocess.PIPE).communicate()[0]
## python asumes subprocess.PIPE i.e. stdout is ascii encoded
## thus a hack is required to make csv.reader process it in utf-8.
## first we need to convert it to unicode() type and then
## each line has to be converted back to a utf-8 string for cvs.reader()
csvfile = unicode_csv_reader(codecs.decode(stdout,"utf-8").split(u"\n"),delimiter=',', quotechar='"')
rv = defaultdict(dict)
for row in csvfile:
if len(row) != 5 or row[0] == "date":
continue
(date,description,account,amtWcurrency,balance) = row
date = datetime.datetime.strptime(date,dateformat_hledger_csvexport_).date()
amtstr, currency = amtWcurrency.split(" ")
amount = float(amtstr.replace(",",""))
rv[account][date] = amount
return rv
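### Illustrative sketch (editorial addition, not part of the original script):
### the mapping returned by getHLedger() is register_dict[account][date] = amount,
### so summing one account's time series is a plain sum over the inner dict.
### The default account name below is a made-up example.
def sumAccountOverTime(register_dict, account_name="expenses:room"):
    return sum(register_dict.get(account_name, {}).values())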
def getHBarBottoms(bottom_dict_min, bottom_dict_max, dates, values):
return map(lambda (dat, val): bottom_dict_max[dat] if val >= 0 else bottom_dict_min[dat], zip(dates,values))
def plotMonthlyExpenses(monthly_register_dict):
plotlabel = u"Monthly Expenses"
colorslist = list(colorslist_)
acctcolor = defaultdict(lambda: colorslist.pop(0))
legend_barrefs = []
legend_accts = []
bottom_dict_max = defaultdict(int)
bottom_dict_min = defaultdict(int)
width=20
for acct, date_amt_dict in monthly_register_dict.items():
dates, amts = zip(*sorted(date_amt_dict.items()))
legend_barrefs.append( plt.bar(dates, amts, width, color=acctcolor[acct], bottom=getHBarBottoms(bottom_dict_min, bottom_dict_max, dates, amts)) )
legend_accts.append(acct)
for date, amt in date_amt_dict.items():
bottom_dict_max[date] = max(bottom_dict_max[date], bottom_dict_max[date] + amt)
bottom_dict_min[date] = min(bottom_dict_min[date], bottom_dict_max[date] + amt)
plt.ylabel("EUROs")
plt.xlabel("Date")
plt.grid(True)
plt.title(plotlabel)
## show dates on xaxis in 61 day intervals
xstart,xend = plt.xlim()
locs, labels = plt.xticks(np.arange(xstart,xend,61))
## format dates as %Y-%m
plt.gca().xaxis.set_major_formatter(plt.matplotlib.dates.DateFormatter(dateformat_monthonly_, tz=None))
## rotate dates on xaxis for better fit
plt.setp(labels, rotation=80)
## display legend in given corner
plt.legend( legend_barrefs, legend_accts ,loc='upper left')
## make graph use more space (good if windows is maximized)
plt.subplots_adjust(left=0.05, bottom=0.08, right=0.98, top=0.95)
def plotQuaterlyOtherExpenses(register_dict):
plotlabel = u"Other Quaterly Expenses"
colorslist = list(colorslist_)
acctcolor = defaultdict(lambda: colorslist.pop(0))
legend_barrefs = []
legend_accts = []
bottom_dict_max = defaultdict(int)
bottom_dict_min = defaultdict(int)
width=20
for acct, date_amt_dict in register_dict.items():
dates, amts = zip(*sorted(date_amt_dict.items()))
legend_barrefs.append( plt.bar(dates, amts, width, color=acctcolor[acct], bottom=getHBarBottoms(bottom_dict_min, bottom_dict_max, dates, amts)) )
legend_accts.append(acct)
for date, amt in date_amt_dict.items():
bottom_dict_max[date] = max(bottom_dict_max[date], bottom_dict_max[date] + amt)
bottom_dict_min[date] = min(bottom_dict_min[date], bottom_dict_max[date] + amt)
plt.ylabel("EUROs")
plt.xlabel("Date")
plt.grid(True)
plt.title(plotlabel)
xstart,xend = plt.xlim()
locs, labels = plt.xticks(np.arange(xstart,xend,30.5*3))
plt.gca().xaxis.set_major_formatter(plt.matplotlib.dates.DateFormatter(dateformat_monthonly_, tz=None))
plt.setp(labels, rotation=80)
plt.legend( legend_barrefs, legend_accts ,loc='lower left')
plt.subplots_adjust(left=0.05, bottom=0.08, right=0.98, top=0.95)
con = lite.connect(sqlite_db_)
membertimedata = getMembersTimeData(con)
memberships = getMemberships(con)
memberinfos=getMemberInfos(con)
con.close()
graphMembersOverTime(membertimedata,memberinfos,memberships)
plt.figure()
graphMembershipIncomeOverTime(membertimedata,memberinfos,memberships)
plt.figure()
graphMembersOverTimeWithPlusMinusText(membertimedata,memberinfos,memberships)
plt.figure()
graphMembershipdurationsPerPersonOverTime(memberships,memberinfos)
# plt.figure()
# graphMembershipdurationsPerPersonOverTime2(memberships,memberinfos)
plt.figure()
graphUnaccountedMoney(unaccounted_money_fraction)
plt.figure()
plotMonthlyExpenses(getHLedger(["-M","acct:expenses:room","acct:expenses:bank","acct:expenses:internet-domain","acct:expenses:taxes","date:from 2010/01/01"]))
plt.figure()
plotQuaterlyOtherExpenses(getHLedger(["-Q","acct:expenses:---.+---","acct:expenses:projects","acct:expenses:disposal","date:from 2013/03/01"]))
plt.figure()
plotQuaterlyOtherExpenses(getHLedger(["-Q","--depth=1","acct:expenses","acct:revenue","not:acct:expenses:hirepurchase:lasercutter1","date:from 2013/03/01 to "+datetime.date.today().strftime("%Y/%m/19")]))
plt.show()
| agpl-3.0 |
bmazin/ARCONS-pipeline | legacy/arcons_control/lib/make_image_v2.py | 2 | 5473 | # make_image.py
# 05/30/11 version 2 updated to make image as numpy array and return mplib figure to arcons quicklook
#from data2ascii import unpack_data
from PIL import Image
from PIL import ImageDraw
from numpy import *
import matplotlib
from matplotlib.pyplot import plot, figure, show, rc, grid
import matplotlib.pyplot as plt
#will actually need intermediate work to unpack these arrays from file and pass them in
def make_image(photon_count, median_energy, color_on = True, white_pixels = .10):
'''
    Updated from the 08/31/10 version. Image generation will happen on the GUI machine now. organize_data
    will be run on the SDR to pass over a binary file with arrays of each pixel's photon count and median energy.
    Those arrays will be unpacked in the GUI image generation thread, combined into cumulative arrays if we
    are doing an observation, and then passed to make_image as arrays of photon counts and energies.
'''
array_rows = 32
array_cols = 32
total_pixels = array_rows * array_cols
print "Generating image"
im = Image.new("RGB",(array_cols,array_rows))
draw = ImageDraw.ImageDraw(im)
#to get better v gradient we want to saturate brightest 10% of pixels
#make histogram out of the lengths of each pixel. Histogram peak will be at the low end
#as most pixels will be dark, thus having small "lengths" for their photon lists.
hist_counts, hist_bins = histogram(photon_count, bins=100)
brightest_pixels = 0
nbrightestcounts = 0.0
q=1
#starting at the high end of the histogram (bins containing the pixels with the most photons),
#count backwards until we get to the 5th brightest, then set that to maximum v value.
#Thus the few brighter pixels will be saturated, and the rest will be scaled to this
#5th brightest pixel.
ncounts = float(sum(photon_count))
#print "ncounts ", ncounts
cdf = array(cumsum(hist_counts*hist_bins[:-1]),dtype = float32)
#print cdf
idx = (where(cdf > (ncounts*(1.0-white_pixels))))[0][0] #where cdf has 1-white_pixels percent of max number of counts
#print idx
vmax = hist_bins[idx]
#while float(nbrightestcounts/float(ncounts)) <= white_pixels:
#brightest_pixels += hist_bins[-q]
#nbrightestcounts += hist_counts[-q]
#q+=1
#if vmax == 0: #if vmax = 0 then no pixels are illuminated
#while vmax ==0: #check through brightest pixels until one is found
#q -= 1
#vmax = pixel_hist[1][-q]
for m in range(total_pixels):
try:
if median_energy[m] >= 3.1:
hue= 300
elif median_energy[m] <= 1.9:
hue= 0
else:
hue = int(((median_energy[m]-1.9)/(3.1-1.9))*300)
except ValueError:
hue = 150 #if median energy is NaN, that pixel has no photons, so set hue to green and v will be 0
#normalize number of photons in that pixel by vmax, then *80 to give brightness
try:
v = int((photon_count[m]/vmax)*80)
if v < 0:
v=0 #after sky subtraction we may get negative counts for some pixels
except ValueError:
v=0 #if v is NaN set v to 0
if color_on == True:
s=v #scale saturation with v so brightest pixels show most color, dimmer show less color
else:
s=0 #make image black and white if color is turned off
colorstring = "hsl(%i,%i%%,%i%%)" %(hue,s,v)
imx = m%(array_cols)
#to flip image vertically use: imy = m/array_cols
imy = (array_rows - 1) - m/(array_cols)
draw.point((imx,imy),colorstring)
return im
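#Illustrative usage sketch (editorial addition, not part of the original module);
#the uniform arrays below are synthetic placeholders, not real detector data.
def _example_make_image():
    photon_count = [10] * 1024
    median_energy = [2.5] * 1024
    return make_image(photon_count, median_energy, color_on=True)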
#10/5/10 added main portion so single binary data file can be turned into an image
if __name__ == "__main__":
file = raw_input("enter binary data file name: ")
newpixel, newtime, newenergy = unpack_data(file)
imagefile = raw_input("enter image file name to save data to: ")
obs = len(newenergy)
print "creating list of each pixel's photons"
each_pixels_photons = []
lengths = []
#generate empty list for pixels to have photons dumped into
for j in range(1024):
each_pixels_photons.append([])
#search through data and place energies in right pixels
for k in range(obs):
each_pixels_photons[newpixel[k]].append(newenergy[k])
for l in range(1024):
lengths.append(len(each_pixels_photons[l]))
print "Generating image"
im = Image.new("RGB",(32,32))
draw = ImageDraw.ImageDraw(im)
#to get better v distribution we want to saturate brightest 0.5% of pixels
pixel_hist = histogram(lengths, bins=100)
photon_sum=0
q=1
while photon_sum <=4:
photon_sum += pixel_hist[0][-q]
q+=1
vmax = pixel_hist[1][-q]
for m in range(1024):
#normalize pixel's ave energy by max of 5, then multiply by 300 to give hue value between 0 and 300
median_energy = median(each_pixels_photons[m])
try:
if median_energy >= 3.1:
hue= 300
elif median_energy <= 1.9:
hue= 0
else:
hue = int(((median_energy-1.9)/(3.1-1.9))*300)
except ValueError:
hue = 150 #if median energy is NaN, that pixel has no photons, so set hue to green and v will be 0
#normalize number of photons in that pixel by vmax, then *80 to give brightness
try:
v = (len(each_pixels_photons[m])/vmax)*80
except ValueError:
v=0 #if v is NaN set v to 0
s=v #scale saturation with v so brightest pixels show most color, dimmer show less color
colorstring = "hsl(%i,%i%%,%i%%)" %(hue,s,v)
imx = m%(32)
#switch between two lines below to flip array vertically
#imy = m/array_cols
imy = (31) - m/(32)
#imy = m/(32)
draw.point((imx,imy),colorstring)
im.show()
| gpl-2.0 |
glennq/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 157 | 2409 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
nickgentoo/scikit-learn-graph | scripts/cross_validation_NEUROCOMPUTING16.py | 1 | 6331 | import sys, os
from math import sqrt
from copy import copy
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '',''))
import numpy as np
#from skgraph import datasets
from sklearn import svm
#from skgraph.ioskgraph import *
from math import sqrt, ceil
import sys
#"sys.path.append('..\\..\\Multiple Kernel Learning\\Framework')"
if len(sys.argv)<4:
sys.exit("python cross_validation_from_matrix_norm.py inputMatrix.libsvm C outfile MCit")
c=float(sys.argv[2])
MC=int(sys.argv[4])
##TODO read from libsvm format
from sklearn.datasets import load_svmlight_file
#TODO: faster method for loading the matrices (also for dumping them)
#from svmlight_loader import load_svmlight_file # line 22 is not needed
km, target_array = load_svmlight_file(sys.argv[1])
#print type(target_array)
#print target_array
#Controlla se target array ha +1 e -1! se ha 0, sostituisco gli 0 ai -1
if not -1 in target_array:
print "WARNING: no -1 in target array! Changing 0s to -1s"
target_array = np.array([-1 if x == 0 else x for x in target_array])
#print km
#drop the index column
##############kmgood=km[:,1:].todense()
gram=km[:,1:].todense()
#NORMALIZATION
#for i in xrange(len(target_array)):
# for j in xrange(0,len(target_array)):
# #print i,j,kmgood[i,j],kmgood[i,i],kmgood[j,j]
# if kmgood[i,i]*kmgood[j,j]==0:
# print "WARNING: avoided divizion by zero"
# gram[i,j]=0
# else:
# gram[i,j]=kmgood[i,j]/sqrt(kmgood[i,i]*kmgood[j,j])
#-----------------------------------
#print gram
#from sklearn.metrics import make_scorer
# (16) in the paper
def my_custom_loss_func(ground_truth, predictions):
total_loss=0.0
for gt,p in zip(ground_truth, predictions):
#print gt, p
diff = (1.0 - (gt * p)) / 2.0
if diff<0:
diff=0.0
if diff > 1.0:
diff=1.0
total_loss+=diff
return total_loss / len(predictions)
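#Editorial note (not from the paper): the per-example loss above is the clipped
#linear loss l(y, p) = min(1, max(0, (1 - y*p)/2)) averaged over the predictions;
#e.g. my_custom_loss_func([1, -1], [1.0, 1.0]) evaluates to (0.0 + 1.0)/2 = 0.5.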
X=range(len(gram))
from sklearn import cross_validation
#for rs in range(42,43):
#for rs in range(42,53):
f=open(str(sys.argv[3]+".c"+str(c)),'w')
#NEW CODE FOR SUBSAMPLING
#REPEAT N TIMES
#gram=km[:,1:].todense()
f.write("Total examples "+str(len(gram))+"\n")
f.write("# \t Stability_MCsample\n")
from sklearn.cross_validation import train_test_split
Complexity=0.0
Wtot=0.0
for MCit in xrange(MC):
#print gram.shape
print("number of examples "+str(ceil(sqrt(gram.shape[0]))))
radn=int(ceil(sqrt(gram.shape[0])))
#print radn
y_train=[]
#continue only if both class labels are in the set
rand=MCit
while (len(np.unique(y_train))<2):
train_index, test_index, y_train, y_test = train_test_split(X, target_array, train_size=(radn-1),test_size=1, random_state=rand)
rand=MCit+MC
#print train_index, test_index, y_train.shape, y_test.shape
#At this point, X_train, X_test are the list of indices to consider in training/test
sc=[]
#print("TRAIN:", train_index, "TEST:", test_index)
#generated train and test lists, incuding indices of the examples in training/test
#for the specific fold. Indices starts from 0 now
total_index=copy(train_index)
total_index.extend(test_index)
#print total_index
#print y_train.tolist(),y_test.tolist()
temp=copy(y_train.tolist())
temp.extend(y_test.tolist())
y_total=np.array(temp)
#y_total.extend(y_test)
clf = svm.SVC(C=c, kernel='precomputed',max_iter=10000000)
clf1 = svm.SVC(C=c, kernel='precomputed',max_iter=10000000)
train_gram = [] #[[] for x in xrange(0,len(train))]
test_gram = []# [[] for x in xrange(0,len(test))]
total_gram = []
#compute training and test sub-matrices
index=-1
for row in gram:
index+=1
if index in train_index:
train_gram.append([gram[index,i] for i in train_index])
elif index in test_index:
test_gram.append([gram[index,i] for i in train_index])
if index in total_index:
total_gram.append([gram[index,i] for i in total_index])
#if not in training nor test, just discard the row
#print gram
#X_train, X_test, y_train, y_test = np.array(train_gram), np.array(test_gram), target_array[train_index], target_array[test_index]
X_train, X_test = np.array(train_gram), np.array(test_gram)
X_total=np.array(total_gram)
print("Fitting first SVM(training only)")
clf.fit(X_train, y_train)
print("Fitting second SVM(training+test)")
clf1.fit(X_total,y_total)
print("Training done")
#commented code to compute |W|
#print |W|^2= alpha Q alpha, where Q_ij= y_i y_j K(x_i,x_j)
alpha = clf1.dual_coef_
yw=target_array[clf1.support_]
Kw=gram[clf1.support_,:][:,clf1.support_]
#print yw.shape, Kw.shape, gram.shape
yw.shape=(yw.shape[0],1)
YM=np.ones(yw.shape[0])*yw.T
Q= np.multiply(np.multiply(YM,Kw),YM.T)
#print Q.shape
#print alpha.shape
#alpha.shape=(alpha.shape[1],1)
W2=alpha*Q*alpha.T
print "|W|" , sqrt(W2),
#f.write("|W| "+str(sqrt(W2))+"\n")
Wtot+=float(W2)
#-------------------------
#loss = make_scorer(my_custom_loss_func, greater_is_better=False)
#from sklearn.metrics import accuracy_score
    #decision values of the training-only model on the held-out test example
    y_test_predicted=clf.decision_function(X_test)
    #print type( my_custom_loss_func(y_train, y_train_predicted))
    # loss of the training-only model on the held-out test example
    loss_training=my_custom_loss_func(y_test, y_test_predicted)
y_test_predicted_total=clf1.predict(X_total)
loss_total=my_custom_loss_func(y_test, y_test_predicted_total)
Complexity+=abs(loss_training-loss_total)
print "Complexity", abs(loss_training-loss_total)
f.write(str(MCit)+" "+str(abs(loss_training-loss_total))+"\n")
#print y_test.shape, y_test_predicted.shape
#print y_test
#print y_test_predicted_binary
#print "Accuracy: ", accuracy_score(y_test, y_test_predicted_binary)
#y_test_sign=map(np.sign, y_test_predicted)
#print "Accuracy_decision: ", accuracy_score(y_test, y_test_sign)
Complexity/=MC
Wtot/=MC
f.write(str(abs(loss_training-loss_total))+"\n")
f.write("Stability with "+str(MC)+" MonteCarlo samples: "+str(float(Complexity))+" Wmax "+str(Wtot)+"\n")
print "Stability with", MC, "MonteCarlo samples:", Complexity,"Wmax", str(Wtot)
f.close()
| gpl-3.0 |
RomainBrault/scikit-learn | examples/plot_digits_pipe.py | 65 | 1652 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
jasanmiguel10/HackEvent | heartpoo/predict/predict_heartpoo.py | 1 | 1495 | # -*- coding: utf-8 -*-
import pandas as pd
import sys
import re
from keras.models import load_model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import text_to_word_sequence
from keras.layers import Embedding
from unidecode import unidecode
from keras.models import Model
import numpy as np
from keras.layers import Dense, Input, Flatten
import pickle
MAX_NB_WORDS = 20000
tokenizer_pickle = '../model/default_tokenizer_5.pickle'
model_cnn = '../model/default_5.h5'
txt = sys.argv[1]
print('%%%')
print(len(txt))
txt = txt.strip()
txt_sq = [txt]
#Still needs to get clean pre-process data
#txt = _pattern.sub(r'',txt)
#txt = re.sub("&.*?;","",txt)
#txt = re.sub("http.*?\s","",txt)
#txt = re.sub("@.*?\s","",txt)
#Test_data
#input_1 = 'word is you use roids, stupid hypocrite lying faggots.'
#input_2 = 'some species of birds have been known to hold funerals for their deceased.'
with open(tokenizer_pickle, "rb") as f:
tokenizer = pickle.load(f)
#seq_c = text_to_word_sequence(txt,filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',lower=True,split=" ")
#test = sequence_to_matrix(seq_c)
sequences = tokenizer.texts_to_sequences(txt_sq)
query = pad_sequences(sequences, maxlen=1000)
model_ = load_model(model_cnn)
predicted_labels = []
prediction = model_.predict(query)
print (prediction)
for item in prediction:
predicted_labels.append(np.argmax(item))
print predicted_labels
| mit |
kthyng/octant | octant/sandbox/googleearth.py | 4 | 8875 | #!/usr/bin/env python
# encoding: utf-8
"""
geo_anim.py
Created by Rob Hetland on 2008-01-14.
Copyright (c) 2008 Texas A&M Univsersity. All rights reserved.
"""
import matplotlib
matplotlib.use('Agg')
from numpy import *
from matplotlib.pyplot import *
import pylab
import zipfile
import octant
import os
kml_preamble = '''<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://earth.google.com/kml/2.1">
<Document>
<name>Time Animation Test</name>
<open>1</open>
<description>
by Rob Hetland
</description>
<Folder>
<name>Frames</name>
'''
kml_frame = ''' <GroundOverlay>
<TimeSpan>
<begin>__TIMEBEGIN__</begin>
<end>__TIMEEND__</end>
</TimeSpan>
<color>__COLOR__</color>
<Icon>
<href>__FRAME__</href>
</Icon>
<LatLonBox>
<north>__NORTH__</north>
<south>__SOUTH__</south>
<east> __EAST__</east>
<west> __WEST__</west>
</LatLonBox>
</GroundOverlay>
'''
kml_legend = '''<ScreenOverlay>
<name>Legend</name>
<Icon>
<href>legend.png</href>
</Icon>
<overlayXY x="0" y="0" xunits="fraction" yunits="fraction"/>
<screenXY x="0.015" y="0.075" xunits="fraction" yunits="fraction"/>
<rotationXY x="0.5" y="0.5" xunits="fraction" yunits="fraction"/>
<size x="0" y="0" xunits="pixels" yunits="pixels"/>
</ScreenOverlay>
'''
kml_closing = ''' </Folder>
</Document>
</kml>
'''
def kmz_anim(lon, lat, time, prop, **kwargs):
lon = asarray(lon)
lat = asarray(lat)
jd = pylab.date2num(time)
jd_edges = hstack((1.5*jd[0]-0.5*jd[1],
0.5*(jd[1:]+jd[:-1]),
1.5*jd[-1]-0.5*jd[-2]))
time_edges = pylab.num2date(jd_edges)
time_starts = time_edges[:-1]
time_stops = time_edges[1:]
name = kwargs.pop('name', 'overlay')
color = kwargs.pop('color', '9effffff')
visibility = str( kwargs.pop('visibility', 1) )
kmzfile = kwargs.pop('kmzfile', 'overlay.kmz')
pixels = kwargs.pop('pixels', 300) # pixels of the max. dimension
units = kwargs.pop('units', '')
vmax = kwargs.pop('vmax', prop.max())
kwargs['vmax'] = vmax
vmin = kwargs.pop('vmin', prop.min())
kwargs['vmin'] = vmin
geo_aspect = cos(lat.mean()*pi/180.0)
xsize = lon.ptp()*geo_aspect
ysize = lat.ptp()
aspect = ysize/xsize
if aspect > 1.0:
figsize = (10.0/aspect, 10.0)
else:
figsize = (10.0, 10.0*aspect)
kml_text = kml_preamble
ioff()
fig = figure(figsize=figsize, dpi=pixels//10, facecolor=None, frameon=False)
ax = fig.add_axes([0, 0, 1, 1])
f = zipfile.ZipFile(kmzfile, 'w')
for frame in range(prop.shape[0]):
tstart = time_starts[frame]
tstop = time_stops[frame]
print 'Writing frame ', frame, tstart.isoformat(), tstop.isoformat()
ax.cla()
pc = ax.pcolor(lon, lat, prop[frame], **kwargs)
ax.set_xlim(lon.min(), lon.max())
ax.set_ylim(lat.min(), lat.max())
ax.set_axis_off()
icon = 'overlay_%d.png' % frame
savefig(icon)
kml_text += kml_frame.replace('__NAME__', name)\
.replace('__COLOR__', color)\
.replace('__VISIBILITY__', visibility)\
.replace('__SOUTH__', str(lat.min()))\
.replace('__NORTH__', str(lat.max()))\
.replace('__EAST__', str(lon.max()))\
.replace('__WEST__', str(lon.min()))\
.replace('__FRAME__', icon)\
.replace('__TIMEBEGIN__', tstart.isoformat())\
.replace('__TIMEEND__', tstop.isoformat())
f.write(icon)
os.remove(icon)
# legend
fig = figure(figsize=(1.0, 4.0), facecolor=None, frameon=False)
cax = fig.add_axes([0.0, 0.05, 0.2, 0.90])
cb = colorbar(pc, cax=cax)
cb.set_label(units, color='0.9')
for lab in cb.ax.get_yticklabels():
setp(lab, 'color', '0.9')
savefig('legend.png')
f.write('legend.png')
os.remove('legend.png')
kml_text += kml_legend
kml_text += kml_closing
f.writestr('overlay.kml', kml_text)
f.close()
if __name__ == '__main__':
ncll = octant.io.Dataset('/Users/rob/Archive/GWB/bodden/latlon.nc')
nc = octant.io.Dataset('/Users/rob/Archive/GWB/bodden/bsh_elev_2001-10.nc')
lat = ncll.variables['lat'][:]
lon = ncll.variables['lon'][:]
lon, lat = meshgrid(lon, lat)
time = octant.ocean_time(nc, name='time')[:200:4]
propname = 'elev'
prop = nc.variables[propname][:200:4]
mask = prop == nc.variables[propname].missing_value
prop = ma.masked_where(mask, prop)
kmz_anim(lon, lat, time.dates, prop, kmzfile='bsh_anim.kmz',
name='BSH model -- sea surface height', units='sea surface height [m]')
kml_groundoverlay = '''<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://earth.google.com/kml/2.0">
<Document>
<GroundOverlay>
<name>__NAME__</name>
<color>__COLOR__</color>
<visibility>__VISIBILITY__</visibility>
<Icon>
<href>overlay.png</href>
</Icon>
<LatLonBox>
<south>__SOUTH__</south>
<north>__NORTH__</north>
<west>__WEST__</west>
<east>__EAST__</east>
</LatLonBox>
</GroundOverlay>
<ScreenOverlay>
<name>Legend</name>
<Icon>
<href>legend.png</href>
</Icon>
<overlayXY x="0" y="0" xunits="fraction" yunits="fraction"/>
<screenXY x="0.015" y="0.075" xunits="fraction" yunits="fraction"/>
<rotationXY x="0.5" y="0.5" xunits="fraction" yunits="fraction"/>
<size x="0" y="0" xunits="pixels" yunits="pixels"/>
</ScreenOverlay>
</Document>
</kml>
'''
def geo_pcolor(lon, lat, prop, **kwargs):
"""docstring for geo_pcolor"""
name = kwargs.pop('name', 'overlay')
color = kwargs.pop('color', '9effffff')
visibility = str( kwargs.pop('visibility', 1) )
kmzfile = kwargs.pop('kmzfile', 'overlay.kmz')
pixels = kwargs.pop('pixels', 1024) # pixels of the max. dimension
units = kwargs.pop('units', '')
vmax = kwargs.pop('vmax', prop.max())
kwargs['vmax'] = vmax
vmin = kwargs.pop('vmin', prop.min())
kwargs['vmin'] = vmin
geo_aspect = cos(lat.mean()*pi/180.0)
xsize = lon.ptp()*geo_aspect
ysize = lat.ptp()
aspect = ysize/xsize
if aspect > 1.0:
figsize = (10.0/aspect, 10.0)
else:
figsize = (10.0, 10.0*aspect)
ioff()
fig = figure(figsize=figsize, facecolor=None, frameon=False, dpi=pixels//10)
ax = fig.add_axes([0, 0, 1, 1])
pc = ax.pcolor(lon, lat, prop, **kwargs)
ax.set_xlim(lon.min(), lon.max())
ax.set_ylim(lat.min(), lat.max())
ax.set_axis_off()
savefig('overlay.png')
f = zipfile.ZipFile(kmzfile, 'w')
f.writestr('overlay.kml', kml_groundoverlay.replace('__NAME__', name)\
.replace('__COLOR__', color)\
.replace('__VISIBILITY__', visibility)\
.replace('__SOUTH__', str(lat.min()))\
.replace('__NORTH__', str(lat.max()))\
.replace('__EAST__', str(lon.max()))\
.replace('__WEST__', str(lon.min())))
f.write('overlay.png')
os.remove('overlay.png')
fig = figure(figsize=(1.0, 4.0), facecolor=None, frameon=False)
ax = fig.add_axes([0.0, 0.05, 0.2, 0.9])
cb = colorbar(pc, cax=ax)
cb.set_label(units, color='0.9')
for lab in cb.ax.get_yticklabels():
setp(lab, 'color', '0.9')
savefig('legend.png')
f.write('legend.png')
os.remove('legend.png')
f.close()
if __name__ == '__main__':
ncll = pyroms.Dataset('/Users/rob/Archive/GWB/bodden/latlon.nc')
nc = pyroms.Dataset('/Users/rob/Archive/GWB/bodden/bsh_elev_2001-10.nc')
lat = ncll.variables['lat'][:]
lon = ncll.variables['lon'][:]
lon, lat = meshgrid(lon, lat)
propname = 'elev'
prop = nc.variables[propname][-1]
mask = prop == nc.variables[propname].missing_value
prop = ma.masked_where(mask, prop)
geo_pcolor(lon, lat, prop, kmzfile='bsh.kmz', \
name='BSH model -- sea surface height',\
units='sea surface height [m]')
| bsd-3-clause |
ENSTA-Bretagne-Guerledan-BoiteNoire/ROS_BUBBLE_Project | src/bubble_simu/src/sim_world.py | 1 | 4495 | #!/usr/bin/env python
# coding=utf-8
import rospy
from geometry_msgs.msg import Twist, Point, Pose
from std_msgs.msg import Float32, Float64, Int8
#from sensor_msgs import Imu, NavSatFix
import numpy as np
import math
import matplotlib.pyplot as plt
import tf.transformations as tf
from models.Boat import Boat
from models.BlackBox import BlackBox
# 1 = Left
# 2 = Right
class world():
def __init__(self):
rospy.init_node('display_python')
print "Node initialisation"
# Subscriber
self.TL_sub = rospy.Subscriber('commandT2', Float32, self.updateTL)
self.TR_sub = rospy.Subscriber('commandT1', Float32, self.updateTR)
self.state_sub = rospy.Subscriber('cmd_state', Int8, self.updateState)
# Publisher
self.pose_pub = rospy.Publisher('pose_real', Pose, queue_size=1)
self.twist_pub = rospy.Publisher('twist_real', Twist, queue_size=1)
self.pose_blackbox = rospy.Publisher('pose_blackBox', Point, queue_size=1)
#self.imu_pub = rospy.Publisher('imu/imu', Pose, queue_size=1)
#self.gps_pub = rospy.Publisher('pose_real', Pose, queue_size=1)
self.angle_pub = rospy.Publisher('angle_ping', Float64, queue_size=1)
# Internal variable
self.dt = 0.1
self.detectedAngle = -3
print "Creating boat"
self.boat = Boat(0,0,0,0,0,0)
self.blackBox = BlackBox(10,9,10)
def updateTL(self, msg):
# print 'received TL : ',msg
self.boat.TL = msg.data
def updateTR(self, msg):
# print 'received TR : ',msg
self.boat.TR = msg.data
def updateState(self, msg):
self.boat.state = msg.data
def updateWorld(self):
coeffFrot = 10
dx,dy,dtheta,dv = self.boat.move(coeffFrot)
# print 'dx : ',dx,dy,dtheta,dv
self.boat.update(
self.boat.x + (dx )*self.dt,
self.boat.y + (dy )*self.dt,
self.boat.theta + (dtheta)*self.dt,
self.boat.v + (dv )*self.dt,
dtheta
)
self.getAngleBlackBox()
def getAngleBlackBox(self):
        # If the boat is close enough AND its heading is within 90 degrees of the black box
dist2BlackBox = np.sqrt((self.blackBox.x - self.boat.x)**2 + (self.blackBox.y - self.boat.y)**2)
print 'dist2BlackBox = ',dist2BlackBox
angle2BlackBox = self.angle_add(math.atan2(self.blackBox.y - self.boat.y,self.blackBox.x - self.boat.x),-self.boat.theta)
print 'angle2BlackBox = ',angle2BlackBox/np.pi*180.0,' deg'
# print '-self.boat.theta = ',-self.boat.theta,' deg'
# print 'self.angle_add(angle2BlackBox,-self.boat.theta) = ',self.angle_add(0,0)/np.pi*180.0,' deg'
# print 'np.abs(self.angle_add(angle2BlackBox,-self.boat.theta)) = ',np.abs(self.angle_add(angle2BlackBox,-self.boat.theta))/np.pi*180.0,' deg'
if dist2BlackBox<self.blackBox.range \
and np.abs(self.angle_add(angle2BlackBox,-self.boat.theta))<np.pi/2.0:
self.detectedAngle = angle2BlackBox
else:
self.detectedAngle = -3
print 'Attribute angle : ',self.detectedAngle
def angle_add(self, a1, a2):
return np.mod(( a1 + a2 + 3*np.pi), 2*np.pi) - np.pi
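    # Illustrative note (editorial): angle_add wraps the sum back into [-pi, pi),
    # e.g. angle_add(3*np.pi/4, np.pi/2) returns -3*np.pi/4.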
def spin(self):
rate = rospy.Rate(1/self.dt)
while not rospy.is_shutdown():
self.updateWorld()
# print 'Pose : ', self.boat.pose
# print 'self.boat.pose.orientation.x : ',self.boat.pose.orientation.x
# print 'self.boat.pose.orientation.y : ',self.boat.pose.orientation.y
# print 'self.boat.pose.orientation.z : ',self.boat.pose.orientation.z
# print 'self.boat.pose.orientation.w : ',self.boat.pose.orientation.w
# print 'self.boat.pose.position.x : ',self.boat.pose.position.x
# print 'self.boat.pose.position.y : ',self.boat.pose.position.y
# print 'self.boat.pose.position.z : ',self.boat.pose.position.z
self.pose_pub.publish(self.boat.pose)
self.twist_pub.publish(self.boat.twist)
if self.boat.state != 0:
print "Publishing detectedAngle : ",self.detectedAngle
self.angle_pub.publish(self.detectedAngle)
self.pose_blackbox.publish(self.blackBox.point)
rate.sleep()
if __name__ == '__main__':
print "Node created"
w = world()
print "Spinning"
w.spin()
| mit |
samuel1208/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# shuffle everything around
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
DHI-GRAS/processing_SWAT | MDWF_PlotResults.py | 2 | 7620 | """
***************************************************************************
MDWF_PlotResults.py
-------------------------------------
Copyright (C) 2014 TIGER-NET (www.tiger-net.org)
***************************************************************************
* This plugin is part of the Water Observation Information System (WOIS) *
* developed under the TIGER-NET project funded by the European Space *
* Agency as part of the long-term TIGER initiative aiming at promoting *
* the use of Earth Observation (EO) for improved Integrated Water *
* Resources Management (IWRM) in Africa. *
* *
* WOIS is a free software i.e. you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published *
* by the Free Software Foundation, either version 3 of the License, *
* or (at your option) any later version. *
* *
* WOIS is distributed in the hope that it will be useful, but WITHOUT ANY *
* WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
* for more details. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program. If not, see <http://www.gnu.org/licenses/>. *
***************************************************************************
"""
import os
from PyQt4 import QtGui
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import *
from SWATAlgorithm import SWATAlgorithm
from datetime import date, timedelta, datetime
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
import read_SWAT_out
from SWAT_output_format_specs import SWAT_output_format_specs
RES_OUTSPECS = SWAT_output_format_specs()
class MDWF_PlotResults(SWATAlgorithm):
RES_FOLDER = "RES_FOLDER"
RES_TYPE = "RES_TYPE"
RES_VAR = "RES_VAR"
REACH_ID = "REACH_ID"
SUB_ID = "SUB_ID"
HRU_ID = "HRU_ID"
RES_OBSFILE = "RES_OBSFILE"
TEMP_RES = "TEMP_RES"
def __init__(self):
super(MDWF_PlotResults, self).__init__(__file__)
def defineCharacteristics(self):
self.name = "6 - Plot Results (MDWF)"
self.group = "Model development workflow (MDWF)"
self.addParameter(ParameterFile(MDWF_PlotResults.RES_FOLDER, "Select results folder", True))
self.addParameter(ParameterSelection(MDWF_PlotResults.TEMP_RES, "Temporal resolution", ['Daily','Weekly','Monthly'], False))
self.addParameter(ParameterSelection(MDWF_PlotResults.RES_TYPE, "Type of result", RES_OUTSPECS.RESULT_TYPES, False))
param = ParameterSelection(MDWF_PlotResults.RES_VAR, "Variable", RES_OUTSPECS.RESULT_VARIABLES , False)
param.isAdvanced = False
self.addParameter(param)
param = ParameterNumber(MDWF_PlotResults.REACH_ID, "Reach ID", 1, 500, 1)
param.isAdvanced = False
self.addParameter(param)
param = ParameterNumber(MDWF_PlotResults.SUB_ID, "Sub-basin ID", 1, 500, 1)
param.isAdvanced = False
self.addParameter(param)
param = ParameterNumber(MDWF_PlotResults.HRU_ID, "HRU ID", 1, 500, 1)
param.isAdvanced = False
self.addParameter(param)
self.addParameter(ParameterFile(MDWF_PlotResults.RES_OBSFILE, "Select file with corresponding observations", False))
def processAlgorithm(self, progress):
RES_FOLDER = self.getParameterValue(MDWF_PlotResults.RES_FOLDER)
RES_TYPE = self.getParameterValue(MDWF_PlotResults.RES_TYPE)
RES_VAR = self.getParameterValue(MDWF_PlotResults.RES_VAR)
REACH_ID = self.getParameterValue(MDWF_PlotResults.REACH_ID)
SUB_ID = self.getParameterValue(MDWF_PlotResults.SUB_ID)
HRU_ID = self.getParameterValue(MDWF_PlotResults.HRU_ID)
RES_OBSFILE = self.getParameterValue(MDWF_PlotResults.RES_OBSFILE)
TEMP_RES = self.getParameterValue(MDWF_PlotResults.TEMP_RES)
if RES_TYPE == 0:
RES_UNIT = RES_OUTSPECS.REACH_UNITS[RES_VAR]
RES_VARCOL = RES_OUTSPECS.REACH_RES_COLS[RES_VAR]
RES_VAR = RES_OUTSPECS.RESULT_VARIABLES[RES_VAR]
## data = read_SWAT_out.read_SWAT_out(RES_FOLDER,RES_OUTSPECS.REACH_DELIMITER,RES_OUTSPECS.REACH_SKIPROWS,RES_OUTSPECS.REACH_OUTNAME)
data = read_SWAT_out.read_SWAT_out(RES_FOLDER,RES_OUTSPECS.REACH_SKIPROWS,RES_OUTSPECS.REACH_OUTNAME)
data_ex = read_SWAT_out.reach_SWAT_ts(data,REACH_ID,RES_VARCOL,RES_VAR)
stime = read_SWAT_out.read_SWAT_time(RES_FOLDER)
if (TEMP_RES == 2) & (stime[-1] != 0):
raise GeoAlgorithmExecutionException('According to master watershed file (file.cio) the reach output file is not printed with a monthly time step.')
else:
read_SWAT_out.reach_tsplot(stime,data_ex,REACH_ID,RES_VAR,RES_UNIT,RES_FOLDER,RES_OUTSPECS.PYEX_DATE_OFFSET,RES_OBSFILE,TEMP_RES)
elif RES_TYPE == 1:
RES_UNIT = RES_OUTSPECS.SUB_UNITS[RES_VAR]
RES_VARCOL = RES_OUTSPECS.SUB_RES_COLS[RES_VAR]
RES_VAR = RES_OUTSPECS.RESULT_VARIABLES[RES_VAR]
## data = read_SWAT_out.read_SWAT_out(RES_FOLDER,RES_OUTSPECS.SUB_DELIMITER,RES_OUTSPECS.SUB_SKIPROWS,RES_OUTSPECS.SUB_OUTNAME)
data = read_SWAT_out.read_SWAT_out(RES_FOLDER,RES_OUTSPECS.SUB_SKIPROWS,RES_OUTSPECS.SUB_OUTNAME)
data_ex = read_SWAT_out.sub_SWAT_ts(data,SUB_ID,RES_VARCOL,RES_VAR)
stime = read_SWAT_out.read_SWAT_time(RES_FOLDER)
read_SWAT_out.sub_tsplot(stime,data_ex,SUB_ID,RES_VAR,RES_UNIT,RES_FOLDER,RES_OUTSPECS.PYEX_DATE_OFFSET,RES_OBSFILE)
elif RES_TYPE == 2:
RES_UNIT = RES_OUTSPECS.HRU_UNITS[RES_VAR]
RES_VARCOL = RES_OUTSPECS.HRU_RES_COLS[RES_VAR]
RES_VAR = RES_OUTSPECS.RESULT_VARIABLES[RES_VAR]
## data = read_SWAT_out.read_SWAT_out(RES_FOLDER,RES_OUTSPECS.HRU_DELIMITER,RES_OUTSPECS.HRU_SKIPROWS,RES_OUTSPECS.HRU_OUTNAME)
data = read_SWAT_out.read_SWAT_out(RES_FOLDER,RES_OUTSPECS.HRU_SKIPROWS,RES_OUTSPECS.HRU_OUTNAME)
data_ex = read_SWAT_out.hru_SWAT_ts(data,SUB_ID,HRU_ID,RES_VARCOL,RES_VAR)
stime = read_SWAT_out.read_SWAT_time(RES_FOLDER)
read_SWAT_out.hru_tsplot(stime,data_ex,SUB_ID,HRU_ID,RES_VAR,RES_UNIT,RES_FOLDER,RES_OUTSPECS.PYEX_DATE_OFFSET,RES_OBSFILE)
elif RES_TYPE == 3:
RES_UNIT = RES_OUTSPECS.RSV_UNITS[RES_VAR]
RES_VARCOL = RES_OUTSPECS.RSV_RES_COLS[RES_VAR]
RES_VAR = RES_OUTSPECS.RESULT_VARIABLES[RES_VAR]
data = read_SWAT_out.read_SWAT_out(RES_FOLDER,RES_OUTSPECS.RSV_SKIPROWS,RES_OUTSPECS.RSV_OUTNAME)
(data_ex, RSV_ID) = read_SWAT_out.rsv_SWAT_ts(RES_FOLDER,data,SUB_ID,RES_VARCOL,RES_VAR)
stime = read_SWAT_out.read_SWAT_time(RES_FOLDER)
read_SWAT_out.rsv_tsplot(stime,data_ex,SUB_ID,RSV_ID,RES_VAR,RES_UNIT,RES_FOLDER,RES_OUTSPECS.PYEX_DATE_OFFSET,RES_OBSFILE)
else:
raise GeoAlgorithmExecutionException('Result type not supported at the moment')
| gpl-3.0 |
lisongze/SForecast | tools/k_plot.py | 1 | 3119 | import time
from math import pi
import pandas as pd
from datetime import datetime
import numpy as np
import sys, os
from bokeh.io import output_notebook
from bokeh.plotting import figure, show, output_file
from bokeh.models import ColumnDataSource, Rect, HoverTool, Range1d, LinearAxis, WheelZoomTool, PanTool, ResetTool, ResizeTool, SaveTool
#output_notebook()
output_file("kline.html")
infile = sys.argv[1]
quotes = pd.read_csv(infile, index_col=[0])
#quotes[quotes['Volume']==0]=np.nan
quotes= quotes.dropna()
openp=quotes['Open']
closep=quotes['Close']
highp=quotes['High']
lowp=quotes['Low']
volume=quotes['Volume']
money=quotes['Money']
#time=quotes.index
#date=[x.strftime("%Y-%m-%d") for x in quotes.index]
#time=quotes['Time']
print quotes.columns
time=[datetime.strptime(x, '%Y-%m-%d') for x in quotes.index]
date=[x.strftime("%Y-%m-%d") for x in time]
quotes['Date']=time
quotes['Time']=quotes.index
w = 12*60*60*1000 # half day in ms
mids = (openp + closep)/2
spans = abs(closep-openp)
inc = closep >= openp
dec = openp > closep
quotes['Mids']=mids
quotes['Spans']=spans
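#Candle geometry note (editorial addition): each body below is a rect centred at
#Mids=(Open+Close)/2 with height Spans=|Close-Open|, drawn red when Close>=Open
#(inc) and green when Open>Close (dec); the high-low wicks are drawn as segments.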
ht = HoverTool(tooltips=[
("date", "@Time"),
("open", "@Open"),
("close", "@Close"),
("high", "@High"),
("low", "@Low"),
("volume", "@Volume"),
("money", "@Money"),])
TOOLS = [ht, WheelZoomTool(), ResizeTool(), ResetTool(),PanTool(), SaveTool()]
max_x = max(highp)
min_x = min(lowp)
x_range = max_x - min_x
y_range = (min_x - x_range / 2.0, max_x + x_range * 0.1)
p = figure(x_axis_type="datetime", tools=TOOLS, plot_height=600, plot_width=950,toolbar_location="above", y_range=y_range)
p.xaxis.major_label_orientation = pi/4
p.grid.grid_line_alpha=0.3
p.background_fill_color = "black"
quotesdate=dict(date1=quotes['Date'],open1=openp,close1=closep,high1=highp,low1=lowp)
ColumnDataSource(quotesdate)
x_rect_inc_src =ColumnDataSource(quotes[inc])
x_rect_dec_src =ColumnDataSource(quotes[dec])
time_inc = [time[i] for i in xrange(len(inc)) if inc[i]==True]
time_dec = [time[i] for i in xrange(len(inc)) if dec[i]==True]
mids_inc = [mids[i] for i in xrange(len(inc)) if inc[i]==True]
mids_dec = [mids[i] for i in xrange(len(inc)) if dec[i]==True]
spans_inc = [spans[i] for i in xrange(len(inc)) if inc[i]==True]
spans_dec = [spans[i] for i in xrange(len(inc)) if dec[i]==True]
highp_inc = [highp[i] for i in xrange(len(inc)) if inc[i]==True]
highp_dec = [highp[i] for i in xrange(len(inc)) if dec[i]==True]
lowp_inc = [lowp[i] for i in xrange(len(inc)) if inc[i]==True]
lowp_dec = [lowp[i] for i in xrange(len(inc)) if dec[i]==True]
p.rect(x='Date', y='Mids', width=w, height='Spans', fill_color="red", line_color="red", source=x_rect_inc_src)
p.rect(x='Date', y='Mids', width=w, height='Spans', fill_color="green", line_color="green", source=x_rect_dec_src)
#p.segment(time[inc], highp[inc], time[inc], lowp[inc], color="red")
#p.segment(time[dec], highp[dec], time[dec], lowp[dec], color="green")
p.segment(time_inc, highp_inc, time_inc, lowp_inc, color="red")
p.segment(time_dec, highp_dec, time_dec, lowp_dec, color="green")
show(p)
| mit |
dufferzafar/Python-Scripts | Facebook/Conversations/plot-count.py | 3 | 2122 | """
Plot the number of messages sent to and received from a friend.
You should run messages.py first.
"""
import os
import json
import time
from datetime import datetime as DT
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from messages import mkdir
ROOT = "Messages"
date_format = "%Y-%m-%d"
def pretty_epoch(epoch, fmt):
""" Convert timestamp to a pretty format. """
return time.strftime(fmt, time.localtime(epoch))
if __name__ == '__main__':
mkdir('Plot')
for friend in os.listdir(ROOT):
print("Processing conversation with %s" % friend)
messages = {}
# Read all the files of a friend & build a hashmap
for file in os.listdir(os.path.join(ROOT, friend)):
with open(os.path.join(ROOT, friend, file)) as inp:
data = json.load(inp)
for act in data['payload']['actions']:
# BUG: Why wouldn't body be present?
if 'body' in act:
# Facebook uses timestamps with 13 digits for milliseconds
# precision, while Python only needs the first 10 digits.
date = pretty_epoch(act['timestamp'] // 1000, date_format)
if date in messages:
messages[date] += 1
else:
messages[date] = 1
# Begin creating a new plot
plt.figure()
# Prepare the date
x, y = [], []
for date in messages.keys():
x.append(mdates.date2num(DT.strptime(date, date_format)))
y.append(messages[date])
# Use custom date format
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y-%m"))
plt.gca().xaxis.set_major_locator(mdates.MonthLocator())
# Plot!
plt.plot_date(x, y)
# Ensure that the x-axis ticks don't overlap
plt.gcf().autofmt_xdate()
plt.gcf().set_size_inches(17, 9)
# Save plot
plt.title("Conversation with %s" % friend)
plt.savefig("Plot/%s.png" % friend)
| unlicense |
jiansenzheng/oanda_trading | oanda_trading/forex17_0724_RevTreTickNW_EURUSD.py | 1 | 27811 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 06 20:00:30 2016
@author: Jiansen
"""
import requests
import threading
import copy
import logging
import os
#import urllib3
import json
from scipy import stats
#from decimal import Decimal, getcontext, ROUND_HALF_DOWN
#from event00 import TickEvent,TickEvent2
#import time
import oandapy
import httplib
import pandas as pd
import math
import numpy as np
import pywt
import time
from settings import STREAM_DOMAIN, API_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID
from param_EUR_USD import MA_dict, threshold_dict,sltp_dict
import Queue
#for writing data
import datetime
from bson.objectid import ObjectId
import pymongo as pm
from pymongo import MongoClient
import statsmodels.tsa.stattools as ts
#requests.adapters.DEFAULT_RETRIES = 5
from warningOps import warning
from seriesADF import getADF
corpid= 'wxf8ba6658b456540b'
secret='f78XFqKjNnNJF8Mpkb3BVh4BMpa-vbChBWMHu653KjFL0-mqT67lDQlt5YaEeD6w'
warn = warning(corpid,secret)
client = MongoClient('localhost',27017)
collection = client.test_database.tick_test
def getDoc(data):
lis=data['tick']
ask=lis['ask']
bid=lis['bid']
instrument=lis['instrument']
time0=datetime.datetime.strptime(lis['time'], '%Y-%m-%dT%H:%M:%S.%fZ')
date = time0.strftime("%Y-%m-%d")
hms = time0.strftime("%H:%M:%S")
    ms = time0.microsecond // 1000  # milliseconds, so sec_ms below is in ms
sec = 3600*time0.hour+60*time0.minute+ time0.second
sec_ms = sec*1000+ms
post = {u'ask':ask, u'bid': bid,u'instrument': instrument, u'date':date,
u'hms':hms,u'ms':ms,u'sec':sec,u'sec_ms':sec_ms}
return post
def denoise(X,wave0):
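    """Wavelet de-noising of a price series.

    Decomposes X with the given wavelet (e.g. 'db4'), hard-thresholds the
    coefficients (the threshold is overridden to 0 below, making this
    effectively a decompose/reconstruct round trip), and rebuilds the series.
    Series shorter than 8 samples are returned unchanged.
    """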
wavelet=wave0
if len(X)>=8:
level0= 1
if np.floor(np.log(len(X)))>7:
level0= np.floor(np.log(len(X))/2.0)
thres = 2*np.sqrt(2*np.log(len(X))/len(X))*np.std(X)
thres = 0.0
WaveletCoeffs = pywt.wavedec(X, wavelet, level=level0)
NewWaveletCoeffs = map (lambda x: pywt.threshold(x, thres, mode='hard'),WaveletCoeffs)
newWave2 = pywt.waverec( NewWaveletCoeffs, wavelet)
return newWave2
else:
logging.warning( "the series is too short")
return X
#compute the liquidity index
def ohlcv_lis(lis):
def get_ohlcv(candle, i):
return map(candle[i].get,["openMid","highMid","lowMid","closeMid","volume"])
ohlcv1 = np.array([get_ohlcv(lis,0)])
for i in range(1,len(lis)-1,1): # drop the last row
ohlcv1 = np.concatenate((ohlcv1, np.array([get_ohlcv(lis,i)])),axis=0)
return ohlcv1
def liq15min(lis):
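    """Liquidity proxy for a block of OHLCV bars.

    `vol_F` is a range-based per-bar volatility term built from
    open/high/low/close (similar in spirit to the Garman-Klass estimator); the
    index is sqrt(total volume / 100) divided by the mean per-bar volatility,
    rounded to the nearest integer.
    """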
def vol_F(q1, q2, q3, q4):
return (math.sqrt(math.log(q2/q3) - 2.0*(2.0*math.log(2.0) - 1.0)*math.log(q4/q1)))
liq = 0.0
sigma = pd.Series()
for i in range(0,len(lis),1):
s1 = vol_F(lis[i,0],lis[i,1],lis[i,2],lis[i,3])
sigma = np.append(sigma,s1)
liq = math.sqrt(np.sum(lis[:,4])/100)/np.mean(sigma)
liq = round(liq)
return liq
#-------------------------#
class Event(object):
pass
class TickEvent2(Event):
def __init__(self, instrument, time, bid, ask):
self.type = 'TICK'
self.instrument = instrument
self.time = time
self.bid = bid
self.ask = ask
class LiqEvent(Event):
def __init__(self, instrument, time, liq):
self.type = 'LIQ'
self.instrument = instrument
self.time = time
self.liq = liq
class OrderEvent(Event):
def __init__(self, instrument, units, order_type, side, stopLoss, takeProfit,stra):
self.type = 'ORDER'
self.instrument = instrument
self.units = units
self.order_type = order_type
self.side = side
        self.stopLoss = stopLoss
self.takeProfit = takeProfit
self.stra = stra
class CloseEvent(Event):
def __init__(self, instrument,num):
self.type = 'CLOSE'
self.instrument = instrument
self.num = num
#--------------------------Liq------------------------------------------#
class LiqForex(object):
def __init__(
self, domain, access_token,
account_id, instruments,ct, gran, dd, events_queue
):
self.domain = domain
self.access_token = access_token
self.account_id = account_id
self.instruments = instruments
self.ct = ct
self.gran=gran
self.dd= dd
self.events_queue = events_queue
def getLiq(self):
try:
requests.packages.urllib3.disable_warnings()
s = requests.Session()
#s.keep_alive = False
url = "https://" + self.domain + "/v1/candles"
headers = {'Authorization' : 'Bearer ' + self.access_token}
params = {'instrument':self.instruments, 'accountId' : self.account_id,
'count':self.ct,'candleFormat':'midpoint','granularity':self.gran}
req = requests.Request('GET', url, headers=headers, params=params)
pre = req.prepare()
logging.info( pre)
resp = s.send(pre, stream=False, verify=False)
try:
msg=json.loads(resp.text)
except Exception as e:
logging.warning( "Caught exception when converting message into json\n" + str(e))
return
if msg.has_key("candles"):
time=msg.get("candles")[-1]["time"]
lis = ohlcv_lis(msg.get("candles"))
liqS = pd.Series()
for i in range(0, len(lis)- (self.dd+1) ,1):
s2 = liq15min(lis[i:i+self.dd])
liqS = np.append(liqS,s2)
liq=liqS[-1]
logging.info( "liq=".format(liq))
tev = LiqEvent(self.instruments,time,liq)
self.events_queue.put(tev,False)
except Exception as e:
s.close()
content0 = "Caught exception when connecting to history\n" + str(e)
logging.warning(content0)
#warn.tradingWarning(content0)
def activeLiq(self,period):
while True:
self.getLiq()
time.sleep(period)
#--------------------------------------------------------------------#
class StreamingForexPrices(object):
def __init__(
self, domain, access_token,
account_id, instruments,ct, gran, dd, events_queue
):
self.domain = domain
self.access_token = access_token
self.account_id = account_id
self.instruments = instruments
self.ct = ct
self.gran=gran
self.dd= dd
self.events_queue = events_queue
def connect_to_stream(self):
try:
requests.packages.urllib3.disable_warnings()
s = requests.Session() # socket
url = "https://" + self.domain + "/v1/prices"
headers = {'Authorization' : 'Bearer ' + self.access_token}
params = {'instruments' : self.instruments, 'accountId' : self.account_id}
time.sleep(0.8) # sleep some seconds
req = requests.Request('GET', url, headers=headers, params=params)
pre = req.prepare()
resp = s.send(pre, stream=True, verify=False)
return resp
except Exception as e:
#global s
s.close()
content0 = "Caught exception when connecting to stream\n" + str(e)
logging.warning(content0)
#warn.tradingWarning(content0)
def stream_to_queue_old(self,collection):
response = self.connect_to_stream()
if response.status_code != 200:
return
for line in response.iter_lines(1):
if line:
try:
msg = json.loads(line)
except Exception as e:
content0 = "Caught exception when converting message into json\n" + str(e)
logging.warning(content0)
return
if msg.has_key("instrument") or msg.has_key("tick"):
logging.info(msg)
instrument = msg["tick"]["instrument"]
time = msg["tick"]["time"]
bid = msg["tick"]["bid"]
ask = msg["tick"]["ask"]
tev = TickEvent2(instrument, time, bid, ask)
self.events_queue.put(tev,False)
post= getDoc(msg)
collection.insert_one(post)
#--------------
#------
# new strategy
class LiqMAStrategy(object):
"""
"""
def __init__(
self, access_token, account_id, pairs, units, events, stopLoss1, takeProfit1,stopLoss2, takeProfit2,
short_window1, long_window1,short_window2, long_window2, idxU, lam, thres1, thres2,thres3, thres4, adf_thres
):
self.access_token = access_token
self.account_id = account_id
self.pairs = pairs
self.units = units
self.stopLoss1 = stopLoss1
self.takeProfit1 = takeProfit1
self.stopLoss2 = stopLoss2
self.takeProfit2 = takeProfit2
self.pairs_dict = self.create_pairs_dict()
self.events = events
self.short_window1 = short_window1
self.long_window1 = long_window1
self.short_window2 = short_window2
self.long_window2 = long_window2
self.idxU = idxU
self.lam = lam
self.priceLis1 = pd.Series() #for trends
self.priceLis2 = pd.Series() #for reversion
self.thres1 = thres1
self.thres2 = thres2
self.thres3 = thres3
self.thres4 = thres4
self.adf_thres = adf_thres
def create_pairs_dict(self):
attr_dict = {
"ticks": 0,
"tick0": 0,
"priceLS":0.0,
"invested": False,
"short_sma": None,
"long_sma": None,
"longShort": None,
"short_slope":None,
"long_slope":None, # False denotes sell, while True denotes buy
"check": False,
"orlis":[0,0,0,0],
"stra": 0,
"fixed": False
}
#pairs_dict = {}
pairs_dict = copy.deepcopy(attr_dict)
return pairs_dict
def check_order(self,check):
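        """Detect broker-side exits (stop-loss / take-profit fills).

        When `check` is True but the broker reports no open trades for this
        pair, the protective order must have been executed, so the
        invested/fixed/check flags are reset. `orlis` keeps a short history of
        open(1)/closed(0) observations so the transition is only acted on once.
        """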
if check== True:
oanda0 = oandapy.API(environment="practice", access_token=self.access_token)
responseTrades = oanda0.get_trades(self.account_id,instrument=self.pairs)
if responseTrades.get("trades")==[]:
pd = self.pairs_dict
pd["orlis"].pop(0)
logging.info(" orlis: "+str(pd["orlis"]))
pd["orlis"].append(0)
logging.info(" orlis: "+str(pd["orlis"]))
if pd["orlis"][0:4]==[1,1,0,0]:
logging.warning( "Stop Loss Order Executed!")
#warn.tradingWarning(" Stop Loss Order Executed!")
pd["invested"]= False
pd["fixed"] = False #position closed, the stra type is free
pd["check"] = False
else:
pass
else:
pd = self.pairs_dict
#pd["orlis"][0] = copy.copy(pd["orlis"][1])
pd["orlis"].pop(0)
pd["orlis"].append(1)
logging.info("not empty- orlis: "+str(pd["orlis"]))
pd["invested"]= True
pd["fixed"] = True #position closed, the stra type is free
pd["check"] = True
else:
pass
def getSlope(self,aa):
'''
return the slope ratio of a time series
---args---
aa: a (np.ndarray) object as a time series
'''
return stats.linregress(np.arange(0,len(aa),1),aa)[0]
def get_new_price_lis(self,price_lis,pairs_dict,window):
'''
        Denoise `price_lis`, update `pairs_dict["short_sma"]` (mean of the last
        `window` points of the denoised series) and `pairs_dict["long_sma"]`
        (mean of the whole denoised series), and return the denoised series.
        Arguments:
            price_lis {np.ndarray} -- raw price series
            pairs_dict {dict} -- per-pair state dictionary, updated in place
            window {int} -- length of the short moving-average window
'''
newPriceLis = denoise(price_lis,'db4')
pairs_dict["short_sma"] = np.mean(newPriceLis[-window:])
pairs_dict["long_sma"] = np.mean(newPriceLis)
return newPriceLis
def compute_slope(self,price_lis,window_length,k):
        '''
        Compute the weighted slope ratio of a (denoised) price series.
        Arguments:
            price_lis {np.ndarray} -- filtered series used for both the short
                and the long moving averages (default: newPriceLis)
            window_length {int} -- length of the short window
                (default: self.short_window1 or self.short_window2)
            k {float} -- weight given to the short-window slope in the
                weighted average (default: 0.5)
Returns:
[float] -- [the slope ratio]
'''
amp = lambda lis: (lis-lis[0])*10000.0
pShort = amp(price_lis[-window_length:])
pLong = amp(price_lis)
#compute the slope ratio
aveSlope = k*self.getSlope(pShort)+ (1-k)*self.getSlope(pLong)
return aveSlope
def set_invested_check_fixed(self,pair_dict,invested_bool,check_bool,fixed_bool):
pair_dict["invested"] = invested_bool
pair_dict["check"] = check_bool
pair_dict["fixed"] = fixed_bool
time.sleep(0.0)
def calculate_signals(self, event):
#if True:
global liqIndex
global newPriceLis
if event.type == 'TICK':
price = (event.bid+event.ask)/2.000
self.priceLis1 = np.append(self.priceLis1,price)
self.priceLis2 = np.append(self.priceLis2,price)
if len(self.priceLis1)>max([self.long_window1,self.long_window2]):
self.priceLis1=self.priceLis1[-self.long_window1:]
self.priceLis2=self.priceLis2[-self.long_window2:]
else:
pass
#liqIndex= event.liq
logging.info("liqIndex= "+str(liqIndex)+"\n")
logging.info("price= "+str(price))
pd = self.pairs_dict
logging.info("check"+str(pd["check"]))
self.check_order(pd["check"]) #check whether the SLTP order is triggered..
# Only start the strategy when we have created an accurate short window
logging.info("INVESTED= "+str(pd["invested"]))
if not pd["invested"]:
#global price0
if pd["ticks"]>max([self.long_window1, self.long_window2])+1 and liqIndex > self.idxU:
if not pd["fixed"]:
critAdf = getADF(collection).priceADF(200,1)
if critAdf > self.adf_thres:
pd["stra"] = "reversion"
newPriceLis = self.get_new_price_lis(self.priceLis2, pd, self.short_window2)
aveSlope = self.compute_slope(newPriceLis,self.short_window2, 0.5)
logging.info( "REVERSION+aveSlope="+str(aveSlope))
else:
pd["stra"] = "trends"
newPriceLis = self.get_new_price_lis(self.priceLis1, pd, self.short_window1)
aveSlope = self.compute_slope(newPriceLis,self.short_window1, 0.5)
logging.info("TRENDS+aveSlope="+str(aveSlope))
else:
raise ValueError("pd[fixed] should be False!")
price0, price1 = event.bid, event.ask
if pd["stra"] =="trends":
if pd["short_sma"] > pd["long_sma"] and aveSlope> self.thres1:
side = "buy"
logging.info("price02={0}".format(price0))
self.set_invested_check_fixed(pd,True,True,True)
sl_b, tp_b= round(price0 - self.stopLoss1,5),round(price1 + self.takeProfit1,5)
order = OrderEvent(self.pairs, self.units, "market", side, sl_b, tp_b,"Trends")
self.events.put(order)
pd["longShort"] = True
pd["tick0"]= pd["ticks"]
pd["priceLS"]= price0
elif pd["short_sma"] < pd["long_sma"] and aveSlope< -self.thres1:
side = "sell"
logging.info("price01={0}".format(price1))
self.set_invested_check_fixed(pd,True,True,True)
sl_s,tp_s = round(price1 + self.stopLoss1,5),round(price0 - self.takeProfit1,5)
order = OrderEvent(self.pairs, self.units, "market", side, sl_s, tp_s,"Trends")
self.events.put(order)
pd["longShort"] = False
pd["tick0"]= pd["ticks"]
pd["priceLS"]= price1
else:
pd["fixed"] = False
elif pd["stra"] =="reversion":
if pd["short_sma"] > pd["long_sma"] and aveSlope> self.thres3:
side = "sell"
logging.info("price02={0}".format(price1))
self.set_invested_check_fixed(pd,True,True,True)
sl_s,tp_s = round(price1+self.stopLoss2,5),round(price0-self.takeProfit2,5)
order = OrderEvent(self.pairs, self.units, "market", side, sl_s, tp_s,"reversion")
self.events.put(order)
pd["longShort"] = False
pd["tick0"]= pd["ticks"]
pd["priceLS"]= price0
elif pd["short_sma"] < pd["long_sma"] and aveSlope< -self.thres3:
side = "buy"
logging.info("price01={0}".format(price0))
self.set_invested_check_fixed(pd,True,True,True)
sl_b, tp_b = round(price0-self.stopLoss2,5),round(price1+self.takeProfit2,5)
order = OrderEvent(self.pairs, self.units, "market", side, sl_b, tp_b,"reversion")
self.events.put(order)
pd["longShort"] = True
pd["tick0"]= pd["ticks"]
pd["priceLS"]= price1
else:
pd["fixed"] = False
else:
pass
else:
pass
elif pd["invested"]:
sign= 1 if pd["longShort"] == True else -1
if pd["stra"] =="trends":
logging.info("Trends position!")
newPriceLis = self.get_new_price_lis(self.priceLis1, pd, self.short_window1)
basePrice=pd["priceLS"]+sign*self.lam*np.std(self.priceLis1)*np.sqrt(pd["ticks"]-pd["tick0"])
logging.info( "basePrice="+str(basePrice))
logging.info( "short_sma"+str(pd["short_sma"]))
logging.info( "long_sma"+str(pd["long_sma"]))
aveSlope = self.compute_slope(newPriceLis,self.short_window1, 0.5)
logging.info( "aveSlope="+str(aveSlope))
if not pd["longShort"] and aveSlope > -self.thres2:
#side = "sell"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
close_order = CloseEvent(self.pairs,0)
self.events.put(close_order)
elif pd["longShort"] and aveSlope < self.thres2:
#side = "buy"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
close_order = CloseEvent(self.pairs,0)
self.events.put(close_order)
else: #not closing positions, just keep the pd["fixed"] as True.
pd["fixed"] = True #should we add pd["invested"]
elif pd["stra"] =="reversion":
logging.info( "Reversion position!")
newPriceLis = self.get_new_price_lis(self.priceLis2, pd, self.short_window2)
basePrice=pd["priceLS"]+sign*self.lam*np.std(self.priceLis2)*np.sqrt(pd["ticks"]-pd["tick0"])
logging.info( "basePrice="+str(basePrice))
logging.info( "short_sma"+str(pd["short_sma"]))
logging.info( "long_sma"+str(pd["long_sma"]))
aveSlope = self.compute_slope(newPriceLis,self.short_window2, 0.5)
logging.info( "aveSlope="+str(aveSlope))
if pd["short_sma"] < pd["long_sma"]-0.00006 and not pd["longShort"]:
#side = "sell"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
close_order = CloseEvent(self.pairs,0)
self.events.put(close_order)
elif pd["short_sma"] > pd["long_sma"]+0.00006 and pd["longShort"]:
#side = "buy"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
close_order = CloseEvent(self.pairs,0)
self.events.put(close_order)
else:
pd["fixed"] = True #should we add pd["invested"]
else:
pass
pd["ticks"] += 1
logging.info("current Tick "+str(pd["ticks"])+"\n"+str(time.ctime()))
#--------------------------------------------------------------------#
class Execution(object):
def __init__(self, domain, access_token, account_id):
self.domain = domain
self.access_token = access_token
self.account_id = account_id
self.conn = self.obtain_connection()
def obtain_connection(self):
return httplib.HTTPSConnection(self.domain)
def execute_order(self, event):
oanda0 = oandapy.API(environment="practice", access_token=self.access_token)
try:
responseX = oanda0.create_order(self.account_id,
instrument=event.instrument,
units= event.units,
side= event.side,
type= event.order_type,
stopLoss = event.stopLoss,
takeProfit = event.takeProfit
)
except Exception as e:
content0 = "Caught OnadaError when sending the orders\n" + str(e)
logging.warning(content0)
return
logging.info( "Execute Order ! \n {0}".format(responseX))
content0 = str(event.stra)+"Execute Order ! "+" "+str(event.side)+" "+ str(event.units)+" units of "+str(event.instrument)
#warn.tradingWarning(content0)
logging.info(content0)
def close_order(self, event):
oanda0 = oandapy.API(environment="practice", access_token=self.access_token)
response1= oanda0.get_trades(self.account_id,instrument=event.instrument)
order_lis= response1["trades"]
if order_lis !=[]:
for order in order_lis: #close all trades
responseX = oanda0.close_trade(self.account_id,trade_id= order['id'])
logging.info( "Close Order ! \n {0}".format(responseX))
content0 = "Close Order !" + "profit: "+str(responseX['profit'])+" CLOSE "+str(responseX['instrument'])
content0 = content0 + " "+str(responseX['side'])+" at "+ str(responseX['price'])
#warn.tradingWarning(content0)
else:
logging.warning("No trade to be closed! :{0}".format(time.ctime()))
#--------------------------------------------------------------------#
def trade(events, strategy,execution,heartbeat):
"""
"""
global liqIndex
while True:
try:
event = events.get(False)
except Queue.Empty:
pass
else:
if event is not None:
if event.type =='LIQ':
liqIndex= event.liq
#print "current index ="+str(liqIndex)
elif event.type == 'TICK':
strategy.calculate_signals(event)
logging.info( "Tick!")
elif event.type == 'ORDER':
logging.info( "Executing order!")
execution.execute_order(event)
elif event.type == "CLOSE":
logging.info( "Close trading!")
execution.close_order(event)
time.sleep(heartbeat)
#--------------------------------------------------------------------#
if __name__ == "__main__":
pairs = "EUR_USD"
logPath = '/home/zheng/data/trading/EUR_USD/'
logName = 'eur_usd.log'
logging.basicConfig(filename= os.path.join(logPath,logName),
format='%(levelname)s:%(message)s',level=logging.DEBUG)
global liqIndex
liqIndex=0
ct = 20
gran ='M15'
time_dict = {
"S5": 5,
"S10": 10,
"S15": 15,
"S30": 30,
"M1": 60,
"M2": 120 }
dd = 11
lam= 0.1 #0.5 basePrice tuning
units = 100 #100
#----------Parameters----------------
short_window1= MA_dict['short_window1']
long_window1 = MA_dict['long_window1']
short_window2= MA_dict['short_window2']
long_window2 = MA_dict['long_window2']
idxu = threshold_dict['idxu']
thres1= threshold_dict['thres1']
thres2= threshold_dict['thres2']
thres3 = threshold_dict['thres3']
thres4= threshold_dict['thres4']
adf_thres = threshold_dict['adf_thres']
sl1 = sltp_dict['sl1'] #10
tp1 = sltp_dict['tp1'] #10
sl2 = sltp_dict['sl2'] #10
tp2 = sltp_dict['tp2'] #10
#--------------------------------------
#pairs = "EUR_USD"
#pip = 10000.0
heartbeat= 0.2
period= 600
print 'initial'
print('MA:\n sw1 {0} lw1 {1} sw2 {2} lw2 {3}'.format(short_window1, long_window1, short_window2, long_window2))
print('parameters:\n thres1 {0} thres2 {1} thres3 {2} thres4 {3}'.format(thres1,thres2,thres3,thres4))
print('sltp_parameters:\n {0} {1} {2} {3}'.format(sl1,tp1,sl2,tp2))
events = Queue.Queue()
# initial the threads
prices = StreamingForexPrices(STREAM_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID, pairs, ct, gran, dd, events)
liquidity = LiqForex(API_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID, pairs, ct, gran, dd, events)
execution = Execution(API_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID)
#strategy = MovingAverageCrossStrategy(pairs, units, events, sl, tp, short_window,long_window)
strategy = LiqMAStrategy(ACCESS_TOKEN, ACCOUNT_ID, pairs, units, events, sl1, tp1, sl2, tp2, short_window1,long_window1,
short_window2,long_window2,idxu,lam,thres1,thres2,thres3,thres4,adf_thres)
# construct the thread
price_thread = threading.Thread(target=prices.stream_to_queue_old, args=[collection])
liq_thread = threading.Thread(target= liquidity.activeLiq, args=[period])
trade_thread = threading.Thread(target=trade, args=(events, strategy,execution,heartbeat))
print "Full?:",events.full()
trade_thread.start()
price_thread.start()
liq_thread.start()
| gpl-3.0 |
a-ro/preimage | preimage/kernels/polynomial.py | 1 | 2100 | __author__ = 'amelie'
import numpy
from sklearn.base import BaseEstimator
class PolynomialKernel(BaseEstimator):
"""Polynomial kernel.
Attributes
----------
degree : int
Degree.
bias : float
Bias.
is_normalized : bool
True if the kernel should be normalized, False otherwise.
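    Example
    -------
    Illustrative usage with made-up toy inputs (only the output shape is
    asserted here):

    >>> import numpy
    >>> kernel = PolynomialKernel(degree=2, bias=1., is_normalized=True)
    >>> gram = kernel(numpy.array([[0., 1.], [1., 0.]]), numpy.array([[1., 1.]]))
    >>> gram.shape
    (2, 1)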
"""
def __init__(self, degree=2, bias=1., is_normalized=True):
self.degree = degree
self.bias = bias
self.is_normalized = is_normalized
def __call__(self, X_one, X_two):
"""Compute the similarity of all the vectors in X1 with all the vectors in X2.
Parameters
----------
X1 : array, shape=[n_samples, n_features]
Vectors, where n_samples is the number of samples in X1 and n_features is the number of features.
X2 : array, shape=[n_samples, n_features]
Vectors, where n_samples is the number of samples in X2 and n_features is the number of features.
Returns
-------
gram_matrix : array, shape = [n_samples_x1, n_samples_x2]
Similarity of each vector of X1 with each vector of X2, where n_samples_x1 is the number of samples in X1
and n_samples_x2 is the number of samples in X2.
"""
X_one = numpy.array(X_one)
X_two = numpy.array(X_two)
gram_matrix = (numpy.dot(X_one, X_two.T) + self.bias) ** self.degree
if self.is_normalized:
gram_matrix = self._normalize_gram_matrix(X_one, X_two, gram_matrix)
return gram_matrix
def _normalize_gram_matrix(self, X_one, X_two, gram_matrix):
x_one_diagonal = self._compute_element_wise_similarity(X_one)
x_two_diagonal = self._compute_element_wise_similarity(X_two)
gram_matrix = ((gram_matrix / numpy.sqrt(x_one_diagonal)).T / numpy.sqrt(x_two_diagonal)).T
return gram_matrix
def _compute_element_wise_similarity(self, X):
x_x_similarity = ((X * X).sum(axis=1) + self.bias) ** self.degree
x_x_similarity = x_x_similarity.reshape(-1, 1)
return x_x_similarity | bsd-2-clause |
pranavtbhat/EE219 | project2/Project2_404761131_004758927_704741684/d.py | 2 | 2133 | from sklearn.feature_extraction import text
from sklearn.pipeline import Pipeline
import cPickle
from sklearn.decomposition import TruncatedSVD
import os
import a
import b
stop_words = text.ENGLISH_STOP_WORDS
def get_svd():
return TruncatedSVD(n_components=50)
def fetch_lsi_representation(train, test):
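    """Project the train and test corpora into a 50-dimensional LSI space.

    Builds a pipeline of the count vectorizer and TF-IDF transformer from
    module `b` followed by TruncatedSVD(n_components=50), fits it on the
    training documents and applies the fitted transform to the test documents.
    """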
pipeline = Pipeline(
[
('vectorize', b.get_vectorizer()),
('tf-idf', b.get_tfid_transformer()),
('svd', get_svd())
]
)
svd_matrix_train = pipeline.fit_transform(train.data)
svd_matrix_test = pipeline.transform(test.data)
return svd_matrix_train, svd_matrix_test
def fetch_lsi_representation_catched(train, test):
if not (os.path.isfile("Data/Train_LSI.pkl") and os.path.isfile("Data/Test_LSI.pkl")):
print "Performing LSI on the TFxIDF matrices for Train and Test"
svd_matrix_train, svd_matrix_test = fetch_lsi_representation(
train,
test
)
cPickle.dump(svd_matrix_train, open("data/Train_LSI.pkl", "wb"))
cPickle.dump(svd_matrix_test, open("data/Test_LSI.pkl", "wb"))
return svd_matrix_train, svd_matrix_test
else:
svd_matrix_train = cPickle.load(open("Data/Train_LSI.pkl", "r"))
svd_matrix_test = cPickle.load(open("Data/Test_LSI.pkl", "r"))
return svd_matrix_train, svd_matrix_test
if __name__ == "__main__":
categories=[
'comp.graphics',
'comp.os.ms-windows.misc',
'comp.sys.ibm.pc.hardware',
'comp.sys.mac.hardware',
'rec.autos',
'rec.motorcycles',
'rec.sport.baseball',
'rec.sport.hockey'
]
train = a.fetch_train(categories)
test = a.fetch_test(categories)
svd_matrix_train, svd_matrix_test = fetch_lsi_representation(
train,
test
)
print "Size of Training LSI representation is ", svd_matrix_train.shape
print "Size of Testing LSI representation is ", svd_matrix_test.shape
cPickle.dump(svd_matrix_train, open("data/Train_LSI.pkl", "wb"))
cPickle.dump(svd_matrix_test, open("data/Test_LSI.pkl", "wb"))
| unlicense |
avian2/spectrum-sensing-methods | simulate_analyze.py | 1 | 2240 | import glob
import numpy
import re
import sys
import os
from matplotlib import pyplot
def get_ccdf(x):
xs = numpy.array(x)
xs.sort()
N = float(len(xs))
P = numpy.arange(N)/N
return xs, P
def get_gamma0(campaign_glob, Pfa=0.1):
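    """Estimate the detection threshold from the signal-off campaign.

    Loads the noise-only test statistics, builds their empirical distribution,
    and returns the (1 - Pfa) quantile, i.e. the threshold that the noise-only
    statistic exceeds with probability Pfa (the target false-alarm rate).
    """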
path = campaign_glob.replace("*", "off")
gammaN = numpy.loadtxt(path)
gammaN, Pd = get_ccdf(gammaN)
gamma0 = numpy.interp(1. - Pfa, Pd, gammaN)
return gamma0
def iterate_campaign(path):
for fn in glob.glob(path):
g = re.search("_m([0-9_]+)dbm\.dat$", fn)
if g:
Pg = -float(g.group(1).replace('_', '.'))
gamma = numpy.loadtxt(fn)
yield Pg, gamma
def get_campaign_g(path, gamma0):
Pg = []
Pd = []
for Pg0, gamma in iterate_campaign(path):
Pg.append(Pg0)
Pd.append(numpy.mean(gamma > gamma0))
Pg = numpy.array(Pg)
Pd = numpy.array(Pd)
Pga = Pg.argsort()
Pd = Pd[Pga]
Pg = Pg[Pga]
return Pg, Pd
def get_campaign(path, gamma0):
Pg, Pd = get_campaign_g(path, gamma0)
Pin = Pg
return Pin, Pd
def get_pinmin(campaign_glob, gamma0, Pdmin, figdir):
Pin, Pd = get_campaign(campaign_glob, gamma0)
figname = os.path.basename(campaign_glob).replace("_*.dat", ".png")
figpath = os.path.join(figdir, figname)
pyplot.figure()
pyplot.plot(Pin, Pd)
pyplot.xlabel("Pin")
pyplot.ylabel("Pd")
pyplot.axis([None, None, 0, 1])
pyplot.title(os.path.basename(campaign_glob))
pyplot.grid()
pyplot.savefig(figpath)
pyplot.close()
Pinmin = numpy.interp(Pdmin, Pd, Pin, left=0, right=0)
return Pinmin
def process_campaign(campaign_glob, fout, figdir):
gamma0 = get_gamma0(campaign_glob)
Pinmin = get_pinmin(campaign_glob, gamma0, .9, figdir)
fout.write("%s\t%f\n" % (campaign_glob, Pinmin))
def main():
try:
indir = sys.argv[1]
except IndexError:
print "USAGE: %s dir_to_analyze" % (sys.argv[0],)
return
outdir = os.path.join(indir, "ana")
figdir = os.path.join(indir, "fig")
try:
os.mkdir(outdir)
except OSError:
pass
try:
os.mkdir(figdir)
except OSError:
pass
fout = open("%s/pinmin.dat" % (outdir,), "w")
for path in glob.glob("%s/dat/*_off.dat" % (indir,)):
campaign_glob = path.replace("_off.", "_*.")
print campaign_glob
process_campaign(campaign_glob, fout, figdir)
fout.close()
if __name__ == '__main__':
main()
| gpl-3.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/api/test_api.py | 7 | 7858 | # -*- coding: utf-8 -*-
from warnings import catch_warnings
import pytest
import pandas as pd
from pandas import api
from pandas.util import testing as tm
class Base(object):
def check(self, namespace, expected, ignored=None):
# see which names are in the namespace, minus optional
# ignored ones
# compare vs the expected
result = sorted([f for f in dir(namespace) if not f.startswith('_')])
if ignored is not None:
result = sorted(list(set(result) - set(ignored)))
expected = sorted(expected)
tm.assert_almost_equal(result, expected)
class TestPDApi(Base):
# these are optionally imported based on testing
# & need to be ignored
ignored = ['tests', 'locale', 'conftest']
# top-level sub-packages
lib = ['api', 'compat', 'core', 'errors', 'pandas',
'plotting', 'test', 'testing', 'tools', 'tseries',
'util', 'options', 'io']
# these are already deprecated; awaiting removal
deprecated_modules = ['stats', 'datetools', 'parser',
'json', 'lib', 'tslib']
# misc
misc = ['IndexSlice', 'NaT']
# top-level classes
classes = ['Categorical', 'CategoricalIndex', 'DataFrame', 'DateOffset',
'DatetimeIndex', 'ExcelFile', 'ExcelWriter', 'Float64Index',
'Grouper', 'HDFStore', 'Index', 'Int64Index', 'MultiIndex',
'Period', 'PeriodIndex', 'RangeIndex', 'UInt64Index',
'Series', 'SparseArray', 'SparseDataFrame',
'SparseSeries', 'TimeGrouper', 'Timedelta',
'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex']
# these are already deprecated; awaiting removal
deprecated_classes = ['WidePanel', 'Panel4D',
'SparseList', 'Expr', 'Term']
# these should be deprecated in the future
deprecated_classes_in_future = ['Panel']
# external modules exposed in pandas namespace
modules = ['np', 'datetime']
# top-level functions
funcs = ['bdate_range', 'concat', 'crosstab', 'cut',
'date_range', 'interval_range', 'eval',
'factorize', 'get_dummies',
'infer_freq', 'isnull', 'lreshape',
'melt', 'notnull', 'offsets',
'merge', 'merge_ordered', 'merge_asof',
'period_range',
'pivot', 'pivot_table', 'qcut',
'show_versions', 'timedelta_range', 'unique',
'value_counts', 'wide_to_long']
# top-level option funcs
funcs_option = ['reset_option', 'describe_option', 'get_option',
'option_context', 'set_option',
'set_eng_float_format']
# top-level read_* funcs
funcs_read = ['read_clipboard', 'read_csv', 'read_excel', 'read_fwf',
'read_gbq', 'read_hdf', 'read_html', 'read_json',
'read_msgpack', 'read_pickle', 'read_sas', 'read_sql',
'read_sql_query', 'read_sql_table', 'read_stata',
'read_table', 'read_feather']
# top-level to_* funcs
funcs_to = ['to_datetime', 'to_msgpack',
'to_numeric', 'to_pickle', 'to_timedelta']
# these are already deprecated; awaiting removal
deprecated_funcs = ['ewma', 'ewmcorr', 'ewmcov', 'ewmstd', 'ewmvar',
'ewmvol', 'expanding_apply', 'expanding_corr',
'expanding_count', 'expanding_cov', 'expanding_kurt',
'expanding_max', 'expanding_mean', 'expanding_median',
'expanding_min', 'expanding_quantile',
'expanding_skew', 'expanding_std', 'expanding_sum',
'expanding_var', 'rolling_apply',
'rolling_corr', 'rolling_count', 'rolling_cov',
'rolling_kurt', 'rolling_max', 'rolling_mean',
'rolling_median', 'rolling_min', 'rolling_quantile',
'rolling_skew', 'rolling_std', 'rolling_sum',
'rolling_var', 'rolling_window', 'ordered_merge',
'pnow', 'match', 'groupby', 'get_store',
'plot_params', 'scatter_matrix']
def test_api(self):
self.check(pd,
self.lib + self.misc +
self.modules + self.deprecated_modules +
self.classes + self.deprecated_classes +
self.deprecated_classes_in_future +
self.funcs + self.funcs_option +
self.funcs_read + self.funcs_to +
self.deprecated_funcs,
self.ignored)
class TestApi(Base):
allowed = ['types']
def test_api(self):
self.check(api, self.allowed)
class TestTesting(Base):
funcs = ['assert_frame_equal', 'assert_series_equal',
'assert_index_equal']
def test_testing(self):
from pandas import testing
self.check(testing, self.funcs)
class TestDatetoolsDeprecation(object):
def test_deprecation_access_func(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.datetools.to_datetime('2016-01-01')
def test_deprecation_access_obj(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.datetools.monthEnd
class TestTopLevelDeprecations(object):
# top-level API deprecations
# GH 13790
def test_pnow(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.pnow(freq='M')
def test_term(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.Term('index>=date')
def test_expr(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.Expr('2>1')
def test_match(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.match([1, 2, 3], [1])
def test_groupby(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.groupby(pd.Series([1, 2, 3]), [1, 1, 1])
# GH 15940
def test_get_store(self):
pytest.importorskip('tables')
with tm.ensure_clean() as path:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
s = pd.get_store(path)
s.close()
class TestJson(object):
def test_deprecation_access_func(self):
with catch_warnings(record=True):
pd.json.dumps([])
class TestParser(object):
def test_deprecation_access_func(self):
with catch_warnings(record=True):
pd.parser.na_values
class TestLib(object):
def test_deprecation_access_func(self):
with catch_warnings(record=True):
pd.lib.infer_dtype('foo')
class TestTSLib(object):
def test_deprecation_access_func(self):
with catch_warnings(record=True):
pd.tslib.Timestamp('20160101')
class TestTypes(object):
def test_deprecation_access_func(self):
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
from pandas.types.concat import union_categoricals
c1 = pd.Categorical(list('aabc'))
c2 = pd.Categorical(list('abcd'))
union_categoricals(
[c1, c2],
sort_categories=True,
ignore_order=True)
| mit |
startcode/apollo | modules/tools/navigation/planning/obstacle_decider.py | 1 | 8075 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from shapely.geometry import LineString
from shapely.geometry import Point
class ObstacleDecider:
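    """Decides how far the ego vehicle should nudge laterally around obstacles.

    Mobileye obstacles are projected onto the planned path to obtain signed
    lateral distances; these are converted into a single left/right nudge
    distance, bounded by how far the car can shift within the default lane
    width.
    """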
def __init__(self):
self.obstacle_lat_ttc = {}
self.obstacle_lon_ttc = {}
self.obstacle_lat_dist = {}
self.obstacle_lon_dist = {}
self.front_edge_to_center = 3.89
self.back_edge_to_center = 1.043
self.left_edge_to_center = 1.055
self.right_edge_to_center = 1.055
self.LAT_DIST = 0.9
self.mobileye = None
self.path_obstacle_processed = False
self.default_lane_width = 3.3
def update(self, mobileye):
self.mobileye = mobileye
self.path_obstacle_processed = False
def process_path_obstacle(self, fpath):
if self.path_obstacle_processed:
return
path_x, path_y = fpath.get_xy()
self.obstacle_lat_dist = {}
path = []
self.mobileye.process_obstacles()
for i in range(len(path_x)):
path.append((path_x[i], path_y[i]))
line = LineString(path)
for obs_id, obstacle in self.mobileye.obstacles.items():
point = Point(obstacle.x, obstacle.y)
dist = line.distance(point)
if dist < self.LAT_DIST + obstacle.width + self.left_edge_to_center:
proj_len = line.project(point)
if proj_len == 0 or proj_len >= line.length:
continue
p1 = line.interpolate(proj_len)
if (proj_len + 1) > line.length:
p2 = line.interpolate(line.length)
else:
p2 = line.interpolate(proj_len + 1)
d = (point.x - p1.x) * (p2.y - p1.y) - (point.y - p1.y) * (
p2.x - p1.x)
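                # d is the 2-D cross product of (point - p1) with (p2 - p1);
                # its sign tells on which side of the path the obstacle lies,
                # so lateral distances on one side are flipped negative.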
if d > 0:
dist *= -1
self.obstacle_lat_dist[obstacle.obstacle_id] = dist
self.path_obstacle_processed = True
# print self.obstacle_lat_dist
def get_adv_left_right_nudgable_dist(self, fpath):
left_nudgable = 0
right_nudgable = 0
routing_y = fpath.init_y()
if routing_y <= 0:
left_nudgable = self.default_lane_width / 2.0 \
- abs(routing_y) \
- self.left_edge_to_center
right_nudgable = self.default_lane_width / 2.0 \
+ abs(routing_y) \
- self.right_edge_to_center
else:
left_nudgable = self.default_lane_width / 2.0 \
+ abs(routing_y) \
- self.left_edge_to_center
right_nudgable = self.default_lane_width / 2.0 \
- abs(routing_y) \
- self.right_edge_to_center
return left_nudgable, -1 * right_nudgable
def get_nudge_distance(self, left_nudgable, right_nudgable):
left_nudge = None
right_nudge = None
for obs_id, lat_dist in self.obstacle_lat_dist.items():
if lat_dist >= 0:
actual_dist = abs(lat_dist) \
- self.mobileye.obstacles[obs_id].width / 2.0 \
- self.left_edge_to_center
if self.LAT_DIST > actual_dist > 0.2:
if right_nudge is None:
right_nudge = -1 * (self.LAT_DIST - actual_dist)
elif right_nudge > -1 * (self.LAT_DIST - actual_dist):
right_nudge = -1 * (self.LAT_DIST - actual_dist)
else:
actual_dist = abs(lat_dist) \
- self.mobileye.obstacles[obs_id].width / 2.0 \
- self.left_edge_to_center
if self.LAT_DIST > actual_dist > 0.2:
if left_nudge is None:
left_nudge = self.LAT_DIST - actual_dist
elif left_nudge < self.LAT_DIST - actual_dist:
left_nudge = self.LAT_DIST - actual_dist
if left_nudge is None and right_nudge is None:
return 0
if left_nudge is not None and right_nudge is not None:
return 0
if left_nudge is not None:
if left_nudgable < left_nudge:
return left_nudgable
else:
return left_nudge
if right_nudge is not None:
if abs(right_nudgable) > abs(right_nudge):
return right_nudgable
else:
return right_nudge
if __name__ == "__main__":
import rospy
from std_msgs.msg import String
import matplotlib.pyplot as plt
from modules.localization.proto import localization_pb2
from modules.canbus.proto import chassis_pb2
from ad_vehicle import ADVehicle
import matplotlib.animation as animation
from modules.drivers.proto import mobileye_pb2
from provider_routing import RoutingProvider
from provider_mobileye import MobileyeProvider
from path_decider import PathDecider
def localization_callback(localization_pb):
ad_vehicle.update_localization(localization_pb)
def routing_callback(routing_str):
routing.update(routing_str)
def chassis_callback(chassis_pb):
ad_vehicle.update_chassis(chassis_pb)
def mobileye_callback(mobileye_pb):
global fpath
mobileye.update(mobileye_pb)
mobileye.process_lane_markers()
fpath = path_decider.get_path(mobileye, routing, ad_vehicle,
obs_decider)
obs_decider.update(mobileye)
obs_decider.process_path_obstacle(fpath)
print "nudge distance = ", obs_decider.get_nudge_distance()
def update(frame):
    global fpath
    if not ad_vehicle.is_ready():
return
x = []
y = []
for obs_id, obs in mobileye.obstacles.items():
x.append(obs.x)
y.append(obs.y)
obstacles_points.set_xdata(x)
obstacles_points.set_ydata(y)
if fpath is not None:
px, py = fpath.get_xy()
path_line.set_xdata(px)
path_line.set_ydata(py)
fpath = None
ad_vehicle = ADVehicle()
routing = RoutingProvider()
mobileye = MobileyeProvider()
obs_decider = ObstacleDecider()
path_decider = PathDecider(True, False, False)
rospy.init_node("path_decider_debug", anonymous=True)
rospy.Subscriber('/apollo/localization/pose',
localization_pb2.LocalizationEstimate,
localization_callback)
rospy.Subscriber('/apollo/navigation/routing',
String, routing_callback)
rospy.Subscriber('/apollo/canbus/chassis',
chassis_pb2.Chassis,
chassis_callback)
rospy.Subscriber('/apollo/sensor/mobileye',
mobileye_pb2.Mobileye,
mobileye_callback)
fig = plt.figure()
ax = plt.subplot2grid((1, 1), (0, 0), rowspan=1, colspan=1)
obstacles_points, = ax.plot([], [], 'ro')
path_line, = ax.plot([], [], 'b-')
ani = animation.FuncAnimation(fig, update, interval=100)
ax.set_xlim([-2, 128])
ax.set_ylim([-5, 5])
# ax2.axis('equal')
plt.show()
| apache-2.0 |
B3AU/waveTree | examples/decomposition/plot_image_denoising.py | 8 | 5773 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image using a dictionary learned online (:ref:`DictionaryLearning`)
together with several sparse-coding transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is to look
at the difference between the reconstruction and the original image. If the
reconstruction is perfect, this difference will look like Gaussian noise.
The plots show that the result of :ref:`omp` with two non-zero coefficients
is a bit less biased than with only one (the edges look less prominent), and
it is also closer to the ground truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import pylab as pl
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height / 2:] += 0.075 * np.random.randn(width, height / 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height / 2], patch_size)
data = data.reshape(data.shape[0], -1)
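# Shape check (for the 256x256 downsampled Lena above): the left half is
# 256x128 pixels, so 7x7 patches give (256-6)*(128-6) = 30500 rows of 49
# pixel values each.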
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
pl.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
pl.subplot(10, 10, i + 1)
pl.imshow(comp.reshape(patch_size), cmap=pl.cm.gray_r,
interpolation='nearest')
pl.xticks(())
pl.yticks(())
pl.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
pl.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
pl.figure(figsize=(5, 3.3))
pl.subplot(1, 2, 1)
pl.title('Image')
pl.imshow(image, vmin=0, vmax=1, cmap=pl.cm.gray, interpolation='nearest')
pl.xticks(())
pl.yticks(())
pl.subplot(1, 2, 2)
difference = image - reference
pl.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
pl.imshow(difference, vmin=-0.5, vmax=0.5, cmap=pl.cm.PuOr,
interpolation='nearest')
pl.xticks(())
pl.yticks(())
pl.suptitle(title, size=16)
pl.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height / 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height / 2:] = reconstruct_from_patches_2d(
patches, (width, height / 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
pl.show()
| bsd-3-clause |
rabipanda/tensorflow | tensorflow/contrib/metrics/python/ops/metric_ops_test.py | 4 | 262821 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
NAN = float('nan')
metrics = metrics_lib
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_sparse_value(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64), np.array(values, np.int64),
np.array(shape, np.int64))
def _binary_2d_label_to_sparse(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_2d_label_to_sparse_value(labels))
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64), np.array(values, np.int64),
np.array(shape, np.int64))
def _binary_3d_label_to_sparse(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_3d_label_to_sparse_value(labels))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_metric_variables(test_case, expected):
test_case.assertEquals(
set(expected), set(v.name for v in variables.local_variables()))
test_case.assertEquals(
set(expected),
set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
class StreamingMeanTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean(array_ops.ones([4, 3]))
_assert_metric_variables(self, ('mean/count:0', 'mean/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
class StreamingMeanTensorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_tensor(array_ops.ones([4, 3]))
_assert_metric_variables(self,
('mean/total_tensor:0', 'mean/count_tensor:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testMultiDimensional(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(2):
sess.run(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
      self.assertAllClose([[0, 1]], sess.run(update_op))
      self.assertAllClose([[-2.1, 5.05]], sess.run(update_op))
      self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op))
      self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op))
      self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testWeighted1d(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
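      # Weights (1, 0, 1, 0) select the rows [0, 1] and [6.5, 0], so the
      # element-wise mean is [(0 + 6.5) / 2, (1 + 0) / 2] = [3.25, 0.5].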
      self.assertAllClose([[3.25, 0.5]], sess.run(mean))
def testWeighted2d_1(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
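      # Column 0 keeps 0 and -4.2 (mean -2.1); column 1 keeps 1 and 0
      # (mean 0.5).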
      self.assertAllClose([[-2.1, 0.5]], sess.run(mean))
def testWeighted2d_2(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
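      # Column 0 has only zero weights, so its mean is reported as 0;
      # column 1 keeps 1 and 0 (mean 0.5).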
      self.assertAllClose([[0, 0.5]], sess.run(mean))
class StreamingAccuracyTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
name='my_accuracy')
_assert_metric_variables(self,
('my_accuracy/count:0', 'my_accuracy/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels)
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 3))
weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels, weights)
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=2)
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_accuracy = accuracy.eval()
for _ in range(10):
self.assertEqual(initial_accuracy, accuracy.eval())
def testMultipleUpdates(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, accuracy.eval())
def testEffectivelyEquivalentSizes(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
  def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
1) # shape 3, 1
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
      # If streaming_accuracy did not flatten the weights, the accuracy would
      # be 0.33333334 due to the broadcast of the weights. Because the weights
      # are flattened, it is higher than 0.95.
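      # Concretely: the single correct prediction (index 0) carries weight 100
      # and the two mismatches carry weight 1 each, so the weighted accuracy
      # should be about 100 / 102, i.e. roughly 0.98.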
self.assertGreater(update_op.eval(), .95)
self.assertGreater(accuracy.eval(), .95)
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = [[100], [1], [1]] # shape 3, 1
weights_placeholder = array_ops.placeholder(
dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights_placeholder)
sess.run(variables.local_variables_initializer())
      # If streaming_accuracy did not flatten the weights, the accuracy would
      # be 0.33333334 due to the broadcast of the weights. Because the weights
      # are flattened, it is higher than 0.95.
self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
def testMultipleUpdatesWithWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, accuracy.eval())
class StreamingTruePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('true_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(1, tp_update_op.eval())
self.assertEqual(1, tp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels, weights=37.0)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(37.0, tp_update_op.eval())
self.assertEqual(37.0, tp.eval())
class StreamingFalseNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('false_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(2, fn_update_op.eval())
self.assertEqual(2, fn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels, weights=((3.0,), (5.0,), (7.0,)))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(8.0, fn_update_op.eval())
self.assertEqual(8.0, fn.eval())
class StreamingFalsePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('false_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fp, fp_update_op = metrics.streaming_false_positives(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(4, fp_update_op.eval())
self.assertEqual(4, fp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
fp, fp_update_op = metrics.streaming_false_positives(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0), (19.0, 23.0,
29.0, 31.0)))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(42.0, fp_update_op.eval())
self.assertEqual(42.0, fp.eval())
class StreamingTrueNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('true_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(5, tn_update_op.eval())
self.assertEqual(5, tn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels, weights=((0.0, 2.0, 3.0, 5.0),))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(15.0, tn_update_op.eval())
self.assertEqual(15.0, tn.eval())
class StreamingTruePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tp.eval())
self.assertAllEqual((3, 1, 0), tp_update_op.eval())
self.assertAllEqual((3, 1, 0), tp.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, weights=37.0, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())
class StreamingFalseNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
    metrics.streaming_false_negatives_at_thresholds(
        (0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('false_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fn.eval())
self.assertAllEqual((0, 2, 3), fn_update_op.eval())
self.assertAllEqual((0, 2, 3), fn.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions,
labels,
weights=((3.0,), (5.0,), (7.0,)),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())
class StreamingFalsePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('false_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fp.eval())
self.assertAllEqual((7, 4, 2), fp_update_op.eval())
self.assertAllEqual((7, 4, 2), fp.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0), (19.0, 23.0,
29.0, 31.0)),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())
class StreamingTrueNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tn.eval())
self.assertAllEqual((2, 5, 7), tn_update_op.eval())
self.assertAllEqual((2, 5, 7), tn.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions,
labels,
weights=((0.0, 2.0, 3.0, 5.0),),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())
class StreamingPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertEqual(initial_precision, precision.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op))
self.assertAlmostEqual(1, precision.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
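      # True positives are at index 2 of row 0 (weight 2) and index 0 of
      # row 1 (weight 5); each row predicts two positives, weighted 2 and 5
      # respectively.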
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, precision.eval())
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0.0, precision.eval())
class StreamingRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('recall/false_negatives/count:0', 'recall/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertEqual(initial_recall, recall.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, recall.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
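      # True positives are at index 2 of row 0 (weight 2) and index 3 of
      # row 1 (weight 5); each row contains two actual positives, weighted
      # 2 and 5 respectively.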
      weighted_tp = 2.0 + 5.0
      weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
      weighted_tp = 3.0 + 1.0
      weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
class StreamingFPRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_positive_rate/false_positives/count:0',
'false_positive_rate/true_negatives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertEqual(initial_fpr, fpr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fpr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fp = 2.0 + 5.0
weighted_f = (2.0 + 2.0) + (5.0 + 5.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fp = 1.0 + 3.0
weighted_f = (1.0 + 4.0) + (2.0 + 3.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fpr.eval())
def testZeroFalsePositivesAndTrueNegativesGivesZeroFPR(self):
predictions = array_ops.ones((1, 4))
labels = array_ops.ones((1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
class StreamingFNRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_negative_rate/false_negatives/count:0',
'false_negative_rate/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertEqual(initial_fnr, fnr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fnr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fn = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fn = 2.0 + 4.0
weighted_t = (2.0 + 3.0) + (1.0 + 4.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fnr.eval())
def testZeroFalseNegativesAndTruePositivesGivesZeroFNR(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
class StreamingCurvePointsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metric_ops.streaming_curve_points(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('curve_points/true_positives:0', 'curve_points/false_negatives:0',
'curve_points/false_positives:0', 'curve_points/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
points, _ = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [points])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def _testValueTensorIsIdempotent(self, curve):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(high=2, size=(10, 3)), dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels, predictions=predictions, curve=curve)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
initial_points = points.eval()
sess.run(update_op)
self.assertAllClose(initial_points, points.eval())
def testValueTensorIsIdempotentROC(self):
self._testValueTensorIsIdempotent(curve='ROC')
def testValueTensorIsIdempotentPR(self):
self._testValueTensorIsIdempotent(curve='PR')
def _testCase(self, labels, predictions, curve, expected_points):
with self.test_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels=labels_tensor,
predictions=predictions_tensor,
num_thresholds=3,
curve=curve)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAllClose(expected_points, points.eval())
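  # In the expected points below, each row appears to be an (fpr, tpr) pair
  # for the ROC curve and a (recall, precision) pair for the PR curve, one
  # per threshold.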
def testEdgeCasesROC(self):
self._testCase([[1]], [[1]], 'ROC', [[0, 1], [0, 1], [0, 0]])
self._testCase([[0]], [[0]], 'ROC', [[1, 1], [0, 1], [0, 1]])
self._testCase([[0]], [[1]], 'ROC', [[1, 1], [1, 1], [0, 1]])
self._testCase([[1]], [[0]], 'ROC', [[0, 1], [0, 0], [0, 0]])
def testManyValuesROC(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'ROC',
[[1.0, 1.0], [0.0, 0.75], [0.0, 0.0]])
def testEdgeCasesPR(self):
self._testCase([[1]], [[1]], 'PR', [[1, 1], [1, 1], [0, 1]])
self._testCase([[0]], [[0]], 'PR', [[1, 0], [1, 1], [1, 1]])
self._testCase([[0]], [[1]], 'PR', [[1, 0], [1, 0], [1, 1]])
self._testCase([[1]], [[0]], 'PR', [[1, 1], [0, 1], [0, 1]])
def testManyValuesPR(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'PR',
[[1.0, 4.0 / 6.0], [0.75, 1.0], [0.0, 1.0]])
def _np_auc(predictions, labels, weights=None):
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
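  # tp[i] is the weighted fraction of positives scored at or above sorted
  # position i; averaging it over the negatives (by weight) gives the AUC,
  # i.e. roughly the probability that a random positive outranks a random
  # negative (up to how ties are ordered by the sort).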
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
class StreamingAUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_auc(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_auc(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testPredictionsOutOfRange(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, -1, 1, -1], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
_, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertRaises(errors_impl.InvalidArgumentError, update_op.eval)
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_auc(predictions, labels, curve=curve)
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.test_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=num_samples)
noise = np.random.normal(0.0, scale=0.2, size=num_samples)
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
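    # Clip predictions to [0, 1]; streaming_auc rejects out-of-range
    # predictions (see testPredictionsOutOfRange above).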
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
x_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
for weights in (None, np.ones(num_samples),
np.random.exponential(scale=1.0, size=num_samples)):
expected_auc = _np_auc(predictions, labels, weights)
with self.test_session() as sess:
enqueue_ops = [[] for i in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
tf_weights = (
_enqueue_as_batches(weights, enqueue_ops)
if weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
auc, update_op = metrics.streaming_auc(
tf_predictions,
tf_labels,
curve='ROC',
num_thresholds=500,
weights=tf_weights)
sess.run(variables.local_variables_initializer())
for i in range(num_batches):
sess.run(update_op)
        # Since this is only an approximation, we can't expect a 6-digit
        # match, although with more samples/thresholds the accuracy should
        # improve.
self.assertAlmostEqual(expected_auc, auc.eval(), 2)
class StreamingDynamicAUCTest(test.TestCase):
def setUp(self):
super(StreamingDynamicAUCTest, self).setUp()
np.random.seed(1)
ops.reset_default_graph()
def testUnknownCurve(self):
with self.assertRaisesRegexp(
ValueError, 'curve must be either ROC or PR, TEST_CURVE unknown'):
metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
curve='TEST_CURVE')
def testVars(self):
metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)), predictions=array_ops.ones((10, 1)))
_assert_metric_variables(self, [
'dynamic_auc/concat_labels/array:0', 'dynamic_auc/concat_labels/size:0',
'dynamic_auc/concat_preds/array:0', 'dynamic_auc/concat_preds/size:0'
])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
auc, _ = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [auc])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in xrange(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in xrange(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testAllLabelsOnes(self):
with self.test_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([1, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
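      # With only one class present the AUC is ill-defined;
      # streaming_dynamic_auc reports 0 in this case.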
self.assertEqual(0, auc.eval())
def testAllLabelsZeros(self):
with self.test_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([0, 0, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
def testNonZeroOnePredictions(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2.5, -2.5, 2.5, -2.5], dtype=dtypes_lib.float32)
labels = constant_op.constant([1, 0, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
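      # streaming_dynamic_auc ranks the raw scores, so predictions are not
      # required to lie in [0, 1].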
self.assertAlmostEqual(auc.eval(), 1.0)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant([1, 0, 1, 0])
labels = constant_op.constant([0, 1, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.5, auc.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, auc.eval())
def testExceptionOnIncompatibleShapes(self):
with self.test_session() as sess:
predictions = array_ops.ones([5])
labels = array_ops.zeros([6])
with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
def testExceptionOnGreaterThanOneLabel(self):
with self.test_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([2, 1, 0])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is >1.*'):
sess.run(update_op)
def testExceptionOnNegativeLabel(self):
with self.test_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([1, 0, -1])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is <0.*'):
sess.run(update_op)
def testWithMultipleUpdates(self):
batch_size = 10
num_batches = 100
labels = np.array([])
predictions = np.array([])
tf_labels = variables.Variable(
array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.Variable(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(tf_labels, tf_predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.normal(0.0, scale=0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
sess.run(tf_labels.assign(new_labels))
sess.run(tf_predictions.assign(new_predictions))
sess.run(update_op)
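      # The streamed AUC over all batches should match the numpy reference
      # computed on the full set of accumulated labels and predictions.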
expected_auc = _np_auc(predictions, labels)
self.assertAlmostEqual(expected_auc, auc.eval())
def testAUCPRReverseIncreasingPredictions(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-5)
def testAUCPRJumbledPredictions(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81], dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-6)
def testAUCPRPredictionsLessThanHalf(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-5)
class StreamingPrecisionRecallAtEqualThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def _testResultsEqual(self, expected_dict, gotten_result):
"""Tests that 2 results (dicts) represent the same data.
Args:
expected_dict: A dictionary with keys that are the names of properties
of PrecisionRecallData and whose values are lists of floats.
gotten_result: A PrecisionRecallData object.
"""
gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}
self.assertItemsEqual(list(expected_dict.keys()), list(gotten_dict.keys()))
for key, expected_values in expected_dict.items():
self.assertAllClose(expected_values, gotten_dict[key])
def _testCase(self, predictions, labels, expected_result, weights=None):
"""Performs a test given a certain scenario of labels, predictions, weights.
Args:
predictions: The predictions tensor. Of type float32.
labels: The labels tensor. Of type bool.
expected_result: The expected result (dict) that maps to tensors.
weights: Optional weights tensor.
"""
with self.test_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.bool)
      weights_tensor = None
      if weights is not None:
        weights_tensor = constant_op.constant(weights, dtype=dtypes_lib.float32)
gotten_result, update_op = (
metric_ops.precision_recall_at_equal_thresholds(
labels=labels_tensor,
predictions=predictions_tensor,
weights=weights_tensor,
num_thresholds=3))
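      # num_thresholds=3 corresponds to the [0.0, 0.5, 1.0] 'thresholds'
      # entries in each test's expected result.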
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self._testResultsEqual(expected_result, gotten_result)
def testVars(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32))
_assert_metric_variables(
self, ('precision_recall_at_equal_thresholds/variables/tp_buckets:0',
'precision_recall_at_equal_thresholds/variables/fp_buckets:0'))
def testVarsWithName(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32),
name='foo')
_assert_metric_variables(
self, ('foo/variables/tp_buckets:0', 'foo/variables/fp_buckets:0'))
def testValuesAreIdempotent(self):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(size=(10, 3)) > 0.5, dtype=dtypes_lib.bool)
result, update_op = metric_ops.precision_recall_at_equal_thresholds(
labels=labels, predictions=predictions)
with self.test_session() as sess:
# Run several updates.
sess.run(variables.local_variables_initializer())
for _ in range(3):
sess.run(update_op)
# Then verify idempotency.
initial_result = {
k: value.eval().tolist()
for k, value in result._asdict().items()
}
for _ in range(3):
self._testResultsEqual(initial_result, result)
def testAllTruePositives(self):
self._testCase(
[[1]], [[True]], {
'tp': [1, 1, 1],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [1.0, 1.0, 1.0],
'recall': [1.0, 1.0, 1.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllTrueNegatives(self):
self._testCase(
[[0]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 0, 0],
'tn': [0, 1, 1],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalsePositives(self):
self._testCase(
[[1]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 1, 1],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalseNegatives(self):
self._testCase(
[[0]], [[True]], {
'tp': [1, 0, 0],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 1, 1],
'precision': [1.0, 0.0, 0.0],
'recall': [1.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testManyValues(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testManyValuesWithWeights(self):
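    # With per-example weights, tp/fp/tn/fn are weighted sums rather than
    # counts; examples with weight 0 do not contribute.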
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [1.5, 1.5, 0.0],
'fp': [2.5, 0.0, 0.0],
'tn': [0.0, 2.5, 2.5],
'fn': [0.0, 0.0, 1.5],
'precision': [0.375, 1.0, 0.0],
'recall': [1.0, 1.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
weights=[[0.0, 0.5, 2.0, 0.0, 0.5, 1.0]])
class StreamingSpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7)
_assert_metric_variables(self,
('specificity_at_sensitivity/true_positives:0',
'specificity_at_sensitivity/false_negatives:0',
'specificity_at_sensitivity/false_positives:0',
'specificity_at_sensitivity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_specificity = specificity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.4)
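    # Accepting the two 0.26 predictions reaches sensitivity 0.4 (2 of the 5
    # positives) while misclassifying 2 of the 5 negatives, so the expected
    # specificity is 0.6.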
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted1d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
class StreamingSensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7)
_assert_metric_variables(self,
('sensitivity_at_specificity/true_positives:0',
'sensitivity_at_specificity/false_negatives:0',
'sensitivity_at_specificity/false_positives:0',
'sensitivity_at_specificity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_sensitivity = sensitivity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.7)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(1, sess.run(update_op))
      self.assertEqual(1, sensitivity.eval())
def testSomeCorrectHighSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.8)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.8, sess.run(update_op))
      self.assertAlmostEqual(0.8, sensitivity.eval())
def testSomeCorrectLowSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.4)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.6, sess.run(update_op))
      self.assertAlmostEqual(0.6, sensitivity.eval())
def testWeighted(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, weights=weights, specificity=0.4)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.675, sess.run(update_op))
      self.assertAlmostEqual(0.675, sensitivity.eval())
# TODO(nsilberman): Break this up into two sets of tests.
class StreamingPrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run([prec_op, rec_op])
# Then verify idempotency.
initial_prec = prec.eval()
initial_rec = rec.eval()
for _ in range(10):
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertEqual(1, prec.eval())
self.assertEqual(1, rec.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
self.assertAlmostEqual(0.5, rec.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
self.assertAlmostEqual(0, rec.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
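      # At threshold -1.0 every example is predicted positive, so precision is
      # the positive fraction (3/4) and recall is 1.0; at 2.0 nothing is
      # predicted positive, so both drop to 0.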
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
self.assertAlmostEqual(0.0, prec_high.eval())
self.assertAlmostEqual(1.0, rec_low.eval())
self.assertAlmostEqual(0.0, rec_high.eval())
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
self.assertAlmostEqual(0, rec.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
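    # Brute-force the confusion-matrix counts at the 0.3 threshold to obtain
    # the expected precision and recall for comparison with the streamed
    # values computed below.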
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
prec, prec_op = metrics.streaming_precision_at_thresholds(
tf_predictions, tf_labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run([prec_op, rec_op])
      # Since this is only approximate, we can't expect a 6-digit match,
      # although the accuracy should improve with more samples and thresholds.
self.assertAlmostEqual(expected_prec, prec.eval(), 2)
self.assertAlmostEqual(expected_rec, rec.eval(), 2)
class StreamingFPRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_positive_rate_at_thresholds/false_positives:0',
'false_positive_rate_at_thresholds/true_negatives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fpr, _ = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fpr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fpr_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertAllClose(initial_fpr, fpr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.5, fpr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1, fpr.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0, fpr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
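    # Count false positives and true negatives at the 0.3 threshold to obtain
    # the expected false positive rate for comparison with the streamed value.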
fp = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 0:
fp += 1
else:
if labels[i] == 0:
tn += 1
epsilon = 1e-7
expected_fpr = fp / (epsilon + fp + tn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fpr_op)
      # Since this is only approximate, we can't expect a 6-digit match,
      # although the accuracy should improve with more samples and thresholds.
self.assertAlmostEqual(expected_fpr, fpr.eval(), 2)
class RecallAtPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7)
_assert_metric_variables(self, ('recall_at_precision/true_positives:0',
'recall_at_precision/false_negatives:0',
'recall_at_precision/false_positives:0',
'recall_at_precision/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertAlmostEqual(initial_recall, recall.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=1.0)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, recall.eval())
def testSomeCorrectHighPrecision(self):
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3]
labels_values = [1, 1, 1, 1, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, recall.eval())
def testSomeCorrectLowPrecision(self):
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3, .2, .1]
labels_values = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
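      # Taking the five highest-scoring examples yields 2 true positives and
      # 3 false positives (precision 0.4), which gives the best recall, 2/3,
      # achievable at precision >= 0.4.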
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
def testWeighted(self):
predictions_values = [1, .9, .8, .7, .6]
labels_values = [1, 1, 0, 0, 1]
weights_values = [1, 1, 3, 4, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, weights=weights, precision=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
class StreamingFNRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_negative_rate_at_thresholds/false_negatives:0',
'false_negative_rate_at_thresholds/true_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fnr, _ = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fnr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fnr_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertAllClose(initial_fnr, fnr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.5, fnr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(1, fnr.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval())
self.assertAlmostEqual(1.0, fnr_high.eval())
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0, fnr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
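    # Count false negatives and true positives at the 0.3 threshold to obtain
    # the expected false negative rate for comparison with the streamed value.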
fn = 0
tp = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
if labels[i] == 1:
fn += 1
epsilon = 1e-7
expected_fnr = fn / (epsilon + fn + tp)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fnr_op)
      # Since this is only approximate, we can't expect a 6-digit match,
      # although the accuracy should improve with more samples and thresholds.
self.assertAlmostEqual(expected_fnr, fnr.eval(), 2)
# TODO(ptucker): Remove when we remove `streaming_recall_at_k`.
# This op will be deprecated soon in favor of `streaming_sparse_recall_at_k`.
# Until then, this test validates that both ops yield the same results.
class StreamingRecallAtKTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
self._batch_size = 4
self._num_classes = 3
self._np_predictions = np.matrix(('0.1 0.2 0.7;'
'0.6 0.2 0.2;'
'0.0 0.9 0.1;'
'0.2 0.0 0.8'))
self._np_labels = [0, 0, 0, 0]
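    # Every label is class 0: it is the top prediction in one of the four rows,
    # in the top 2 for two rows, and in the top 3 for all rows, so the expected
    # recall@k is 0.25, 0.5, and 1.0 for k = 1, 2, 3.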
def testVars(self):
metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1)
_assert_metric_variables(self,
('recall_at_1/count:0', 'recall_at_1/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testSingleUpdateKIs1(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=1)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.25, sess.run(update_op))
self.assertEqual(0.25, recall.eval())
self.assertEqual(0.25, sess.run(sp_update_op))
self.assertEqual(0.25, sp_recall.eval())
def testSingleUpdateKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=2)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, recall.eval())
self.assertEqual(0.5, sess.run(sp_update_op))
self.assertEqual(0.5, sp_recall.eval())
def testSingleUpdateKIs3(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=3)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=3)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
def testSingleUpdateSomeMissingKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
weights = constant_op.constant(
[0, 1, 0, 1], shape=(self._batch_size,), dtype=dtypes_lib.float32)
recall, update_op = metrics.streaming_recall_at_k(
predictions, labels, k=2, weights=weights)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions,
array_ops.reshape(labels, (self._batch_size, 1)),
k=2,
weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
class StreamingSparsePrecisionTest(test.TestCase):
def _test_streaming_sparse_precision_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
labels=labels,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_k(self,
predictions,
labels,
k,
expected,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_k(
predictions, labels, k, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_top_k(
top_k_predictions, labels, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def test_top_k_rank_invalid(self):
with self.test_session():
# top_k_predictions has rank < 2.
top_k_predictions = [9, 4, 6, 2, 0]
      sp_labels = sparse_tensor.SparseTensorValue(
          indices=np.array([[0], [1], [2]], np.int64),
          values=np.array([2, 7, 8], np.int64),
          dense_shape=np.array([10], np.int64))
with self.assertRaises(ValueError):
precision, _ = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int64),
labels=sp_labels)
variables.variables_initializer(variables.local_variables()).run()
precision.eval()
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
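    # Expected precision@k for the ranked predictions above, followed by the
    # corresponding average precision@k (precision at each position that hits
    # a relevant label, summed over the top k and divided by k).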
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
predictions_top_k_ex2 = (1, 3, 0, 6, 5)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=precision_ex2[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=avg_precision_ex2[i])
    # With both examples, we expect precision and average precision to be the
    # average of the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [
(ex1 + ex2) / 2 for ex1, ex2 in zip(precision_ex1, precision_ex2)
]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
predictions_top_k = (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k])
self._test_streaming_sparse_precision_at_top_k(
predictions_top_k, labels, expected=streaming_precision[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
self._test_streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels, expected=streaming_average_precision[i])
    # With weighted examples, we expect streaming average precision to be the
    # weighted average of the 2 examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k], predictions_top_k_ex2[:k]),
labels,
expected=streaming_average_precision[i],
weights=weights)
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
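    # -1 and 7 fall outside the [0, 7) class range of the 7-dimensional
    # predictions, so the expected values match the all-in-range example in
    # test_average_precision above.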
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
def test_average_precision_at_top_k_static_shape_check(self):
predictions_top_k = array_ops.placeholder(
shape=(2, None), dtype=dtypes_lib.int64)
labels = np.array(((1,), (2,)), dtype=np.int64)
# Fails due to non-static predictions_idx shape.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
predictions_top_k = (2, 1)
    # Fails since predictions_idx has rank 1; at least rank 2 is required.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
predictions_top_k = ((2,), (1,))
# Valid static shape.
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
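      # With no top-1 predictions for a class, the precision denominator is
      # 0, so the metric is NaN.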
for class_id in (-1, 0, 1, 2, 4):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2, class_id=3)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=3.0 / 10)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1,
3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=3.0 / 10)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=7.0 / 20)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
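    # The top-1 prediction is class 3 for both rows, but only the first
    # row's label is 3, so precision@1 is 1/2.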
expected_precision = 0.5
with self.test_session():
_, precision = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_precision, precision.eval())
class StreamingSparseRecallTest(test.TestCase):
def _test_streaming_sparse_recall_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_sparse_recall_at_top_k(self,
labels,
top_k_predictions,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metric_ops.sparse_recall_at_top_k(
labels=labels,
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
# Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
# range.
for labels in (sparse_labels, dense_labels):
for class_id in (-1, 0, 1, 4):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_one_label_at_k1_no_predictions(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 0 predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=2)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, class_id=3)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2)
def test_one_label_at_k1_weighted(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=3, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.6 / 0.9, weights=(0.6, 0.3))
def test_three_labels_at_k5_nan(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 8: 1 label, no predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=8)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=8)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=2)
      # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=3.0 / 6)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 6)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1,
3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=2.0 / 2,
class_id=2)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=2.0 / 2, class_id=2)
    # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=1.0 / 1,
class_id=5)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=0.0 / 1,
class_id=7)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 8 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions, labels=sp_labels, k=5, expected=3.0 / 8)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=3.0 / 8)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_3d_no_predictions(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 labels, all correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=4.0 / 4, class_id=2)
# Class 5: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=5)
# Class 7: 2 labels, 1 incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, class_id=7)
# All classes: 12 labels, 7 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=7.0 / 12)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=7.0 / 12)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 1, 0], [0, 0, 0, 1]]
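    # The top-1 prediction is class 3 for both rows, but only the second
    # row's label is 3, so recall@1 is 1/2.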
expected_recall = 0.5
with self.test_session():
_, recall = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_recall, recall.eval())
class StreamingMeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_absolute_error/count:0', 'mean_absolute_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_absolute_error(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
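    # Absolute errors are [1, 1, 4, 5]; with weights [0, 1, 0, 1] the
    # weighted mean is (1 + 5) / 2 = 3.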
error, update_op = metrics.streaming_mean_absolute_error(
predictions, labels, weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(3, sess.run(update_op))
self.assertEqual(3, error.eval())
class StreamingMeanRelativeErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_relative_error/count:0', 'mean_relative_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
normalizer = random_ops.random_normal((10, 3), seed=3)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_predictions - np_labels), np_labels))
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(expected_error, sess.run(update_op))
self.assertEqual(expected_error, error.eval())
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=array_ops.zeros_like(labels))
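    # The test expects 0 here: with an all-zero normalizer the metric
    # reports 0 instead of dividing by zero.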
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.0, sess.run(update_op))
self.assertEqual(0.0, error.eval())
class StreamingMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_squared_error/count:0', 'mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
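    # Squared errors are [1, 1, 16], so the mean squared error is 18 / 3 = 6.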
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(6, sess.run(update_op))
self.assertEqual(6, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
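    # Squared errors are [1, 1, 16, 25]; with weights [0, 1, 0, 1] the
    # weighted mean is (1 + 25) / 2 = 13.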
error, update_op = metrics.streaming_mean_squared_error(
predictions, labels, weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(13, sess.run(update_op))
self.assertEqual(13, error.eval())
def testMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
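      # Squared errors are 81 + 25 + 16 = 122 for the first batch and
      # 36 + 1 + 49 = 86 for the second, giving (122 + 86) / 6 = 208 / 6.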
error, update_op = metrics.streaming_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testMetricsComputedConcurrently(self):
with self.test_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
      # Create the queue that populates another set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
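      # For the second pair, squared errors are 25 + 16 + 4 = 45 and
      # 16 + 9 + 9 = 34, giving 79 / 6; the first pair gives 208 / 6 as in
      # the previous test.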
mse0, update_op0 = metrics.streaming_mean_squared_error(
predictions0, labels0, name='msd0')
mse1, update_op1 = metrics.streaming_mean_squared_error(
predictions1, labels1, name='msd1')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1])
sess.run([update_op0, update_op1])
mse0, mse1 = sess.run([mse0, mse1])
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
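      # Absolute errors sum to 18 + 14 = 32 and squared errors to 208, over
      # 6 elements in total.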
mae, ma_update_op = metrics.streaming_mean_absolute_error(
predictions, labels)
mse, ms_update_op = metrics.streaming_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
sess.run([ma_update_op, ms_update_op])
sess.run([ma_update_op, ms_update_op])
self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class StreamingRootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('root_mean_squared_error/count:0', 'root_mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
0.0, shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, rmse.eval())
def testSingleUpdateWithError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
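      # Squared errors are [1, 1, 16], so RMSE is sqrt(18 / 3) = sqrt(6).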
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
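      # The weighted mean squared error is (1 + 25) / 2 = 13, so RMSE is
      # sqrt(13).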
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels, weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
class StreamingCovarianceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_metric_variables(self, (
'covariance/comoment:0',
'covariance/count:0',
'covariance/mean_label:0',
'covariance/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
cov, _ = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [cov])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
cov, update_op = metrics.streaming_covariance(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_cov = cov.eval()
for _ in range(10):
self.assertEqual(initial_cov, cov.eval())
def testSingleUpdateIdentical(self):
with self.test_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op), 5)
self.assertAlmostEqual(expected_cov, cov.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, update_op.eval())
self.assertAlmostEqual(expected_cov, cov.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 7], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 3, 1], shape=(1, 4), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(
predictions, labels, weights=weights)
expected_cov = np.cov(
[2, 4, 6, 8], [1, 3, 2, 7], fweights=[0, 1, 3, 1])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op))
self.assertAlmostEqual(expected_cov, cov.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
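      # Feed the data in chunks of `stride` and compare the streaming
      # estimate against np.cov over everything seen so far.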
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(prev_expected_cov,
sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_cov,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_cov, sess.run(cov, feed_dict=feed_dict),
5)
prev_expected_cov = expected_cov
def testMultiUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(prev_expected_cov,
sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_cov,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_cov, sess.run(cov, feed_dict=feed_dict),
5)
prev_expected_cov = expected_cov
class StreamingPearsonRTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_metric_variables(self, (
'pearson_r/covariance/comoment:0',
'pearson_r/covariance/count:0',
'pearson_r/covariance/mean_label:0',
'pearson_r/covariance/mean_prediction:0',
'pearson_r/variance_labels/count:0',
'pearson_r/variance_labels/comoment:0',
'pearson_r/variance_labels/mean_label:0',
'pearson_r/variance_labels/mean_prediction:0',
'pearson_r/variance_predictions/comoment:0',
'pearson_r/variance_predictions/count:0',
'pearson_r/variance_predictions/mean_label:0',
'pearson_r/variance_predictions/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
pearson_r, _ = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [pearson_r])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_r = pearson_r.eval()
for _ in range(10):
self.assertEqual(initial_r, pearson_r.eval())
def testSingleUpdateIdentical(self):
with self.test_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
expected_r = np.corrcoef(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op), 5)
self.assertAlmostEqual(expected_r, pearson_r.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
expected_r = np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, update_op.eval())
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = np.array([2, 4, 6, 8])
labels = np.array([1, 3, 2, 7])
weights = np.array([0, 1, 3, 1])
predictions_t = constant_op.constant(
predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels_t = constant_op.constant(
labels, shape=(1, 4), dtype=dtypes_lib.float32)
weights_t = constant_op.constant(
weights, shape=(1, 4), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
cmat = np.cov(predictions, labels, fweights=weights)
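      # Pearson r is the weighted covariance normalized by the product of the
      # two weighted standard deviations (the square roots of the diagonal).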
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op))
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(prev_expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
expected_r = np.corrcoef(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_r,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(prev_expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
cmat = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
self.assertAlmostEqual(expected_r,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndSingletonBatches(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
weights = (np.arange(n).reshape(n // stride, stride) % stride == 0)
for row in weights:
np.random.shuffle(row)
# Now, weights is one-hot by row - one item per batch has non-zero weight.
weights = weights.reshape((n,))
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
cmat = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
actual_r = sess.run(update_op, feed_dict=feed_dict)
self.assertEqual(np.isnan(expected_r), np.isnan(actual_r))
self.assertEqual(
np.isnan(expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(expected_r):
self.assertAlmostEqual(expected_r, actual_r, 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
class StreamingMeanCosineDistanceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1)
_assert_metric_variables(self, (
'mean_cosine_distance/count:0',
'mean_cosine_distance/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError1(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
def testSingleUpdateWithError2(self):
np_predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
np_labels = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
def testSingleUpdateWithErrorAndWeights1(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithErrorAndWeights2(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
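      # Only the rows with nonzero weight contribute: their cosine similarities
      # are -1 and 0, so the distances are 2 and 1 and the weighted mean is 1.5.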
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_percentage_less(values=array_ops.ones((10,)), threshold=2)
_assert_metric_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, name='low')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
def testSomePresentOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, weights=weights, name='low')
sess.run(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
sess.run([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
class StreamingMeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_metric_variables(self, ('mean_iou/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(predictions, labels, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(
predictions, labels, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=2)
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes=num_classes)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_miou = miou.eval()
for _ in range(10):
self.assertEqual(initial_miou, miou.eval())
def testMultipleUpdates(self):
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
for _ in range(6):
sess.run(update_op)
desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testMultipleUpdatesWithMissingClass(self):
# Test the case where there are no predictions and no labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
# There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]),
constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]),
constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
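      # Per-class IoU is TP / (TP + FP + FN): class 0 gives 3 / (3 + 2 + 0) and
      # class 1 gives 5 / (5 + 0 + 2).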
desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]),
constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]),
constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
weights = array_ops.concat([
constant_op.constant(0, shape=[1]),
constant_op.constant(1, shape=[8]),
constant_op.constant(0, shape=[1])
], 0)
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testMissingClassInLabels(self):
labels = constant_op.constant([[[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant(
[[[0, 0, 2, 1, 1, 0], [0, 1, 2, 2, 0, 1]], [[0, 0, 2, 1, 1, 1],
[1, 1, 2, 0, 0, 0]]])
num_classes = 3
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[7, 4, 3], [3, 5, 2], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(1 / 3 * (7 / (7 + 3 + 7) + 5 / (5 + 4 + 5) + 0 /
(0 + 5 + 0)), miou.eval())
def testMissingClassOverallSmall(self):
labels = constant_op.constant([0])
predictions = constant_op.constant([0])
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[1, 0], [0, 0]], update_op.eval())
self.assertAlmostEqual(1, miou.eval())
def testMissingClassOverallLarge(self):
labels = constant_op.constant([[[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant(
[[[0, 0, 1, 1, 0, 0], [1, 1, 0, 0, 1, 1]], [[0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0]]])
num_classes = 3
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[9, 5, 0], [3, 7, 0], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(1 / 2 * (9 / (9 + 3 + 5) + 7 / (7 + 5 + 3)),
miou.eval())
class StreamingConcatTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_concat(values=array_ops.ones((10,)))
_assert_metric_variables(self, (
'streaming_concat/array:0',
'streaming_concat/size:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
value, _ = metrics.streaming_concat(
values=array_ops.ones((10,)), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [value])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_concat(
values=array_ops.ones((10,)), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testNextArraySize(self):
next_array_size = metric_ops._next_array_size # pylint: disable=protected-access
with self.test_session():
self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)
self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(4, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(5, growth_factor=2).eval(), 8)
self.assertEqual(next_array_size(6, growth_factor=2).eval(), 8)
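  # Illustrative sketch, not part of the library or the original tests:
  # _next_array_size rounds the required size up along powers of the growth
  # factor, i.e. the usual dynamic-array growth rule. The helper below is a
  # plain-Python stand-in consistent with the assertions above; its name and
  # loop form are the editor's assumptions, not the real implementation.
  def _next_array_size_reference_sketch(self, required_size, growth_factor=2):
    size = 1
    while size < required_size:
      size *= growth_factor
    return size  # e.g. required_size=5, growth_factor=2 -> 8, as asserted above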
def testStreamingConcat(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())
sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})
self.assertAllEqual(np.arange(10), concatenated.eval())
def testStreamingConcatStringValues(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.string, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertItemsEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: ['a', 'b', 'c']})
self.assertItemsEqual([b'a', b'b', b'c'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['d', 'e']})
self.assertItemsEqual([b'a', b'b', b'c', b'd', b'e'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['f', 'g', 'h', 'i', 'j']})
self.assertItemsEqual(
[b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j'],
concatenated.eval())
def testStreamingConcatMaxSize(self):
with self.test_session() as sess:
values = math_ops.range(3)
concatenated, update_op = metrics.streaming_concat(values, max_size=5)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
def testStreamingConcat2D(self):
with self.test_session() as sess:
values = array_ops.reshape(math_ops.range(3), (3, 1))
concatenated, update_op = metrics.streaming_concat(values, axis=-1)
sess.run(variables.local_variables_initializer())
for _ in range(10):
sess.run([update_op])
self.assertAllEqual([[0] * 10, [1] * 10, [2] * 10], concatenated.eval())
def testStreamingConcatErrors(self):
with self.assertRaises(ValueError):
metrics.streaming_concat(array_ops.placeholder(dtypes_lib.float32))
values = array_ops.zeros((2, 3))
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=-3, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=2, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(
array_ops.placeholder(dtypes_lib.float32, [None, None]))
def testStreamingConcatReset(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run(variables.local_variables_initializer())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([3, 4], concatenated.eval())
class AggregateMetricsTest(test.TestCase):
def testAggregateNoMetricsRaisesValueError(self):
with self.assertRaises(ValueError):
metrics.aggregate_metrics()
def testAggregateSingleMetricReturnsOneItemLists(self):
values = array_ops.ones((10, 4))
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean(values))
self.assertEqual(len(value_tensors), 1)
self.assertEqual(len(update_ops), 1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, update_ops[0].eval())
self.assertEqual(1, value_tensors[0].eval())
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean_absolute_error(predictions, labels),
metrics.streaming_mean_squared_error(predictions, labels))
self.assertEqual(len(value_tensors), 2)
self.assertEqual(len(update_ops), 2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, update_ops[0].eval())
self.assertEqual(4, update_ops[1].eval())
self.assertEqual(2, value_tensors[0].eval())
self.assertEqual(4, value_tensors[1].eval())
class AggregateMetricMapTest(test.TestCase):
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
names_to_values, names_to_updates = metrics.aggregate_metric_map({
'm1': metrics.streaming_mean_absolute_error(predictions, labels),
'm2': metrics.streaming_mean_squared_error(predictions, labels),
})
self.assertEqual(2, len(names_to_values))
self.assertEqual(2, len(names_to_updates))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, names_to_updates['m1'].eval())
self.assertEqual(4, names_to_updates['m2'].eval())
self.assertEqual(2, names_to_values['m1'].eval())
self.assertEqual(4, names_to_values['m2'].eval())
class CountTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.count(array_ops.ones([4, 3]))
_assert_metric_variables(self, ['count/count:0'])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.count(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.count(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(2.0, sess.run(update_op), 5)
self.assertAlmostEqual(4.0, sess.run(update_op), 5)
self.assertAlmostEqual(6.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def test1dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [0.5])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1.2])
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
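      # Each scalar weight is broadcast across the 2 values in its batch, so
      # the weighted count is (0.5 + 0 + 0 + 1.2) * 2 = 3.4.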
self.assertAlmostEqual(3.4, result.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 0.5, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1.2, shape=(1,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(3.4, result.eval(), 5)
def test2dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1.1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
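      # With per-value weights the count is simply their sum:
      # 1.1 + 1 + 1 + 0 + 0 + 1 + 0 + 0 = 4.1.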
self.assertAlmostEqual(4.1, result.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1.1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(4.1, result.eval(), 5)
class CohenKappaTest(test.TestCase):
def _confusion_matrix_to_samples(self, confusion_matrix):
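    # Expand the confusion matrix into individual samples: entry [i, j]
    # contributes confusion_matrix[i, j] pairs with label i and prediction j;
    # the two returned arrays are the flattened labels and predictions.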
x, y = confusion_matrix.shape
pairs = []
for label in range(x):
for feature in range(y):
pairs += [label, feature] * confusion_matrix[label, feature]
pairs = np.array(pairs).reshape((-1, 2))
return pairs[:, 0], pairs[:, 1]
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2)
_assert_metric_variables(self, (
'cohen_kappa/po:0',
'cohen_kappa/pe_row:0',
'cohen_kappa/pe_col:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
kappa, _ = metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [kappa])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 1), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 1), maxval=3, dtype=dtypes_lib.int64, seed=2)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 3)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_kappa = kappa.eval()
for _ in range(10):
self.assertAlmostEqual(initial_kappa, kappa.eval(), 5)
def testBasic(self):
confusion_matrix = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]])
# overall total = 36
# po = [9, 8, 6], sum(po) = 23
# pe_row = [15, 12, 9], pe_col = [13, 14, 9], so pe = [5.42, 4.67, 2.25]
# finally, kappa = (sum(po) - sum(pe)) / (N - sum(pe))
# = (23 - 12.34) / (36 - 12.34)
# = 0.45
# see: http://psych.unl.edu/psycrs/handcomp/hckappa.PDF
expect = 0.45
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
dtypes = [dtypes_lib.int16, dtypes_lib.int32, dtypes_lib.int64]
shapes = [
        (len(labels),),  # 1-dim
        (len(labels), 1),  # 2-dim
    ]
weights = [None, np.ones_like(labels)]
for dtype in dtypes:
for shape in shapes:
for weight in weights:
with self.test_session() as sess:
predictions_tensor = constant_op.constant(
np.reshape(predictions, shape), dtype=dtype)
labels_tensor = constant_op.constant(
np.reshape(labels, shape), dtype=dtype)
kappa, update_op = metrics.cohen_kappa(
labels_tensor, predictions_tensor, 3, weights=weight)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 2)
self.assertAlmostEqual(expect, kappa.eval(), 2)
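  # Illustrative sketch, not part of the original test suite: the hand
  # computation from testBasic above written out with plain numpy. The helper
  # name and the local numpy import are the editor's assumptions; the leading
  # underscore keeps unittest from collecting it as a test.
  def _cohen_kappa_reference_sketch(self):
    import numpy as np
    cm = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]], dtype=float)
    n = cm.sum()                                      # 36 samples in total
    po = np.trace(cm)                                 # observed agreement: 23
    pe = (cm.sum(axis=1) * cm.sum(axis=0)).sum() / n  # chance agreement: approx. 12.33
    return (po - pe) / (n - pe)                       # approx. 0.45, matching expect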
def testAllCorrect(self):
inputs = np.arange(0, 100) % 4
# confusion matrix
# [[25, 0, 0],
# [0, 25, 0],
# [0, 0, 25]]
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(inputs, inputs)
expect = 1.0
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testAllIncorrect(self):
labels = np.arange(0, 100) % 4
predictions = (labels + 1) % 4
# confusion matrix
# [[0, 25, 0],
# [0, 0, 25],
# [25, 0, 0]]
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(labels, predictions)
expect = -0.333333333333
with self.test_session() as sess:
predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testWeighted(self):
confusion_matrix = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]])
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
num_samples = np.sum(confusion_matrix, dtype=np.int32)
weights = (np.arange(0, num_samples) % 5) / 5.0
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
# labels, predictions, sample_weight=weights)
expect = 0.453466583385
with self.test_session() as sess:
predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels)
kappa, update_op = metrics.cohen_kappa(
labels, predictions, 4, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testWithMultipleUpdates(self):
confusion_matrix = np.array([[90, 30, 10, 20], [40, 80, 20, 30],
[20, 10, 60, 35], [15, 25, 30, 25]])
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
num_samples = np.sum(confusion_matrix, dtype=np.int32)
weights = (np.arange(0, num_samples) % 5) / 5.0
num_classes = confusion_matrix.shape[0]
batch_size = num_samples // 10
predictions_t = array_ops.placeholder(
dtypes_lib.float32, shape=(batch_size,))
labels_t = array_ops.placeholder(dtypes_lib.int32, shape=(batch_size,))
weights_t = array_ops.placeholder(dtypes_lib.float32, shape=(batch_size,))
kappa, update_op = metrics.cohen_kappa(
labels_t, predictions_t, num_classes, weights=weights_t)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
for idx in range(0, num_samples, batch_size):
batch_start, batch_end = idx, idx + batch_size
sess.run(
update_op,
feed_dict={
labels_t: labels[batch_start:batch_end],
predictions_t: predictions[batch_start:batch_end],
weights_t: weights[batch_start:batch_end]
})
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
# labels_np, predictions_np, sample_weight=weights_np)
expect = 0.289965397924
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testInvalidNumClasses(self):
predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
with self.assertRaisesRegexp(ValueError, 'num_classes'):
metrics.cohen_kappa(labels, predictions, 1)
def testInvalidDimension(self):
predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
invalid_labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 2))
with self.assertRaises(ValueError):
metrics.cohen_kappa(invalid_labels, predictions, 3)
invalid_predictions = array_ops.placeholder(
dtypes_lib.float32, shape=(4, 2))
labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
with self.assertRaises(ValueError):
metrics.cohen_kappa(labels, invalid_predictions, 3)
if __name__ == '__main__':
test.main()
| apache-2.0 |
SNeugber/OpenVault | Plotting/dataPlot.py | 1 | 1424 | import csv
import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as tkr
import pandas as pd
def getDataInFolder(dir):
    # Collect one (number of learning epochs, average travel time) pair per
    # CSV result file found in the given directory.
    outData=dict()  # unused; only outData2 is returned
    outData2=[]
    for fileName in glob.glob(dir+'/*.csv'):
        # The file name encodes the epoch count as "..._<N>epochs...".
        numruns=int(fileName[fileName.find('_')+1:fileName.find('epochs')])
        data=list(csv.reader(open(fileName),delimiter=','))
        data=np.array(data[1:len(data[0])-1]).astype('float')
        # Column 2 holds the travel times; the standard deviation is computed
        # but only the average is returned.
        avgTravelTime=np.average(data[:,2])
        stdevTravTime=np.std(data[:,2])
        outData2.append([int(numruns),avgTravelTime])
    return np.array(outData2)
dataLearnSafeOff=getDataInFolder('./safeoffdata')
dataLearnSafeOn=getDataInFolder('./safeondata')
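# Wrap the per-run averages in DataFrames so pandas can group the boxplots by
# the number of learning epochs.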
seriesSafeOff = pd.DataFrame(dataLearnSafeOff[:,1],columns=['Learned safe behaviour'])
seriesSafeOn = pd.DataFrame(dataLearnSafeOn[:,1],columns=['Enforced safe behaviour'])
seriesSafeOff['number of learning epochs'] = pd.Series(dataLearnSafeOff[:,0])
seriesSafeOn['number of learning epochs'] = pd.Series(dataLearnSafeOn[:,0])
ax = seriesSafeOff.boxplot(by='number of learning epochs')
ax.set_ylabel('Average travel time in seconds')
#plt.suptitle('Influence of learning time on learning performance in repeated experiments')
plt.suptitle('')
ax = seriesSafeOn.boxplot(by='number of learning epochs')
ax.set_ylabel('Average travel time in seconds')
plt.suptitle('')
#plt.suptitle('Influence of learning time on learning performance in repeated experiments')
plt.show() | apache-2.0 |
ebolyen/qiime2 | qiime2/tests/test_metadata.py | 1 | 26083 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import pkg_resources
import sqlite3
import unittest
import pandas as pd
import pandas.util.testing as pdt
import qiime2
from qiime2.core.testing.util import get_dummy_plugin, ReallyEqualMixin
class TestMetadata(unittest.TestCase):
def setUp(self):
self.illegal_chars = ['/', '\0', '\\', '*', '<', '>', '?', '|', '$']
def test_valid_metadata(self):
exp_index = pd.Index(['a', 'b', 'c'], dtype=object)
exp_df = pd.DataFrame({'col1': ['2', '1', '3']},
index=exp_index, dtype=object)
metadata = qiime2.Metadata(exp_df)
df = metadata.to_dataframe()
pdt.assert_frame_equal(
df, exp_df, check_dtype=True, check_index_type=True,
check_column_type=True, check_frame_type=True, check_names=True,
check_exact=True)
def test_valid_metadata_str(self):
exp_index = pd.Index(['a', 'b', 'c'], dtype=str)
exp_df = pd.DataFrame({'col1': ['2', '1', '3']},
index=exp_index, dtype=str)
metadata = qiime2.Metadata(exp_df)
df = metadata.to_dataframe()
pdt.assert_frame_equal(
df, exp_df, check_dtype=True, check_index_type=True,
check_column_type=True, check_frame_type=True, check_names=True,
check_exact=True)
def test_valid_metadata_no_columns(self):
exp_index = pd.Index(['a', 'b', 'c'], dtype=object)
exp_df = pd.DataFrame({}, index=exp_index, dtype=object)
metadata = qiime2.Metadata(exp_df)
obs_df = metadata.to_dataframe()
self.assertFalse(obs_df.index.empty)
self.assertTrue(obs_df.columns.empty)
pdt.assert_frame_equal(
obs_df, exp_df, check_dtype=True, check_index_type=True,
check_column_type=True, check_frame_type=True, check_names=True,
check_exact=True)
def test_artifacts(self):
index = pd.Index(['a', 'b', 'c'], dtype=object)
df = pd.DataFrame({'col1': ['2', '1', '3']}, index=index, dtype=object)
metadata = qiime2.Metadata(df)
self.assertEqual(metadata.artifacts, [])
def test_empty_metadata(self):
# No index, no columns.
df = pd.DataFrame([], index=[])
with self.assertRaisesRegex(ValueError, 'Metadata is empty'):
qiime2.Metadata(df)
# No index, has columns.
df = pd.DataFrame([], index=[], columns=['a', 'b'])
with self.assertRaisesRegex(ValueError, 'Metadata is empty'):
qiime2.Metadata(df)
def test_invalid_metadata_characters_in_category(self):
for val in self.illegal_chars:
index = pd.Index(['a', 'b', 'c'], dtype=object)
df = pd.DataFrame({'col1%s' % val: ['2', '1', '3']},
index=index, dtype=object)
with self.assertRaisesRegex(ValueError,
'Invalid characters.*category'):
qiime2.Metadata(df)
def test_invalid_metadata_characters_in_index(self):
for val in self.illegal_chars:
index = pd.Index(['a', 'b%s' % val, 'c'], dtype=object)
df = pd.DataFrame({'col1': ['2', '1', '3']},
index=index, dtype=object)
with self.assertRaisesRegex(ValueError,
'Invalid character.*index'):
qiime2.Metadata(df)
def test_invalid_columns_dtype(self):
with self.assertRaisesRegex(ValueError, 'Non-string.*category label'):
qiime2.Metadata(pd.DataFrame(['a', 'b', 'c']))
def test_invalid_index_dtype(self):
with self.assertRaisesRegex(ValueError, 'Non-string.*index values'):
qiime2.Metadata(pd.DataFrame({'foo': ['a', 'b', 'c']}))
def test_duplicate_categories(self):
index = pd.Index(['a', 'b'], dtype=object)
df = pd.DataFrame({'foo': [1, 2], 'bar': [3, 4]}, index=index)
df.columns = ['foo', 'foo']
with self.assertRaisesRegex(ValueError, 'Duplicate.*category'):
qiime2.Metadata(df)
def test_duplicate_indices(self):
index = pd.Index(['b', 'b', 'b'], dtype=object)
df = pd.DataFrame({'foo': [1, 2, 3]}, index=index)
with self.assertRaisesRegex(ValueError, 'Duplicate.*index values'):
qiime2.Metadata(df)
class TestMetadataLoad(unittest.TestCase):
def test_comments_and_blank_lines(self):
fp = pkg_resources.resource_filename(
'qiime2.tests', 'data/metadata/comments-n-blanks.tsv')
obs_df = qiime2.Metadata.load(fp).to_dataframe()
exp_index = pd.Index(['id1', 'id2', 'id3'], name='ID',
dtype=object)
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index, dtype=object)
pdt.assert_frame_equal(obs_df, exp_df)
def test_qiime1_mapping_file(self):
fp = pkg_resources.resource_filename(
'qiime2.tests', 'data/metadata/qiime1.tsv')
obs_df = qiime2.Metadata.load(fp).to_dataframe()
exp_index = pd.Index(['id1', 'id2', 'id3'], name='#SampleID',
dtype=object)
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index, dtype=object)
pdt.assert_frame_equal(obs_df, exp_df)
def test_qiime1_empty_mapping_file(self):
fp = pkg_resources.resource_filename(
'qiime2.tests', 'data/metadata/qiime1-empty.tsv')
with self.assertRaisesRegex(ValueError, 'empty'):
qiime2.Metadata.load(fp)
def test_no_columns(self):
fp = pkg_resources.resource_filename(
'qiime2.tests', 'data/metadata/no-columns.tsv')
metadata = qiime2.Metadata.load(fp)
obs_df = metadata.to_dataframe()
exp_index = pd.Index(['a', 'b', 'id'], name='my-index', dtype=object)
exp_df = pd.DataFrame({}, index=exp_index, dtype=object)
self.assertFalse(obs_df.index.empty)
self.assertTrue(obs_df.columns.empty)
pdt.assert_frame_equal(
obs_df, exp_df, check_dtype=True, check_index_type=True,
check_column_type=True, check_frame_type=True, check_names=True,
check_exact=True)
def test_does_not_cast_index_or_column_types(self):
fp = pkg_resources.resource_filename(
'qiime2.tests', 'data/metadata/no-type-cast.tsv')
metadata = qiime2.Metadata.load(fp)
df = metadata.to_dataframe()
exp_index = pd.Index(['0.000001', '0.004000', '0.000000'],
dtype=object, name='my-index')
exp_df = pd.DataFrame({'col1': ['2', '1', '3'],
'col2': ['b', 'b', 'c'],
'col3': ['2.5', '4.2', '-9.999']},
index=exp_index, dtype=object)
pdt.assert_frame_equal(
df, exp_df, check_dtype=True, check_index_type=True,
check_column_type=True, check_frame_type=True, check_names=True,
check_exact=True)
def test_artifacts(self):
fp = pkg_resources.resource_filename(
'qiime2.tests', 'data/metadata/simple.tsv')
metadata = qiime2.Metadata.load(fp)
self.assertEqual(metadata.artifacts, [])
def test_invalid_metadata_characters_in_category(self):
fp = pkg_resources.resource_filename(
'qiime2.tests', 'data/metadata/illegal-categories-characters.tsv')
with self.assertRaisesRegex(ValueError,
'Invalid characters.*category'):
qiime2.Metadata.load(fp)
def test_invalid_metadata_characters_in_index(self):
fp = pkg_resources.resource_filename(
'qiime2.tests', 'data/metadata/illegal-index-characters.tsv')
with self.assertRaisesRegex(ValueError,
'Invalid characters.*index'):
qiime2.Metadata.load(fp)
def test_empty(self):
fp = pkg_resources.resource_filename(
'qiime2.tests', 'data/metadata/empty')
with self.assertRaises(pd.errors.EmptyDataError):
qiime2.Metadata.load(fp)
class TestMetadataFromArtifact(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
def test_from_artifact(self):
A = qiime2.Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
md = qiime2.Metadata.from_artifact(A)
pdt.assert_frame_equal(md.to_dataframe(),
pd.DataFrame({'a': '1', 'b': '3'}, index=['0']))
def test_from_bad_artifact(self):
A = qiime2.Artifact.import_data('IntSequence1', [1, 2, 3, 4])
with self.assertRaisesRegex(ValueError, 'Artifact has no metadata'):
qiime2.Metadata.from_artifact(A)
def test_invalid_metadata_characters_in_category(self):
A = qiime2.Artifact.import_data('Mapping', {'a': '1', '>b': '3'})
with self.assertRaisesRegex(ValueError, 'Invalid characters'):
qiime2.Metadata.from_artifact(A)
def test_artifacts(self):
A = qiime2.Artifact.import_data('Mapping', {'a': ['1', '2'],
'b': ['2', '3']})
md = qiime2.Metadata.from_artifact(A)
obs = md.artifacts
self.assertEqual(obs, [A])
class TestGetCategory(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
def test_artifacts_are_propagated(self):
A = qiime2.Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
md = qiime2.Metadata.from_artifact(A)
obs = md.get_category('b')
self.assertEqual(obs.artifacts, [A])
pdt.assert_series_equal(obs.to_series(),
pd.Series(['3'], index=['0'], name='b'))
class TestMerge(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
def test_merging_one(self):
md = qiime2.Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['id1', 'id2', 'id3']))
obs = md.merge()
self.assertIsNot(obs, md)
self.assertEqual(obs, md)
def test_merging_two(self):
md1 = qiime2.Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['id1', 'id2', 'id3']))
md2 = qiime2.Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]}, index=['id1', 'id2', 'id3']))
obs = md1.merge(md2)
exp = qiime2.Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12]}, index=['id1', 'id2', 'id3']))
self.assertEqual(obs, exp)
def test_merging_three(self):
md1 = qiime2.Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['id1', 'id2', 'id3']))
md2 = qiime2.Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]}, index=['id1', 'id2', 'id3']))
md3 = qiime2.Metadata(pd.DataFrame(
{'e': [13, 14, 15], 'f': [16, 17, 18]},
index=['id1', 'id2', 'id3']))
obs = md1.merge(md2, md3)
exp = qiime2.Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12],
'e': [13, 14, 15], 'f': [16, 17, 18]},
index=['id1', 'id2', 'id3']))
self.assertEqual(obs, exp)
def test_merging_unaligned_indices(self):
md1 = qiime2.Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['id1', 'id2', 'id3']))
md2 = qiime2.Metadata(pd.DataFrame(
{'c': [9, 8, 7], 'd': [12, 11, 10]}, index=['id3', 'id2', 'id1']))
md3 = qiime2.Metadata(pd.DataFrame(
{'e': [13, 15, 14], 'f': [16, 18, 17]},
index=['id1', 'id3', 'id2']))
obs = md1.merge(md2, md3)
exp = qiime2.Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12],
'e': [13, 14, 15], 'f': [16, 17, 18]},
index=['id1', 'id2', 'id3']))
self.assertEqual(obs, exp)
def test_inner_join(self):
md1 = qiime2.Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['id1', 'id2', 'id3']))
md2 = qiime2.Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]}, index=['id2', 'X', 'Y']))
md3 = qiime2.Metadata(pd.DataFrame(
{'e': [13, 14, 15], 'f': [16, 17, 18]}, index=['X', 'id3', 'id2']))
# Single shared ID.
obs = md1.merge(md2, md3)
exp = qiime2.Metadata(pd.DataFrame(
{'a': [2], 'b': [5], 'c': [7], 'd': [10], 'e': [15], 'f': [18]},
index=['id2']))
self.assertEqual(obs, exp)
# Multiple shared IDs.
obs = md1.merge(md3)
exp = qiime2.Metadata(pd.DataFrame(
{'a': [2, 3], 'b': [5, 6], 'e': [15, 14], 'f': [18, 17]},
index=['id2', 'id3']))
self.assertEqual(obs, exp)
def test_index_and_column_merge_order(self):
md1 = qiime2.Metadata(pd.DataFrame(
[[1], [2], [3], [4]],
index=['id1', 'id2', 'id3', 'id4'], columns=['a']))
md2 = qiime2.Metadata(pd.DataFrame(
[[5], [6], [7]], index=['id4', 'id3', 'id1'], columns=['b']))
md3 = qiime2.Metadata(pd.DataFrame(
[[8], [9], [10]], index=['id1', 'id4', 'id3'], columns=['c']))
obs = md1.merge(md2, md3)
exp = qiime2.Metadata(pd.DataFrame(
[[1, 7, 8], [3, 6, 10], [4, 5, 9]],
index=['id1', 'id3', 'id4'], columns=['a', 'b', 'c']))
self.assertEqual(obs, exp)
# Merging in different order produces different index/column order.
obs = md2.merge(md1, md3)
exp = qiime2.Metadata(pd.DataFrame(
[[5, 4, 9], [6, 3, 10], [7, 1, 8]],
index=['id4', 'id3', 'id1'], columns=['b', 'a', 'c']))
self.assertEqual(obs, exp)
def test_no_columns(self):
md1 = qiime2.Metadata(pd.DataFrame({}, index=['id1', 'id2', 'id3']))
md2 = qiime2.Metadata(pd.DataFrame({}, index=['id2', 'X', 'id1']))
md3 = qiime2.Metadata(pd.DataFrame({}, index=['id1', 'id3', 'id2']))
obs = md1.merge(md2, md3)
exp = qiime2.Metadata(pd.DataFrame({}, index=['id1', 'id2']))
self.assertEqual(obs, exp)
def test_index_and_column_names(self):
md1 = qiime2.Metadata(pd.DataFrame(
{'a': [1, 2]},
index=pd.Index(['id1', 'id2'], name='foo'),
columns=pd.Index(['a'], name='abc')))
md2 = qiime2.Metadata(pd.DataFrame(
{'b': [3, 4]},
index=pd.Index(['id1', 'id2'], name='bar'),
columns=pd.Index(['b'], name='def')))
obs = md1.merge(md2)
exp = qiime2.Metadata(pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]}, index=['id1', 'id2']))
self.assertEqual(obs, exp)
self.assertIsNone(obs._dataframe.index.name)
self.assertIsNone(obs._dataframe.columns.name)
def test_no_artifacts(self):
md1 = qiime2.Metadata(pd.DataFrame(
{'a': [1, 2]}, index=['id1', 'id2']))
md2 = qiime2.Metadata(pd.DataFrame(
{'b': [3, 4]}, index=['id1', 'id2']))
metadata = md1.merge(md2)
self.assertEqual(metadata.artifacts, [])
def test_with_artifacts(self):
artifact1 = qiime2.Artifact.import_data('Mapping',
{'a': '1', 'b': '2'})
artifact2 = qiime2.Artifact.import_data('Mapping', {'d': '4'})
md_from_artifact1 = qiime2.Metadata.from_artifact(artifact1)
md_from_artifact2 = qiime2.Metadata.from_artifact(artifact2)
md_no_artifact = qiime2.Metadata(pd.DataFrame(
{'c': ['3', '42']}, index=['0', '1']))
# Merge three metadata objects -- the first has an artifact, the second
# does not, and the third has an artifact.
obs = md_from_artifact1.merge(md_no_artifact, md_from_artifact2)
exp = pd.DataFrame(
{'a': '1', 'b': '2', 'c': '3', 'd': '4'}, index=['0'])
pdt.assert_frame_equal(obs.to_dataframe(), exp)
self.assertEqual(obs.artifacts, [artifact1, artifact2])
def test_disjoint_indices(self):
md1 = qiime2.Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['id1', 'id2', 'id3']))
md2 = qiime2.Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]}, index=['X', 'Y', 'Z']))
with self.assertRaisesRegex(ValueError, 'no IDs shared'):
md1.merge(md2)
def test_duplicate_columns(self):
md1 = qiime2.Metadata(pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]}, index=['id1', 'id2']))
md2 = qiime2.Metadata(pd.DataFrame(
{'c': [5, 6], 'b': [7, 8]}, index=['id1', 'id2']))
with self.assertRaisesRegex(ValueError, "categories overlap: 'b'"):
md1.merge(md2)
def test_duplicate_columns_self_merge(self):
md = qiime2.Metadata(pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]}, index=['id1', 'id2']))
with self.assertRaisesRegex(ValueError,
"categories overlap: 'a', 'b'"):
md.merge(md)
class TestIDs(unittest.TestCase):
def test_default(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = qiime2.Metadata(df)
actual = metadata.ids()
expected = {'S1', 'S2', 'S3'}
self.assertEqual(actual, expected)
def test_incomplete_where(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=['S1', 'S2', 'S3'])
metadata = qiime2.Metadata(df)
where = "Subject='subject-1' AND SampleType="
with self.assertRaises(ValueError):
metadata.ids(where)
where = "Subject="
with self.assertRaises(ValueError):
metadata.ids(where)
def test_invalid_where(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=['S1', 'S2', 'S3'])
metadata = qiime2.Metadata(df)
where = "not-a-column-name='subject-1'"
with self.assertRaises(ValueError):
metadata.ids(where)
def test_empty_result(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = qiime2.Metadata(df)
where = "Subject='subject-3'"
actual = metadata.ids(where)
expected = set()
self.assertEqual(actual, expected)
def test_simple_expression(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = qiime2.Metadata(df)
where = "Subject='subject-1'"
actual = metadata.ids(where)
expected = {'S1', 'S2'}
self.assertEqual(actual, expected)
where = "Subject='subject-2'"
actual = metadata.ids(where)
expected = {'S3'}
self.assertEqual(actual, expected)
where = "Subject='subject-3'"
actual = metadata.ids(where)
expected = set()
self.assertEqual(actual, expected)
where = "SampleType='gut'"
actual = metadata.ids(where)
expected = {'S1', 'S3'}
self.assertEqual(actual, expected)
where = "SampleType='tongue'"
actual = metadata.ids(where)
expected = {'S2'}
self.assertEqual(actual, expected)
def test_more_complex_expressions(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = qiime2.Metadata(df)
where = "Subject='subject-1' OR Subject='subject-2'"
actual = metadata.ids(where)
expected = {'S1', 'S2', 'S3'}
self.assertEqual(actual, expected)
where = "Subject='subject-1' AND Subject='subject-2'"
actual = metadata.ids(where)
expected = set()
self.assertEqual(actual, expected)
where = "Subject='subject-1' AND SampleType='gut'"
actual = metadata.ids(where)
expected = {'S1'}
self.assertEqual(actual, expected)
def test_index_without_name(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=['S1', 'S2', 'S3'])
metadata = qiime2.Metadata(df)
actual = metadata.ids(where="SampleType='gut'")
expected = {'S1', 'S3'}
self.assertEqual(actual, expected)
def test_index_with_column_name_clash(self):
df = pd.DataFrame(
{'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='SampleType'))
metadata = qiime2.Metadata(df)
with self.assertRaises(sqlite3.OperationalError):
metadata.ids(where="Subject='subject-1'")
def test_query_by_index(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = qiime2.Metadata(df)
actual = metadata.ids(where="id='S2' OR id='S1'")
expected = {'S1', 'S2'}
self.assertEqual(actual, expected)
def test_no_columns(self):
fp = pkg_resources.resource_filename(
'qiime2.tests', 'data/metadata/no-columns.tsv')
metadata = qiime2.Metadata.load(fp)
obs = metadata.ids()
exp = {'a', 'b', 'id'}
self.assertEqual(obs, exp)
class TestEqualityOperators(unittest.TestCase, ReallyEqualMixin):
def setUp(self):
get_dummy_plugin()
def test_type_mismatch(self):
fp = pkg_resources.resource_filename(
'qiime2.tests', 'data/metadata/simple.tsv')
md = qiime2.Metadata.load(fp)
mdc = qiime2.MetadataCategory.load(fp, 'col1')
self.assertIsInstance(md, qiime2.Metadata)
self.assertIsInstance(mdc, qiime2.MetadataCategory)
self.assertReallyNotEqual(md, mdc)
def test_source_mismatch(self):
# Metadata created from an artifact vs not shouldn't compare equal,
# even if the data is the same.
artifact = qiime2.Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md_from_artifact = qiime2.Metadata.from_artifact(artifact)
md_no_artifact = qiime2.Metadata(pd.DataFrame(
{'a': '1', 'b': '2'}, index=['0']))
pdt.assert_frame_equal(md_from_artifact.to_dataframe(),
md_no_artifact.to_dataframe())
self.assertReallyNotEqual(md_from_artifact, md_no_artifact)
def test_artifact_mismatch(self):
# Metadata created from different artifacts shouldn't compare equal,
# even if the data is the same.
artifact1 = qiime2.Artifact.import_data('Mapping',
{'a': '1', 'b': '2'})
artifact2 = qiime2.Artifact.import_data('Mapping',
{'a': '1', 'b': '2'})
md1 = qiime2.Metadata.from_artifact(artifact1)
md2 = qiime2.Metadata.from_artifact(artifact2)
pdt.assert_frame_equal(md1.to_dataframe(), md2.to_dataframe())
self.assertReallyNotEqual(md1, md2)
def test_index_mismatch(self):
md1 = qiime2.Metadata(pd.DataFrame({'a': '1', 'b': '2'}, index=['0']))
md2 = qiime2.Metadata(pd.DataFrame({'a': '1', 'b': '2'}, index=['1']))
self.assertReallyNotEqual(md1, md2)
def test_column_mismatch(self):
md1 = qiime2.Metadata(pd.DataFrame({'a': '1', 'b': '2'}, index=['0']))
md2 = qiime2.Metadata(pd.DataFrame({'a': '1', 'c': '2'}, index=['0']))
self.assertReallyNotEqual(md1, md2)
def test_data_mismatch(self):
md1 = qiime2.Metadata(pd.DataFrame({'a': '1', 'b': '3'}, index=['0']))
md2 = qiime2.Metadata(pd.DataFrame({'a': '1', 'b': '2'}, index=['0']))
self.assertReallyNotEqual(md1, md2)
def test_equality_without_artifact(self):
md1 = qiime2.Metadata(pd.DataFrame({'a': '1', 'b': '3'}, index=['0']))
md2 = qiime2.Metadata(pd.DataFrame({'a': '1', 'b': '3'}, index=['0']))
self.assertReallyEqual(md1, md2)
def test_equality_with_artifact(self):
artifact = qiime2.Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = qiime2.Metadata.from_artifact(artifact)
md2 = qiime2.Metadata.from_artifact(artifact)
self.assertReallyEqual(md1, md2)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
freedomtan/workload-automation | wlauto/instrumentation/fps/__init__.py | 2 | 15269 | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=W0613,E1101
from __future__ import division
import os
import sys
import time
import csv
import shutil
import threading
import errno
import tempfile
from distutils.version import LooseVersion
from wlauto import Instrument, Parameter, IterationResult
from wlauto.instrumentation import instrument_is_installed
from wlauto.exceptions import (InstrumentError, WorkerThreadError, ConfigError,
DeviceNotRespondingError, TimeoutError)
from wlauto.utils.types import boolean, numeric
try:
import pandas as pd
except ImportError:
pd = None
VSYNC_INTERVAL = 16666667
EPSYLON = 0.0001
class FpsInstrument(Instrument):
name = 'fps'
description = """
Measures Frames Per Second (FPS) and associated metrics for a workload's main View.
.. note:: This instrument depends on pandas Python library (which is not part of standard
WA dependencies), so you will need to install that first, before you can use it.
The view is specified by the workload as ``view`` attribute. This defaults
to ``'SurfaceView'`` for game workloads, and ``None`` for non-game
    workloads (where FPS measurement usually doesn't make sense).
Individual workloads may override this.
This instrument adds four metrics to the results:
:FPS: Frames Per Second. This is the frame rate of the workload.
:frames: The total number of frames rendered during the execution of
the workload.
        :janks: The number of "janks" that occurred during execution of the
workload. Janks are sudden shifts in frame rate. They result
in a "stuttery" UI. See http://jankfree.org/jank-busters-io
:not_at_vsync: The number of frames that did not render in a single
vsync cycle.
"""
supported_platforms = ['android']
parameters = [
Parameter('drop_threshold', kind=numeric, default=5,
description='Data points below this FPS will be dropped as they '
'do not constitute "real" gameplay. The assumption '
'being that while actually running, the FPS in the '
'game will not drop below X frames per second, '
'except on loading screens, menus, etc, which '
'should not contribute to FPS calculation. '),
Parameter('keep_raw', kind=boolean, default=False,
description='If set to ``True``, this will keep the raw dumpsys output '
                              'in the results directory (this is mainly used for debugging). '
'Note: frames.csv with collected frames data will always be '
'generated regardless of this setting.'),
Parameter('generate_csv', kind=boolean, default=True,
description='If set to ``True``, this will produce temporal fps data '
'in the results directory, in a file named fps.csv '
'Note: fps data will appear as discrete step-like values '
                              'in order to produce a more meaningful representation, '
'a rolling mean can be applied.'),
Parameter('crash_check', kind=boolean, default=True,
description="""
                  Specifies whether the instrument should check for crashed content by examining
frame data. If this is set, ``execution_time`` instrument must also be installed.
                  The check is performed by using the measured FPS and execution time to estimate the expected
                  frame count and comparing that against the measured frame count. If the ratio of
measured/expected is too low, then it is assumed that the content has crashed part way
during the run. What is "too low" is determined by ``crash_threshold``.
.. note:: This is not 100\% fool-proof. If the crash occurs sufficiently close to
workload's termination, it may not be detected. If this is expected, the
threshold may be adjusted up to compensate.
"""),
Parameter('crash_threshold', kind=float, default=0.7,
description="""
                  Specifies the threshold used to decide whether a measured/expected frames ratio indicates
                  a content crash. E.g. a value of ``0.75`` means that if the number of frames actually
                  counted falls more than a quarter short of the expected count, it will be treated as a content crash.
"""),
]
clear_command = 'dumpsys SurfaceFlinger --latency-clear '
def __init__(self, device, **kwargs):
super(FpsInstrument, self).__init__(device, **kwargs)
self.collector = None
self.outfile = None
self.fps_outfile = None
self.is_enabled = True
def validate(self):
if not pd or LooseVersion(pd.__version__) < LooseVersion('0.13.1'):
message = ('fps instrument requires pandas Python package (version 0.13.1 or higher) to be installed.\n'
'You can install it with pip, e.g. "sudo pip install pandas"')
raise InstrumentError(message)
if self.crash_check and not instrument_is_installed('execution_time'):
raise ConfigError('execution_time instrument must be installed in order to check for content crash.')
def setup(self, context):
workload = context.workload
if hasattr(workload, 'view'):
self.fps_outfile = os.path.join(context.output_directory, 'fps.csv')
self.outfile = os.path.join(context.output_directory, 'frames.csv')
self.collector = LatencyCollector(self.outfile, self.device, workload.view or '', self.keep_raw, self.logger)
self.device.execute(self.clear_command)
else:
self.logger.debug('Workload does not contain a view; disabling...')
self.is_enabled = False
def start(self, context):
if self.is_enabled:
self.logger.debug('Starting SurfaceFlinger collection...')
self.collector.start()
def stop(self, context):
if self.is_enabled and self.collector.is_alive():
self.logger.debug('Stopping SurfaceFlinger collection...')
self.collector.stop()
def update_result(self, context):
if self.is_enabled:
data = pd.read_csv(self.outfile)
if not data.empty: # pylint: disable=maybe-no-member
per_frame_fps = self._update_stats(context, data)
if self.generate_csv:
per_frame_fps.to_csv(self.fps_outfile, index=False, header=True)
context.add_artifact('fps', path='fps.csv', kind='data')
else:
context.result.add_metric('FPS', float('nan'))
context.result.add_metric('frame_count', 0)
context.result.add_metric('janks', 0)
context.result.add_metric('not_at_vsync', 0)
def slow_update_result(self, context):
result = context.result
if result.has_metric('execution_time'):
self.logger.debug('Checking for crashed content.')
exec_time = result['execution_time'].value
fps = result['FPS'].value
frames = result['frame_count'].value
if all([exec_time, fps, frames]):
expected_frames = fps * exec_time
ratio = frames / expected_frames
self.logger.debug('actual/expected frames: {:.2}'.format(ratio))
if ratio < self.crash_threshold:
self.logger.error('Content for {} appears to have crashed.'.format(context.spec.label))
result.status = IterationResult.FAILED
result.add_event('Content crash detected (actual/expected frames: {:.2}).'.format(ratio))
def _update_stats(self, context, data):
vsync_interval = self.collector.refresh_period
actual_present_time_deltas = (data.actual_present_time - data.actual_present_time.shift()).drop(0) # pylint: disable=E1103
vsyncs_to_compose = (actual_present_time_deltas / vsync_interval).apply(lambda x: int(round(x, 0)))
# drop values lower than drop_threshold FPS as real in-game frame
# rate is unlikely to drop below that (except on loading screens
# etc, which should not be factored in frame rate calculation).
per_frame_fps = (1.0 / (vsyncs_to_compose * (vsync_interval / 1e9)))
keep_filter = per_frame_fps > self.drop_threshold
filtered_vsyncs_to_compose = vsyncs_to_compose[keep_filter]
if not filtered_vsyncs_to_compose.empty:
total_vsyncs = filtered_vsyncs_to_compose.sum()
if total_vsyncs:
frame_count = filtered_vsyncs_to_compose.size
fps = 1e9 * frame_count / (vsync_interval * total_vsyncs)
context.result.add_metric('FPS', fps)
context.result.add_metric('frame_count', frame_count)
else:
context.result.add_metric('FPS', float('nan'))
context.result.add_metric('frame_count', 0)
vtc_deltas = filtered_vsyncs_to_compose - filtered_vsyncs_to_compose.shift()
vtc_deltas.index = range(0, vtc_deltas.size)
vtc_deltas = vtc_deltas.drop(0).abs()
janks = vtc_deltas.apply(lambda x: (x > EPSYLON) and 1 or 0).sum()
not_at_vsync = vsyncs_to_compose.apply(lambda x: (abs(x - 1.0) > EPSYLON) and 1 or 0).sum()
context.result.add_metric('janks', janks)
context.result.add_metric('not_at_vsync', not_at_vsync)
else: # no filtered_vsyncs_to_compose
context.result.add_metric('FPS', float('nan'))
context.result.add_metric('frame_count', 0)
context.result.add_metric('janks', 0)
context.result.add_metric('not_at_vsync', 0)
per_frame_fps.name = 'fps'
return per_frame_fps
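# Editorial sketch, not part of the original instrument: the arithmetic used in
# _update_stats above, worked through on made-up frame timestamps. The helper
# name and the numbers are illustrative only; nothing in the instrument calls it.
def _example_fps_arithmetic():
    import pandas
    vsync = VSYNC_INTERVAL
    # Five frames, each composed one vsync apart except the fourth, which takes
    # three vsync periods to compose (a "jank").
    actual_present_time = pandas.Series([0, vsync, 2 * vsync, 5 * vsync, 6 * vsync])
    deltas = (actual_present_time - actual_present_time.shift()).drop(0)
    vsyncs_to_compose = (deltas / vsync).apply(lambda x: int(round(x, 0)))  # 1, 1, 3, 1
    frame_count = vsyncs_to_compose.size       # 4 frames
    total_vsyncs = vsyncs_to_compose.sum()     # 6 vsync periods
    # Same formula as _update_stats: ~40 FPS here, with 2 janks (the 1->3 and 3->1 shifts).
    return 1e9 * frame_count / (vsync * total_vsyncs)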
class LatencyCollector(threading.Thread):
# Note: the size of the frames buffer for a particular surface is defined
# by NUM_FRAME_RECORDS inside android/services/surfaceflinger/FrameTracker.h.
# At the time of writing, this was hard-coded to 128. So at 60 fps
# (and there is no reason to go above that, as it matches vsync rate
# on pretty much all phones), there is just over 2 seconds' worth of
# frames in there. Hence the sleep time of 2 seconds between dumps.
#command_template = 'while (true); do dumpsys SurfaceFlinger --latency {}; sleep 2; done'
command_template = 'dumpsys SurfaceFlinger --latency {}'
def __init__(self, outfile, device, activity, keep_raw, logger):
super(LatencyCollector, self).__init__()
self.outfile = outfile
self.device = device
self.command = self.command_template.format(activity)
self.keep_raw = keep_raw
self.logger = logger
self.stop_signal = threading.Event()
self.frames = []
self.last_ready_time = 0
self.refresh_period = VSYNC_INTERVAL
self.drop_threshold = self.refresh_period * 1000
self.exc = None
self.unresponsive_count = 0
def run(self):
try:
self.logger.debug('SurfaceFlinger collection started.')
self.stop_signal.clear()
fd, temp_file = tempfile.mkstemp()
self.logger.debug('temp file: {}'.format(temp_file))
wfh = os.fdopen(fd, 'wb')
try:
while not self.stop_signal.is_set():
wfh.write(self.device.execute(self.command))
time.sleep(2)
finally:
wfh.close()
# TODO: this can happen after the run during results processing
with open(temp_file) as fh:
text = fh.read().replace('\r\n', '\n').replace('\r', '\n')
for line in text.split('\n'):
line = line.strip()
if line:
self._process_trace_line(line)
if self.keep_raw:
raw_file = os.path.join(os.path.dirname(self.outfile), 'surfaceflinger.raw')
shutil.copy(temp_file, raw_file)
os.unlink(temp_file)
except (DeviceNotRespondingError, TimeoutError): # pylint: disable=W0703
raise
except Exception, e: # pylint: disable=W0703
self.logger.warning('Exception on collector thread: {}({})'.format(e.__class__.__name__, e))
self.exc = WorkerThreadError(self.name, sys.exc_info())
self.logger.debug('SurfaceFlinger collection stopped.')
with open(self.outfile, 'w') as wfh:
writer = csv.writer(wfh)
writer.writerow(['desired_present_time', 'actual_present_time', 'frame_ready_time'])
writer.writerows(self.frames)
self.logger.debug('Frames data written.')
def stop(self):
self.stop_signal.set()
self.join()
if self.unresponsive_count:
            message = 'SurfaceFlinger was unresponsive {} times.'.format(self.unresponsive_count)
if self.unresponsive_count > 10:
self.logger.warning(message)
else:
self.logger.debug(message)
if self.exc:
raise self.exc # pylint: disable=E0702
        self.logger.debug('FPS collection complete.')
def _process_trace_line(self, line):
parts = line.split()
if len(parts) == 3:
desired_present_time, actual_present_time, frame_ready_time = map(int, parts)
if frame_ready_time <= self.last_ready_time:
return # duplicate frame
if (frame_ready_time - desired_present_time) > self.drop_threshold:
self.logger.debug('Dropping bogus frame {}.'.format(line))
return # bogus data
self.last_ready_time = frame_ready_time
self.frames.append((desired_present_time, actual_present_time, frame_ready_time))
elif len(parts) == 1:
self.refresh_period = int(parts[0])
self.drop_threshold = self.refresh_period * 10
elif 'SurfaceFlinger appears to be unresponsive, dumping anyways' in line:
self.unresponsive_count += 1
else:
self.logger.warning('Unexpected SurfaceFlinger dump output: {}'.format(line))
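# Editorial note, inferred from _process_trace_line above and not captured output:
# each `dumpsys SurfaceFlinger --latency <view>` dump is expected to begin with a
# single number (the display refresh period in nanoseconds), followed by one line
# per frame carrying three timestamps, e.g.
#
#   16666667
#   446346729368904 446346746034571 446346746034571
#   446346746035000 446346762701238 446346762701238
#
# in the order (desired present time, actual present time, frame ready time).
# The numeric values above are illustrative only.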
| apache-2.0 |
BhallaLab/moose-full | moose-examples/snippets/MULTI/multi1_ee.py | 2 | 14165 | # multi1.py ---
# Upi Bhalla, NCBS Bangalore 2014.
#
# Commentary:
#
# This loads in a medium-detail model incorporating
# reac-diff and elec signaling in neurons. The reac-diff model
# has just Ca and CaM in it, and there are no cross-compartment
# reactions though Ca diffuses everywhere. The elec model controls the
# Ca levels in the chem compartments.
# This version uses solvers for both chem and electrical parts.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
# Code:
import sys
sys.path.append('../../python')
import os
os.environ['NUMPTHREADS'] = '1'
import math
import numpy
import matplotlib.pyplot as plt
import moose
import proto18
EREST_ACT = -70e-3
def loadElec():
library = moose.Neutral( '/library' )
moose.setCwe( '/library' )
proto18.make_Ca()
proto18.make_Ca_conc()
proto18.make_K_AHP()
proto18.make_K_C()
proto18.make_Na()
proto18.make_K_DR()
proto18.make_K_A()
proto18.make_glu()
proto18.make_NMDA()
proto18.make_Ca_NMDA()
proto18.make_NMDA_Ca_conc()
proto18.make_axon()
moose.setCwe( '/library' )
model = moose.Neutral( '/model' )
cellId = moose.loadModel( 'ca1_asym.p', '/model/elec', "Neutral" )
return cellId
def loadChem( diffLength ):
chem = moose.Neutral( '/model/chem' )
neuroCompt = moose.NeuroMesh( '/model/chem/kinetics' )
neuroCompt.separateSpines = 1
neuroCompt.geometryPolicy = 'cylinder'
spineCompt = moose.SpineMesh( '/model/chem/compartment_1' )
moose.connect( neuroCompt, 'spineListOut', spineCompt, 'spineList', 'OneToOne' )
psdCompt = moose.PsdMesh( '/model/chem/compartment_2' )
#print 'Meshvolume[neuro, spine, psd] = ', neuroCompt.mesh[0].volume, spineCompt.mesh[0].volume, psdCompt.mesh[0].volume
moose.connect( neuroCompt, 'psdListOut', psdCompt, 'psdList', 'OneToOne' )
modelId = moose.loadModel( 'minimal.g', '/model/chem', 'ee' )
#modelId = moose.loadModel( 'psd_merged31d.g', '/model/chem', 'ee' )
neuroCompt.name = 'dend'
spineCompt.name = 'spine'
psdCompt.name = 'psd'
def makeNeuroMeshModel():
diffLength = 10e-6 # Aim for 2 soma compartments.
elec = loadElec()
loadChem( diffLength )
neuroCompt = moose.element( '/model/chem/dend' )
neuroCompt.diffLength = diffLength
neuroCompt.cellPortion( elec, '/model/elec/#' )
for x in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ):
if (x.diffConst > 0):
x.diffConst = 1e-11
for x in moose.wildcardFind( '/model/chem/##/Ca' ):
x.diffConst = 1e-10
# Put in dend solvers
ns = neuroCompt.numSegments
ndc = neuroCompt.numDiffCompts
print 'ns = ', ns, ', ndc = ', ndc
assert( neuroCompt.numDiffCompts == neuroCompt.mesh.num )
assert( ns == 36 ) #
assert( ndc == 278 ) #
nmksolve = moose.Ksolve( '/model/chem/dend/ksolve' )
nmdsolve = moose.Dsolve( '/model/chem/dend/dsolve' )
nmstoich = moose.Stoich( '/model/chem/dend/stoich' )
nmstoich.compartment = neuroCompt
nmstoich.ksolve = nmksolve
nmstoich.dsolve = nmdsolve
nmstoich.path = "/model/chem/dend/##"
print 'done setting path, numPools = ', nmdsolve.numPools
assert( nmdsolve.numPools == 1 )
assert( nmdsolve.numAllVoxels == ndc )
assert( nmstoich.numAllPools == 1 )
# oddly, numLocalFields does not work.
ca = moose.element( '/model/chem/dend/DEND/Ca' )
assert( ca.numData == ndc )
# Put in spine solvers. Note that these get info from the neuroCompt
spineCompt = moose.element( '/model/chem/spine' )
sdc = spineCompt.mesh.num
print 'sdc = ', sdc
assert( sdc == 13 )
smksolve = moose.Ksolve( '/model/chem/spine/ksolve' )
smdsolve = moose.Dsolve( '/model/chem/spine/dsolve' )
smstoich = moose.Stoich( '/model/chem/spine/stoich' )
smstoich.compartment = spineCompt
smstoich.ksolve = smksolve
smstoich.dsolve = smdsolve
smstoich.path = "/model/chem/spine/##"
print 'spine num Pools = ', smstoich.numAllPools
assert( smstoich.numAllPools == 3 )
assert( smdsolve.numPools == 3 )
assert( smdsolve.numAllVoxels == sdc )
# Put in PSD solvers. Note that these get info from the neuroCompt
psdCompt = moose.element( '/model/chem/psd' )
pdc = psdCompt.mesh.num
assert( pdc == 13 )
pmksolve = moose.Ksolve( '/model/chem/psd/ksolve' )
pmdsolve = moose.Dsolve( '/model/chem/psd/dsolve' )
pmstoich = moose.Stoich( '/model/chem/psd/stoich' )
pmstoich.compartment = psdCompt
pmstoich.ksolve = pmksolve
pmstoich.dsolve = pmdsolve
pmstoich.path = "/model/chem/psd/##"
assert( pmstoich.numAllPools == 3 )
assert( pmdsolve.numPools == 3 )
assert( pmdsolve.numAllVoxels == pdc )
foo = moose.element( '/model/chem/psd/Ca' )
print 'PSD: numfoo = ', foo.numData
print 'PSD: numAllVoxels = ', pmksolve.numAllVoxels
# Put in junctions between the diffusion solvers
nmdsolve.buildNeuroMeshJunctions( smdsolve, pmdsolve )
"""
CaNpsd = moose.vec( '/model/chem/psdMesh/PSD/PP1_PSD/CaN' )
print 'numCaN in PSD = ', CaNpsd.nInit, ', vol = ', CaNpsd.volume
CaNspine = moose.vec( '/model/chem/spine/SPINE/CaN_BULK/CaN' )
print 'numCaN in spine = ', CaNspine.nInit, ', vol = ', CaNspine.volume
"""
##################################################################
# set up adaptors
aCa = moose.Adaptor( '/model/chem/spine/adaptCa', sdc )
adaptCa = moose.vec( '/model/chem/spine/adaptCa' )
chemCa = moose.vec( '/model/chem/spine/Ca' )
#print 'aCa = ', aCa, ' foo = ', foo, "len( ChemCa ) = ", len( chemCa ), ", numData = ", chemCa.numData, "len( adaptCa ) = ", len( adaptCa )
assert( len( adaptCa ) == sdc )
assert( len( chemCa ) == sdc )
for i in range( sdc ):
elecCa = moose.element( '/model/elec/spine_head_14_' + str(i+1) + '/NMDA_Ca_conc' )
#print elecCa
moose.connect( elecCa, 'concOut', adaptCa[i], 'input', 'Single' )
moose.connect( adaptCa, 'output', chemCa, 'setConc', 'OneToOne' )
adaptCa.inputOffset = 0.0 #
adaptCa.outputOffset = 0.00008 # 80 nM offset in chem.
adaptCa.scale = 1e-4 # 520 to 0.0052 mM
#print adaptCa.outputOffset
moose.le( '/model/chem/dend/DEND' )
compts = neuroCompt.elecComptList
begin = neuroCompt.startVoxelInCompt
end = neuroCompt.endVoxelInCompt
aCa = moose.Adaptor( '/model/chem/dend/DEND/adaptCa', len( compts))
adaptCa = moose.vec( '/model/chem/dend/DEND/adaptCa' )
chemCa = moose.vec( '/model/chem/dend/DEND/Ca' )
#print 'aCa = ', aCa, ' foo = ', foo, "len( ChemCa ) = ", len( chemCa ), ", numData = ", chemCa.numData, "len( adaptCa ) = ", len( adaptCa )
assert( len( chemCa ) == ndc )
for i in zip( compts, adaptCa, begin, end ):
name = i[0].path + '/Ca_conc'
if ( moose.exists( name ) ):
elecCa = moose.element( name )
#print i[2], i[3], ' ', elecCa
#print i[1]
moose.connect( elecCa, 'concOut', i[1], 'input', 'Single' )
for j in range( i[2], i[3] ):
moose.connect( i[1], 'output', chemCa[j], 'setConc', 'Single' )
adaptCa.inputOffset = 0.0 #
adaptCa.outputOffset = 0.00008 # 80 nM offset in chem.
adaptCa.scale = 20e-6 # 10 arb units to 2 uM.
def addPlot( objpath, field, plot ):
#assert moose.exists( objpath )
if moose.exists( objpath ):
tab = moose.Table( '/graphs/' + plot )
obj = moose.element( objpath )
if obj.className == 'Neutral':
print "addPlot failed: object is a Neutral: ", objpath
return moose.element( '/' )
else:
#print "object was found: ", objpath, obj.className
moose.connect( tab, 'requestOut', obj, field )
return tab
else:
print "addPlot failed: object not found: ", objpath
return moose.element( '/' )
def makeCaPlots():
graphs = moose.Neutral( '/graphs' )
ca = moose.Neutral( '/graphs/ca' )
addPlot( '/model/elec/soma/Ca_conc', 'getCa', 'ca/somaCa' )
addPlot( '/model/elec/lat_11_2/Ca_conc', 'getCa', 'ca/lat11Ca' )
addPlot( '/model/elec/spine_head_14_4/NMDA_Ca_conc', 'getCa', 'ca/spine4Ca' )
addPlot( '/model/elec/spine_head_14_12/NMDA_Ca_conc', 'getCa', 'ca/spine12Ca' )
def makeElecPlots():
graphs = moose.Neutral( '/graphs' )
elec = moose.Neutral( '/graphs/elec' )
addPlot( '/model/elec/soma', 'getVm', 'elec/somaVm' )
addPlot( '/model/elec/spine_head_14_4', 'getVm', 'elec/spineVm' )
def makeChemPlots():
graphs = moose.Neutral( '/graphs' )
chem = moose.Neutral( '/graphs/chem' )
addPlot( '/model/chem/psd/Ca_CaM', 'getConc', 'chem/psdCaCam' )
addPlot( '/model/chem/psd/Ca', 'getConc', 'chem/psdCa' )
addPlot( '/model/chem/spine/Ca_CaM', 'getConc', 'chem/spineCaCam' )
addPlot( '/model/chem/spine/Ca[3]', 'getConc', 'chem/spine4Ca' )
addPlot( '/model/chem/spine/Ca[11]', 'getConc', 'chem/spine12Ca' )
addPlot( '/model/chem/dend/DEND/Ca', 'getConc', 'chem/dendCa' )
addPlot( '/model/chem/dend/DEND/Ca[20]', 'getConc', 'chem/dendCa20' )
def makeGraphics( cPlotDt, ePlotDt ):
plt.ion()
fig = plt.figure( figsize=(10,16) )
chem = fig.add_subplot( 411 )
chem.set_ylim( 0, 0.006 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'time (seconds)' )
for x in moose.wildcardFind( '/graphs/chem/#[ISA=Table]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * cPlotDt
line1, = chem.plot( pos, x.vector, label=x.name )
plt.legend()
elec = fig.add_subplot( 412 )
plt.ylabel( 'Vm (V)' )
plt.xlabel( 'time (seconds)' )
for x in moose.wildcardFind( '/graphs/elec/#[ISA=Table]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * ePlotDt
line1, = elec.plot( pos, x.vector, label=x.name )
plt.legend()
ca = fig.add_subplot( 413 )
plt.ylabel( '[Ca] (mM)' )
plt.xlabel( 'time (seconds)' )
for x in moose.wildcardFind( '/graphs/ca/#[ISA=Table]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * ePlotDt
line1, = ca.plot( pos, x.vector, label=x.name )
plt.legend()
lenplot = fig.add_subplot( 414 )
    plt.ylabel( 'Ca (mM)' )
    plt.xlabel( 'Voxel #' )
spineCa = moose.vec( '/model/chem/spine/Ca' )
dendCa = moose.vec( '/model/chem/dend/DEND/Ca' )
line1, = lenplot.plot( range( len( spineCa ) ), spineCa.conc, label='spine' )
line2, = lenplot.plot( range( len( dendCa ) ), dendCa.conc, label='dend' )
ca = [ x.Ca * 0.0001 for x in moose.wildcardFind( '/model/elec/##[ISA=CaConcBase]') ]
line3, = lenplot.plot( range( len( ca ) ), ca, label='elec' )
spineCaM = moose.vec( '/model/chem/spine/Ca_CaM' )
line4, = lenplot.plot( range( len( spineCaM ) ), spineCaM.conc, label='spineCaM' )
psdCaM = moose.vec( '/model/chem/psd/Ca_CaM' )
line5, = lenplot.plot( range( len( psdCaM ) ), psdCaM.conc, label='psdCaM' )
plt.legend()
fig.canvas.draw()
raw_input()
'''
for x in moose.wildcardFind( '/graphs/##[ISA=Table]' ):
t = numpy.arange( 0, x.vector.size, 1 )
pylab.plot( t, x.vector, label=x.name )
pylab.legend()
pylab.show()
'''
print 'All done'
def testNeuroMeshMultiscale():
runtime = 0.5
elecDt = 0.2e-6
chemDt = 0.005
ePlotDt = 0.5e-3
cPlotDt = 0.005
plotName = 'nm.plot'
makeNeuroMeshModel()
print "after model is completely done"
for i in moose.wildcardFind( '/model/chem/#/#/#/transloc#' ):
print i[0].name, i[0].Kf, i[0].Kb, i[0].kf, i[0].kb
makeChemPlots()
makeElecPlots()
makeCaPlots()
moose.setClock( 0, elecDt )
moose.setClock( 1, elecDt )
moose.setClock( 2, elecDt )
moose.setClock( 4, chemDt )
moose.setClock( 5, chemDt )
moose.setClock( 6, chemDt )
moose.setClock( 7, cPlotDt )
moose.setClock( 8, ePlotDt )
moose.useClock( 0, '/model/elec/##[ISA=Compartment]', 'init' )
moose.useClock( 1, '/model/elec/##[ISA=Compartment]', 'process' )
moose.useClock( 1, '/model/elec/##[ISA=SpikeGen]', 'process' )
moose.useClock( 2, '/model/elec/##[ISA=ChanBase],/model/##[ISA=SynBase],/model/##[ISA=CaConc]','process')
#moose.useClock( 2, '/model/##[ISA=SynBase],/model/##[ISA=CaConc]','process')
#moose.useClock( 5, '/model/chem/##[ISA=PoolBase],/model/##[ISA=ReacBase],/model/##[ISA=EnzBase]', 'process' )
#moose.useClock( 4, '/model/chem/##[ISA=Adaptor]', 'process' )
moose.useClock( 4, '/model/chem/#/dsolve', 'process' )
moose.useClock( 5, '/model/chem/#/ksolve', 'process' )
moose.useClock( 6, '/model/chem/spine/adaptCa', 'process' )
moose.useClock( 6, '/model/chem/dend/DEND/adaptCa', 'process' )
moose.useClock( 7, '/graphs/chem/#', 'process' )
moose.useClock( 8, '/graphs/elec/#,/graphs/ca/#', 'process' )
'''
hsolve = moose.HSolve( '/model/elec/hsolve' )
moose.useClock( 1, '/model/elec/hsolve', 'process' )
hsolve.dt = elecDt
hsolve.target = '/model/elec/compt'
'''
moose.reinit()
moose.element( '/model/elec/soma' ).inject = 2e-10
moose.element( '/model/chem/psd/Ca' ).concInit = 0.001
moose.element( '/model/chem/spine/Ca' ).concInit = 0.002
moose.element( '/model/chem/dend/DEND/Ca' ).concInit = 0.003
moose.reinit()
moose.start( runtime )
# moose.element( '/model/elec/soma' ).inject = 0
# moose.start( 0.25 )
makeGraphics( cPlotDt, ePlotDt )
def main():
testNeuroMeshMultiscale()
if __name__ == '__main__':
main()
#
# minimal.py ends here.
| gpl-2.0 |
phac-nml/bio_hansel | bio_hansel/qc/__init__.py | 1 | 1890 | # -*- coding: utf-8 -*-
from typing import List, Callable, Tuple
from pandas import DataFrame
from ..subtype import Subtype
from ..subtyping_params import SubtypingParams
from ..qc.const import QC
from ..qc.checks import \
is_missing_kmers, \
is_mixed_subtype, \
is_maybe_intermediate_subtype, \
is_missing_too_many_target_sites, \
is_missing_downstream_targets, \
is_overall_coverage_low
CHECKS = [is_missing_kmers,
is_mixed_subtype,
is_missing_too_many_target_sites,
is_missing_downstream_targets,
is_maybe_intermediate_subtype,
is_overall_coverage_low
] # type: List[Callable[[Subtype, DataFrame, SubtypingParams], Tuple[str, str]]]
def perform_quality_check(st: Subtype, df: DataFrame, subtyping_params: SubtypingParams) -> Tuple[str, str]:
"""Perform QC of subtyping results
Return immediate fail if subtype result is missing or if there are no detailed subtyping results.
Args:
st: Subtyping results.
df: DataFrame containing subtyping results.
        subtyping_params: Subtyping/QC parameters
Returns:
(QC status, QC messages)
"""
if st.subtype is None or len(st.subtype) == 0 \
or df is None or df.shape[0] == 0:
return QC.FAIL, QC.NO_SUBTYPE_RESULT
overall_qc_status = QC.PASS
messages = []
for func in CHECKS:
status, message = func(st, df, subtyping_params)
# If quality check function passes, move on to the next.
if status is None:
continue
messages.append('{}: {}'.format(status, message))
if overall_qc_status == QC.FAIL:
continue
if status == QC.FAIL:
overall_qc_status = QC.FAIL
continue
if status == QC.WARNING:
overall_qc_status = QC.WARNING
return overall_qc_status, ' | '.join(messages)
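# Editorial usage sketch, not part of the package; construction of the arguments
# is elided because it depends on the calling pipeline:
#
#     status, messages = perform_quality_check(subtype, kmer_results_df, params)
#     # `status` is QC.PASS, QC.WARNING or QC.FAIL; `messages` joins the output
#     # of every non-silent check with ' | '.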
| apache-2.0 |
jsouza/pamtl | src/stl/pa.py | 1 | 1904 | from sklearn.base import BaseEstimator
from sklearn.metrics.pairwise import linear_kernel
from sklearn.utils import extmath
__author__ = 'desouza'
def linear_kernel_local(X, Y, gamma=None):
return linear_kernel(X, Y)
class PAEstimator(BaseEstimator):
def __init__(self, loss="pai", C=0.01, n_iter=1, fit_intercept=False):
self.loss = loss
self.C = C
self.n_iter = n_iter
self.fit_intercept = fit_intercept
self.coef_ = None
def _pa(self, loss_t, x_t):
denom = extmath.norm(x_t) ** 2.0
# special case when L_2 norm of x_t is zero (followed libol
# implementation)
if denom == 0:
return 1
d = loss_t / denom
return d
def _pai(self, loss_t, x_t):
pa = self._pa(loss_t, x_t)
return min(self.C, pa)
def _paii(self, loss_t, x_t):
# return loss_t / ((extmath.norm(x_t) ** 2.0) + (1.0 / (2.0 * self.C)))
return loss_t / ((extmath.norm(x_t) ** 2.0) + (0.5 / self.C))
class KernelPAEstimator(BaseEstimator):
def __init__(self, kernel="linear", gamma=0.01, loss="pai", C=0.01,
n_iter=1, fit_intercept=False):
self.kernel = kernel
self.gamma = gamma
self.loss = loss
self.C = C
self.n_iter = n_iter
self.fit_intercept = fit_intercept
self.support_ = []
self.alphas_ = []
def _pa(self, loss_t, kern_t):
denom = kern_t
# special case when L_2 norm of x_t is zero (followed libol
# implementation)
if denom == 0:
return 1
d = loss_t / denom
return d
def _pai(self, loss_t, x_t):
pa = self._pa(loss_t, x_t)
return min(self.C, pa)
def _paii(self, loss_t, kern_t):
        # Standard PA-II step size: loss / (K(x, x) + 1 / (2 * C)), matching the
        # dense PAEstimator._paii above; the previous expression divided by C / 2
        # due to an operator-precedence slip.
        return loss_t / (kern_t + (0.5 / self.C))
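# Editorial note, not part of the original module: the step sizes implemented
# above are the standard passive-aggressive updates of Crammer et al. (2006),
# with K(x, x) taking the place of ||x||^2 in the kernelized estimator:
#
#     PA:     tau = loss / ||x||^2
#     PA-I:   tau = min(C, loss / ||x||^2)
#     PA-II:  tau = loss / (||x||^2 + 1 / (2 * C))
#
# e.g. with loss = 2.0, ||x||^2 = 4.0 and C = 0.01:
#     PA -> 0.5,  PA-I -> min(0.01, 0.5) = 0.01,  PA-II -> 2.0 / (4.0 + 50.0) ~= 0.037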
| mit |
pradyu1993/scikit-learn | sklearn/tests/test_preprocessing.py | 1 | 19085 | import numpy as np
import numpy.linalg as la
import scipy.sparse as sp
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.sparsefuncs import mean_variance_axis0
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import KernelCenterer
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
from sklearn.linear_model.stochastic_gradient import SGDClassifier
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_scaler_1d():
"""Test scaling of dataset along single axis"""
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
def test_scaler_2d_arrays():
"""Test scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_equal(X_trans.min(axis=0), 0)
assert_array_equal(X_trans.min(axis=0), 0)
assert_array_equal(X_trans.max(axis=0), 1)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_equal(X_trans.min(axis=0), 1)
assert_array_equal(X_trans.max(axis=0), 2)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sp.csr_matrix(X)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_scaled_back, X)
def test_scaler_without_copy():
"""Check that StandardScaler.fit does not change input"""
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sp.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sp.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sp.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sp.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sp.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sp.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sp.coo_matrix, sp.csc_matrix, sp.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sp.csr_matrix))
X_norm = toarray(X_norm)
for i in xrange(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sp.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sp.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in xrange(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sp.coo_matrix, sp.csc_matrix, sp.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sp.csr_matrix))
X_norm = toarray(X_norm)
for i in xrange(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize_errors():
"""Check that invalid arguments yield ValueError"""
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, 0]])
for init in (np.array, sp.csr_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
def test_label_binarizer():
lb = LabelBinarizer()
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# two-class case
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 2, 2, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_multilabel():
lb = LabelBinarizer()
# test input as lists of tuples
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
got = lb.fit_transform(inp)
assert_array_equal(indicator_mat, got)
assert_equal(lb.inverse_transform(got), inp)
# test input as label indicator matrix
lb.fit(indicator_mat)
assert_array_equal(indicator_mat,
lb.inverse_transform(indicator_mat))
# regression test for the two-class multilabel case
lb = LabelBinarizer()
inp = [[1, 0], [0], [1], [0, 1]]
expected = np.array([[1, 1],
[1, 0],
[0, 1],
[1, 1]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_equal([set(x) for x in lb.inverse_transform(got)],
[set(x) for x in inp])
def test_label_binarizer_errors():
"""Check that invalid arguments yield ValueError"""
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
def test_label_encoder():
"""Test LabelEncoder's transform and inverse_transform methods"""
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
"""Test fit_transform"""
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_string_labels():
"""Test LabelEncoder's transform and inverse_transform methods with
non-numeric labels"""
le = LabelEncoder()
le.fit(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(le.classes_, ["amsterdam", "paris", "tokyo"])
assert_array_equal(le.transform(["tokyo", "tokyo", "paris"]),
[2, 2, 1])
assert_array_equal(le.inverse_transform([2, 2, 1]),
["tokyo", "tokyo", "paris"])
assert_raises(ValueError, le.transform, ["london"])
def test_label_encoder_errors():
"""Check that invalid arguments yield ValueError"""
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
def test_label_binarizer_iris():
lb = LabelBinarizer()
Y = lb.fit_transform(iris.target)
clfs = [SGDClassifier().fit(iris.data, Y[:, k])
for k in range(len(lb.classes_))]
Y_pred = np.array([clf.decision_function(iris.data) for clf in clfs]).T
y_pred = lb.inverse_transform(Y_pred)
accuracy = np.mean(iris.target == y_pred)
y_pred2 = SGDClassifier().fit(iris.data, iris.target).predict(iris.data)
accuracy2 = np.mean(iris.target == y_pred2)
assert_almost_equal(accuracy, accuracy2)
def test_label_binarizer_multilabel_unlabeled():
"""Check that LabelBinarizer can handle an unlabeled sample"""
lb = LabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(lb.fit_transform(y), Y)
def test_center_kernel():
"""Test that KernelCenterer is equivalent to StandardScaler
in feature space"""
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
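# Editorial note, not part of the original test suite: the identity exercised in
# test_center_kernel is the usual kernel-centering formula. With K = X X^T and
# 1_n the n x n matrix whose entries are all 1/n,
#     K_centered = K - 1_n K - K 1_n + 1_n K 1_n,
# which at fit time equals the linear kernel computed on mean-centered X.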
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
| bsd-3-clause |
strongh/GPy | GPy/util/datasets.py | 4 | 64376 | import csv
import os
import copy
import numpy as np
import GPy
import scipy.io
import cPickle as pickle
import zipfile
import tarfile
import datetime
import json
import re
from config import *
ipython_available=True
try:
import IPython
except ImportError:
ipython_available=False
import sys, urllib2
def reporthook(a,b,c):
# ',' at the end of the line is important!
#print "% 3.1f%% of %d bytes\r" % (min(100, float(a * b) / c * 100), c),
#you can also use sys.stdout.write
sys.stdout.write("\r% 3.1f%% of %d bytes" % (min(100, float(a * b) / c * 100), c))
sys.stdout.flush()
# Global variables
data_path = os.path.expandvars(config.get('datasets', 'dir'))
#data_path = os.path.join(os.path.dirname(__file__), 'datasets')
default_seed = 10000
overide_manual_authorize=False
neil_url = 'http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/dataset_mirror/'
# Read data resources from json file.
# Don't do this when ReadTheDocs is scanning as it breaks things
on_rtd = os.environ.get('READTHEDOCS', None) == 'True' #Checks if RTD is scanning
if not (on_rtd):
path = os.path.join(os.path.dirname(__file__), 'data_resources.json')
json_data=open(path).read()
data_resources = json.loads(json_data)
if not (on_rtd):
path = os.path.join(os.path.dirname(__file__), 'football_teams.json')
json_data=open(path).read()
football_dict = json.loads(json_data)
def prompt_user(prompt):
"""Ask user for agreeing to data set licenses."""
# raw_input returns the empty string for "enter"
yes = set(['yes', 'y'])
no = set(['no','n'])
try:
print(prompt)
choice = raw_input().lower()
# would like to test for exception here, but not sure if we can do that without importing IPython
except:
print('Stdin is not implemented.')
print('You need to set')
print('overide_manual_authorize=True')
print('to proceed with the download. Please set that variable and continue.')
raise
if choice in yes:
return True
elif choice in no:
return False
else:
print("Your response was a " + choice)
print("Please respond with 'yes', 'y' or 'no', 'n'")
#return prompt_user()
def data_available(dataset_name=None):
"""Check if the data set is available on the local machine already."""
from itertools import izip_longest
dr = data_resources[dataset_name]
zip_urls = (dr['files'], )
if dr.has_key('save_names'): zip_urls += (dr['save_names'], )
else: zip_urls += ([],)
for file_list, save_list in izip_longest(*zip_urls, fillvalue=[]):
for f, s in izip_longest(file_list, save_list, fillvalue=None):
if s is not None: f=s # If there is a save_name given, use that one
if not os.path.exists(os.path.join(data_path, dataset_name, f)):
return False
return True
def download_url(url, store_directory, save_name=None, messages=True, suffix=''):
"""Download a file from a url and save it to disk."""
i = url.rfind('/')
file = url[i+1:]
print file
dir_name = os.path.join(data_path, store_directory)
if save_name is None: save_name = os.path.join(dir_name, file)
else: save_name = os.path.join(dir_name, save_name)
if suffix is None: suffix=''
print "Downloading ", url, "->", save_name
if not os.path.exists(dir_name):
os.makedirs(dir_name)
try:
response = urllib2.urlopen(url+suffix)
except urllib2.URLError, e:
if not hasattr(e, "code"):
raise
response = e
if response.code > 399 and response.code<500:
raise ValueError('Tried url ' + url + suffix + ' and received client error ' + str(response.code))
elif response.code > 499:
raise ValueError('Tried url ' + url + suffix + ' and received server error ' + str(response.code))
with open(save_name, 'wb') as f:
meta = response.info()
content_length_str = meta.getheaders("Content-Length")
if content_length_str:
file_size = int(content_length_str[0])
else:
file_size = None
status = ""
file_size_dl = 0
block_sz = 8192
line_length=30
while True:
buff = response.read(block_sz)
if not buff:
break
file_size_dl += len(buff)
f.write(buff)
sys.stdout.write(" "*(len(status)) + "\r")
if file_size:
status = r"[{perc: <{ll}}] {dl:7.3f}/{full:.3f}MB".format(dl=file_size_dl/(1048576.),
full=file_size/(1048576.), ll=line_length,
perc="="*int(line_length*float(file_size_dl)/file_size))
else:
status = r"[{perc: <{ll}}] {dl:7.3f}MB".format(dl=file_size_dl/(1048576.),
ll=line_length,
perc="."*int(line_length*float(file_size_dl/(10*1048576.))))
sys.stdout.write(status)
sys.stdout.flush()
sys.stdout.write(" "*(len(status)) + "\r")
print status
# if we wanted to get more sophisticated maybe we should check the response code here again even for successes.
#with open(save_name, 'wb') as f:
# f.write(response.read())
#urllib.urlretrieve(url+suffix, save_name, reporthook)
def authorize_download(dataset_name=None):
"""Check with the user that the are happy with terms and conditions for the data set."""
print('Acquiring resource: ' + dataset_name)
# TODO, check resource is in dictionary!
print('')
dr = data_resources[dataset_name]
print('Details of data: ')
print(dr['details'])
print('')
if dr['citation']:
print('Please cite:')
print(dr['citation'])
print('')
if dr['size']:
print('After downloading the data will take up ' + str(dr['size']) + ' bytes of space.')
print('')
print('Data will be stored in ' + os.path.join(data_path, dataset_name) + '.')
print('')
if overide_manual_authorize:
if dr['license']:
print('You have agreed to the following license:')
print(dr['license'])
print('')
return True
else:
if dr['license']:
print('You must also agree to the following license:')
print(dr['license'])
print('')
return prompt_user('Do you wish to proceed with the download? [yes/no]')
def download_data(dataset_name=None):
"""Check with the user that the are happy with terms and conditions for the data set, then download it."""
import itertools
dr = data_resources[dataset_name]
if not authorize_download(dataset_name):
raise Exception("Permission to download data set denied.")
zip_urls = (dr['urls'], dr['files'])
if dr.has_key('save_names'): zip_urls += (dr['save_names'], )
else: zip_urls += ([],)
if dr.has_key('suffices'): zip_urls += (dr['suffices'], )
else: zip_urls += ([],)
for url, files, save_names, suffices in itertools.izip_longest(*zip_urls, fillvalue=[]):
for f, save_name, suffix in itertools.izip_longest(files, save_names, suffices, fillvalue=None):
download_url(os.path.join(url,f), dataset_name, save_name, suffix=suffix)
return True
def data_details_return(data, data_set):
"""Update the data component of the data dictionary with details drawn from the data_resources."""
data.update(data_resources[data_set])
return data
def cmu_urls_files(subj_motions, messages = True):
'''
Find which resources are missing on the local disk for the requested CMU motion capture motions.
'''
dr = data_resources['cmu_mocap_full']
cmu_url = dr['urls'][0]
subjects_num = subj_motions[0]
motions_num = subj_motions[1]
resource = {'urls' : [], 'files' : []}
# Convert numbers to strings
subjects = []
motions = [list() for _ in range(len(subjects_num))]
for i in range(len(subjects_num)):
curSubj = str(int(subjects_num[i]))
if int(subjects_num[i]) < 10:
curSubj = '0' + curSubj
subjects.append(curSubj)
for j in range(len(motions_num[i])):
curMot = str(int(motions_num[i][j]))
if int(motions_num[i][j]) < 10:
curMot = '0' + curMot
motions[i].append(curMot)
all_skels = []
assert len(subjects) == len(motions)
all_motions = []
for i in range(len(subjects)):
skel_dir = os.path.join(data_path, 'cmu_mocap')
cur_skel_file = os.path.join(skel_dir, subjects[i] + '.asf')
url_required = False
file_download = []
if not os.path.exists(cur_skel_file):
# Current skel file doesn't exist.
if not os.path.isdir(skel_dir):
os.makedirs(skel_dir)
# Add skel file to list.
url_required = True
file_download.append(subjects[i] + '.asf')
for j in range(len(motions[i])):
file_name = subjects[i] + '_' + motions[i][j] + '.amc'
cur_motion_file = os.path.join(skel_dir, file_name)
if not os.path.exists(cur_motion_file):
url_required = True
file_download.append(subjects[i] + '_' + motions[i][j] + '.amc')
if url_required:
resource['urls'].append(cmu_url + '/' + subjects[i] + '/')
resource['files'].append(file_download)
return resource
try:
import gpxpy
import gpxpy.gpx
gpxpy_available = True
except ImportError:
gpxpy_available = False
if gpxpy_available:
def epomeo_gpx(data_set='epomeo_gpx', sample_every=4):
if not data_available(data_set):
download_data(data_set)
files = ['endomondo_1', 'endomondo_2', 'garmin_watch_via_endomondo','viewranger_phone', 'viewranger_tablet']
X = []
for file in files:
gpx_file = open(os.path.join(data_path, 'epomeo_gpx', file + '.gpx'), 'r')
gpx = gpxpy.parse(gpx_file)
segment = gpx.tracks[0].segments[0]
points = [point for track in gpx.tracks for segment in track.segments for point in segment.points]
data = [[(point.time-datetime.datetime(2013,8,21)).total_seconds(), point.latitude, point.longitude, point.elevation] for point in points]
X.append(np.asarray(data)[::sample_every, :])
gpx_file.close()
return data_details_return({'X' : X, 'info' : 'Data is an array containing time in seconds, latitude, longitude and elevation in that order.'}, data_set)
#del gpxpy_available
# Some general utilities.
def sample_class(f):
p = 1. / (1. + np.exp(-f))
c = np.random.binomial(1, p)
c = np.where(c, 1, -1)
return c
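# Worked sketch for sample_class: latent values f are squashed through the
# logistic sigmoid and +/-1 labels are drawn, e.g.
#   f = np.array([-2., 0., 2.])
#   # p = 1/(1+exp(-f)) is approximately [0.12, 0.50, 0.88]; labels are sampled
#   # as Bernoulli(p) and mapped from {0, 1} to {-1, +1}.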
def boston_housing(data_set='boston_housing'):
if not data_available(data_set):
download_data(data_set)
all_data = np.genfromtxt(os.path.join(data_path, data_set, 'housing.data'))
X = all_data[:, 0:13]
Y = all_data[:, 13:14]
return data_details_return({'X' : X, 'Y': Y}, data_set)
def brendan_faces(data_set='brendan_faces'):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'frey_rawface.mat'))
Y = mat_data['ff'].T
return data_details_return({'Y': Y}, data_set)
def della_gatta_TRP63_gene_expression(data_set='della_gatta', gene_number=None):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'DellaGattadata.mat'))
X = np.double(mat_data['timepoints'])
if gene_number == None:
Y = mat_data['exprs_tp53_RMA']
else:
Y = mat_data['exprs_tp53_RMA'][:, gene_number]
if len(Y.shape) == 1:
Y = Y[:, None]
return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set)
def football_data(season='1314', data_set='football_data'):
"""Football data from English games since 1993. This downloads data from football-data.co.uk for the given season. """
def league2num(string):
league_dict = {'E0':0, 'E1':1, 'E2': 2, 'E3': 3, 'EC':4}
return league_dict[string]
def football2num(string):
if football_dict.has_key(string):
return football_dict[string]
else:
football_dict[string] = len(football_dict)+1
return football_dict[string]
data_set_season = data_set + '_' + season
data_resources[data_set_season] = copy.deepcopy(data_resources[data_set])
data_resources[data_set_season]['urls'][0]+=season + '/'
start_year = int(season[0:2])
end_year = int(season[2:4])
files = ['E0.csv', 'E1.csv', 'E2.csv', 'E3.csv']
if start_year>4 and start_year < 93:
files += ['EC.csv']
data_resources[data_set_season]['files'] = [files]
if not data_available(data_set_season):
download_data(data_set_season)
import pylab as pb
for file in reversed(files):
filename = os.path.join(data_path, data_set_season, file)
# rewrite files removing blank rows.
writename = os.path.join(data_path, data_set_season, 'temp.csv')
input = open(filename, 'rb')
output = open(writename, 'wb')
writer = csv.writer(output)
for row in csv.reader(input):
if any(field.strip() for field in row):
writer.writerow(row)
input.close()
output.close()
table = np.loadtxt(writename,skiprows=1, usecols=(0, 1, 2, 3, 4, 5), converters = {0: league2num, 1: pb.datestr2num, 2:football2num, 3:football2num}, delimiter=',')
X = table[:, :4]
Y = table[:, 4:]
return data_details_return({'X': X, 'Y': Y}, data_set)
def sod1_mouse(data_set='sod1_mouse'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'sod1_C57_129_exprs.csv')
Y = read_csv(filename, header=0, index_col=0)
num_repeats=4
num_time=4
num_cond=4
X = 1
return data_details_return({'X': X, 'Y': Y}, data_set)
def spellman_yeast(data_set='spellman_yeast'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'combined.txt')
Y = read_csv(filename, header=0, index_col=0, sep='\t')
return data_details_return({'Y': Y}, data_set)
def spellman_yeast_cdc15(data_set='spellman_yeast'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'combined.txt')
Y = read_csv(filename, header=0, index_col=0, sep='\t')
t = np.asarray([10, 30, 50, 70, 80, 90, 100, 110, 120, 130, 140, 150, 170, 180, 190, 200, 210, 220, 230, 240, 250, 270, 290])
times = ['cdc15_'+str(time) for time in t]
Y = Y[times].T
t = t[:, None]
return data_details_return({'Y' : Y, 't': t, 'info': 'Time series of synchronized yeast cells from the CDC-15 experiment of Spellman et al (1998).'}, data_set)
def lee_yeast_ChIP(data_set='lee_yeast_ChIP'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
import zipfile
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'binding_by_gene.tsv')
S = read_csv(filename, header=1, index_col=0, sep='\t')
transcription_factors = [col for col in S.columns if col[:7] != 'Unnamed']
annotations = S[['Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3']]
S = S[transcription_factors]
return data_details_return({'annotations' : annotations, 'Y' : S, 'transcription_factors': transcription_factors}, data_set)
def fruitfly_tomancak(data_set='fruitfly_tomancak', gene_number=None):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'tomancak_exprs.csv')
Y = read_csv(filename, header=0, index_col=0).T
num_repeats = 3
num_time = 12
xt = np.linspace(0, num_time-1, num_time)
xr = np.linspace(0, num_repeats-1, num_repeats)
xtime, xrepeat = np.meshgrid(xt, xr)
X = np.vstack((xtime.flatten(), xrepeat.flatten())).T
return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set)
def drosophila_protein(data_set='drosophila_protein'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'becker_et_al.csv')
Y = read_csv(filename, header=0)
return data_details_return({'Y': Y}, data_set)
def drosophila_knirps(data_set='drosophila_protein'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'becker_et_al.csv')
# in the csv file we have facts_kni and ext_kni. We treat facts_kni as protein and ext_kni as mRNA
df = read_csv(filename, header=0)
t = df['t'][:,None]
x = df['x'][:,None]
g = df['expression1'][:,None]
p = df['expression2'][:,None]
leng = x.shape[0]
T = np.vstack([t,t])
S = np.vstack([x,x])
inx = np.zeros(leng*2)[:,None]
inx[leng:leng*2] = 1
X = np.hstack([T,S,inx])
Y = np.vstack([g,p])
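# Layout note: X has columns [time, space, output index]; the first leng rows
# (index 0) correspond to 'expression1' and the last leng rows (index 1) to
# 'expression2', matching the stacking order of Y above.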
return data_details_return({'Y': Y, 'X': X}, data_set)
# This will be for downloading google trends data.
def google_trends(query_terms=['big data', 'machine learning', 'data science'], data_set='google_trends', refresh_data=False):
"""Data downloaded from Google trends for given query terms. Warning, if you use this function multiple times in a row you get blocked due to terms of service violations. The function will cache the result of your query, if you wish to refresh an old query set refresh_data to True. The function is inspired by this notebook: http://nbviewer.ipython.org/github/sahuguet/notebooks/blob/master/GoogleTrends%20meet%20Notebook.ipynb"""
query_terms.sort()
import pandas
# Create directory name for data
dir_path = os.path.join(data_path,'google_trends')
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
dir_name = '-'.join(query_terms)
dir_name = dir_name.replace(' ', '_')
dir_path = os.path.join(dir_path,dir_name)
file = 'data.csv'
file_name = os.path.join(dir_path,file)
if not os.path.exists(file_name) or refresh_data:
print "Accessing Google trends to acquire the data. Note that repeated accesses will result in a block due to a google terms of service violation. Failure at this point may be due to such blocks."
# quote the query terms.
quoted_terms = []
for term in query_terms:
quoted_terms.append(urllib2.quote(term))
print "Query terms: ", ', '.join(query_terms)
print "Fetching query:"
query = 'http://www.google.com/trends/fetchComponent?q=%s&cid=TIMESERIES_GRAPH_0&export=3' % ",".join(quoted_terms)
data = urllib2.urlopen(query).read()
print "Done."
# In the notebook they did some data cleaning: remove Javascript header+footer, and translate new Date(....,..,..) into YYYY-MM-DD.
header = """// Data table response\ngoogle.visualization.Query.setResponse("""
data = data[len(header):-2]
data = re.sub('new Date\((\d+),(\d+),(\d+)\)', (lambda m: '"%s-%02d-%02d"' % (m.group(1).strip(), 1+int(m.group(2)), int(m.group(3)))), data)
timeseries = json.loads(data)
columns = [k['label'] for k in timeseries['table']['cols']]
rows = map(lambda x: [k['v'] for k in x['c']], timeseries['table']['rows'])
df = pandas.DataFrame(rows, columns=columns)
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
df.to_csv(file_name)
else:
print "Reading cached data for google trends. To refresh the cache set 'refresh_data=True' when calling this function."
print "Query terms: ", ', '.join(query_terms)
df = pandas.read_csv(file_name, parse_dates=[0])
columns = df.columns
terms = len(query_terms)
import datetime
X = np.asarray([(row, i) for i in range(terms) for row in df.index])
Y = np.asarray([[df.ix[row][query_terms[i]]] for i in range(terms) for row in df.index ])
output_info = columns[1:]
return data_details_return({'data frame' : df, 'X': X, 'Y': Y, 'query_terms': output_info, 'info': "Data downloaded from google trends with query terms: " + ', '.join(output_info) + '.'}, data_set)
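# Minimal usage sketch (the query terms are arbitrary placeholders):
#   data = google_trends(query_terms=['gaussian process', 'neural network'])
#   df = data['data frame']
# Cached results are reused unless refresh_data=True; repeated live queries may
# be blocked by Google.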
# The data sets
def oil(data_set='three_phase_oil_flow'):
"""The three phase oil data from Bishop and James (1993)."""
if not data_available(data_set):
download_data(data_set)
oil_train_file = os.path.join(data_path, data_set, 'DataTrn.txt')
oil_trainlbls_file = os.path.join(data_path, data_set, 'DataTrnLbls.txt')
oil_test_file = os.path.join(data_path, data_set, 'DataTst.txt')
oil_testlbls_file = os.path.join(data_path, data_set, 'DataTstLbls.txt')
oil_valid_file = os.path.join(data_path, data_set, 'DataVdn.txt')
oil_validlbls_file = os.path.join(data_path, data_set, 'DataVdnLbls.txt')
fid = open(oil_train_file)
X = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_test_file)
Xtest = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_valid_file)
Xvalid = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_trainlbls_file)
Y = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
fid = open(oil_testlbls_file)
Ytest = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
fid = open(oil_validlbls_file)
Yvalid = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'Xvalid': Xvalid, 'Yvalid': Yvalid}, data_set)
#else:
# throw an error
def oil_100(seed=default_seed, data_set = 'three_phase_oil_flow'):
np.random.seed(seed=seed)
data = oil()
indices = np.random.permutation(1000)
indices = indices[0:100]
X = data['X'][indices, :]
Y = data['Y'][indices, :]
return data_details_return({'X': X, 'Y': Y, 'info': "Subsample of the full oil data extracting 100 values randomly without replacement, here seed was " + str(seed)}, data_set)
def pumadyn(seed=default_seed, data_set='pumadyn-32nm'):
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar = tarfile.open(os.path.join(path, 'pumadyn-32nm.tar.gz'))
print('Extracting file.')
tar.extractall(path=path)
tar.close()
# Data is variance 1, no need to normalize.
data = np.loadtxt(os.path.join(data_path, data_set, 'pumadyn-32nm', 'Dataset.data.gz'))
indices = np.random.permutation(data.shape[0])
indicesTrain = indices[0:7168]
indicesTest = indices[7168:-1]
indicesTrain.sort(axis=0)
indicesTest.sort(axis=0)
X = data[indicesTrain, 0:-2]
Y = data[indicesTrain, -1][:, None]
Xtest = data[indicesTest, 0:-2]
Ytest = data[indicesTest, -1][:, None]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed': seed}, data_set)
def robot_wireless(data_set='robot_wireless'):
# WiFi access point strengths on a tour around UW Paul Allen building.
if not data_available(data_set):
download_data(data_set)
file_name = os.path.join(data_path, data_set, 'uw-floor.txt')
all_time = np.genfromtxt(file_name, usecols=(0))
macaddress = np.genfromtxt(file_name, usecols=(1), dtype='string')
x = np.genfromtxt(file_name, usecols=(2))
y = np.genfromtxt(file_name, usecols=(3))
strength = np.genfromtxt(file_name, usecols=(4))
addresses = np.unique(macaddress)
times = np.unique(all_time)
addresses.sort()
times.sort()
allY = np.zeros((len(times), len(addresses)))
allX = np.zeros((len(times), 2))
allY[:]=-92.
strengths={}
for address, j in zip(addresses, range(len(addresses))):
ind = np.nonzero(address==macaddress)
temp_strengths=strength[ind]
temp_x=x[ind]
temp_y=y[ind]
temp_times = all_time[ind]
for time in temp_times:
vals = time==temp_times
if any(vals):
ind2 = np.nonzero(vals)
i = np.nonzero(time==times)
allY[i, j] = temp_strengths[ind2]
allX[i, 0] = temp_x[ind2]
allX[i, 1] = temp_y[ind2]
allY = (allY + 85.)/15.
X = allX[0:215, :]
Y = allY[0:215, :]
Xtest = allX[215:, :]
Ytest = allY[215:, :]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'addresses' : addresses, 'times' : times}, data_set)
def silhouette(data_set='ankur_pose_data'):
# Ankur Agarwal and Bill Trigg's silhouette data.
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'ankurDataPoseSilhouette.mat'))
inMean = np.mean(mat_data['Y'])
inScales = np.sqrt(np.var(mat_data['Y']))
X = mat_data['Y'] - inMean
X = X / inScales
Xtest = mat_data['Y_test'] - inMean
Xtest = Xtest / inScales
Y = mat_data['Z']
Ytest = mat_data['Z_test']
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest}, data_set)
def decampos_digits(data_set='decampos_characters', which_digits=[0,1,2,3,4,5,6,7,8,9]):
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
digits = np.load(os.path.join(path, 'digits.npy'))
digits = digits[which_digits,:,:,:]
num_classes, num_samples, height, width = digits.shape
Y = digits.reshape((digits.shape[0]*digits.shape[1],digits.shape[2]*digits.shape[3]))
lbls = np.array([[l]*num_samples for l in which_digits]).reshape(Y.shape[0], 1)
str_lbls = np.array([[str(l)]*num_samples for l in which_digits])
return data_details_return({'Y': Y, 'lbls': lbls, 'str_lbls' : str_lbls, 'info': 'Digits data set from the de Campos characters data'}, data_set)
def ripley_synth(data_set='ripley_prnn_data'):
if not data_available(data_set):
download_data(data_set)
train = np.genfromtxt(os.path.join(data_path, data_set, 'synth.tr'), skip_header=1)
X = train[:, 0:2]
y = train[:, 2:3]
test = np.genfromtxt(os.path.join(data_path, data_set, 'synth.te'), skip_header=1)
Xtest = test[:, 0:2]
ytest = test[:, 2:3]
return data_details_return({'X': X, 'Y': y, 'Xtest': Xtest, 'Ytest': ytest, 'info': 'Synthetic data generated by Ripley for a two class classification problem.'}, data_set)
def global_average_temperature(data_set='global_temperature', num_train=1000, refresh_data=False):
path = os.path.join(data_path, data_set)
if data_available(data_set) and not refresh_data:
print 'Using cached version of the data set, to use latest version set refresh_data to True'
else:
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'GLBTS.long.data'))
print 'Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0]
allX = data[data[:, 3]!=-99.99, 2:3]
allY = data[data[:, 3]!=-99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Mauna Loa data with " + str(num_train) + " values used as training points."}, data_set)
def mauna_loa(data_set='mauna_loa', num_train=545, refresh_data=False):
path = os.path.join(data_path, data_set)
if data_available(data_set) and not refresh_data:
print 'Using cached version of the data set, to use latest version set refresh_data to True'
else:
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'co2_mm_mlo.txt'))
print 'Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0]
allX = data[data[:, 3]!=-99.99, 2:3]
allY = data[data[:, 3]!=-99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Mauna Loa data with " + str(num_train) + " values used as training points."}, data_set)
def boxjenkins_airline(data_set='boxjenkins_airline', num_train=96):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'boxjenkins_airline.csv'), delimiter=',')
Y = data[:num_train, 1:2]
X = data[:num_train, 0:1]
Xtest = data[num_train:, 0:1]
Ytest = data[num_train:, 1:2]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Montly airline passenger data from Box & Jenkins 1976."}, data_set)
def osu_run1(data_set='osu_run1', sample_every=4):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
zip = zipfile.ZipFile(os.path.join(data_path, data_set, 'run1TXT.ZIP'), 'r')
for name in zip.namelist():
zip.extract(name, path)
Y, connect = GPy.util.mocap.load_text_data('Aug210106', path)
Y = Y[0:-1:sample_every, :]
return data_details_return({'Y': Y, 'connect' : connect}, data_set)
def swiss_roll_generated(num_samples=1000, sigma=0.0):
with open(os.path.join(os.path.dirname(__file__), 'datasets', 'swiss_roll.pickle')) as f:
data = pickle.load(f)
Na = data['Y'].shape[0]
perm = np.random.permutation(np.r_[:Na])[:num_samples]
Y = data['Y'][perm, :]
t = data['t'][perm]
c = data['colors'][perm, :]
so = np.argsort(t)
Y = Y[so, :]
t = t[so]
c = c[so, :]
return {'Y':Y, 't':t, 'colors':c}
def hapmap3(data_set='hapmap3'):
"""
The HapMap phase three SNP dataset - 1184 samples out of 11 populations.
SNP_matrix (A) encoding [see Paschou et al. 2007 (PCA-Correlated SNPs...)]:
Let (B1,B2) be the alphabetically sorted bases, which occur in the j-th SNP, then
/ 1, iff SNPij==(B1,B1)
Aij = | 0, iff SNPij==(B1,B2)
\ -1, iff SNPij==(B2,B2)
The SNP data and the meta information (such as iid, sex and phenotype) are
stored in the dataframe datadf, index is the Individual ID,
with following columns for metainfo:
* family_id -> Family ID
* paternal_id -> Paternal ID
* maternal_id -> Maternal ID
* sex -> Sex (1=male; 2=female; other=unknown)
* phenotype -> Phenotype (-9, or 0 for unknown)
* population -> Population string (e.g. 'ASW' - 'YRI')
* rest are SNP rs (ids)
More information is given in infodf:
* Chromosome:
- autosomal chromosomes -> 1-22
- X X chromosome -> 23
- Y Y chromosome -> 24
- XY Pseudo-autosomal region of X -> 25
- MT Mitochondrial -> 26
* Relative Position (to Chromosome) [base pairs]
"""
try:
from pandas import read_pickle, DataFrame
from sys import stdout
import bz2
except ImportError as i:
raise i, "Need pandas for hapmap dataset, make sure to install pandas (http://pandas.pydata.org/) before loading the hapmap dataset"
dir_path = os.path.join(data_path,'hapmap3')
hapmap_file_name = 'hapmap3_r2_b36_fwd.consensus.qc.poly'
unpacked_files = [os.path.join(dir_path, hapmap_file_name+ending) for ending in ['.ped', '.map']]
unpacked_files_exist = reduce(lambda a, b:a and b, map(os.path.exists, unpacked_files))
if not unpacked_files_exist and not data_available(data_set):
download_data(data_set)
preprocessed_data_paths = [os.path.join(dir_path,hapmap_file_name + file_name) for file_name in \
['.snps.pickle',
'.info.pickle',
'.nan.pickle']]
if not reduce(lambda a,b: a and b, map(os.path.exists, preprocessed_data_paths)):
if not overide_manual_authorize and not prompt_user("Preprocessing requires ~25GB "
"of memory and can take a (very) long time, continue? [Y/n]"):
print "Preprocessing required for further usage."
return
status = "Preprocessing data, please be patient..."
print status
def write_status(message, progress, status):
stdout.write(" "*len(status)); stdout.write("\r"); stdout.flush()
status = r"[{perc: <{ll}}] {message: <13s}".format(message=message, ll=20,
perc="="*int(20.*progress/100.))
stdout.write(status); stdout.flush()
return status
if not unpacked_files_exist:
status=write_status('unpacking...', 0, '')
curr = 0
for newfilepath in unpacked_files:
if not os.path.exists(newfilepath):
filepath = newfilepath + '.bz2'
file_size = os.path.getsize(filepath)
with open(newfilepath, 'wb') as new_file, open(filepath, 'rb') as f:
decomp = bz2.BZ2Decompressor()
file_processed = 0
buffsize = 100 * 1024
for data in iter(lambda : f.read(buffsize), b''):
new_file.write(decomp.decompress(data))
file_processed += len(data)
status=write_status('unpacking...', curr+12.*file_processed/(file_size), status)
curr += 12
status=write_status('unpacking...', curr, status)
os.remove(filepath)
status=write_status('reading .ped...', 25, status)
# Preprocess data:
snpstrnp = np.loadtxt(unpacked_files[0], dtype=str)
status=write_status('reading .map...', 33, status)
mapnp = np.loadtxt(unpacked_files[1], dtype=str)
status=write_status('reading relationships.txt...', 42, status)
# and metainfo:
infodf = DataFrame.from_csv(os.path.join(dir_path,'./relationships_w_pops_121708.txt'), header=0, sep='\t')
infodf.set_index('IID', inplace=1)
status=write_status('filtering nan...', 45, status)
snpstr = snpstrnp[:,6:].astype('S1').reshape(snpstrnp.shape[0], -1, 2)
inan = snpstr[:,:,0] == '0'
status=write_status('filtering reference alleles...', 55, status)
ref = np.array(map(lambda x: np.unique(x)[-2:], snpstr.swapaxes(0,1)[:,:,:]))
status=write_status('encoding snps...', 70, status)
# Encode the information for each gene in {-1,0,1}:
status=write_status('encoding snps...', 73, status)
snps = (snpstr==ref[None,:,:])
status=write_status('encoding snps...', 76, status)
snps = (snps*np.array([1,-1])[None,None,:])
status=write_status('encoding snps...', 78, status)
snps = snps.sum(-1)
status=write_status('encoding snps...', 81, status)
snps = snps.astype('i8')
status=write_status('marking nan values...', 88, status)
# put in nan values (masked as -128):
snps[inan] = -128
status=write_status('setting up meta...', 94, status)
# get meta information:
metaheader = np.r_[['family_id', 'iid', 'paternal_id', 'maternal_id', 'sex', 'phenotype']]
metadf = DataFrame(columns=metaheader, data=snpstrnp[:,:6])
metadf.set_index('iid', inplace=1)
metadf = metadf.join(infodf.population)
metadf.to_pickle(preprocessed_data_paths[1])
# put everything together:
status=write_status('setting up snps...', 96, status)
snpsdf = DataFrame(index=metadf.index, data=snps, columns=mapnp[:,1])
with open(preprocessed_data_paths[0], 'wb') as f:
pickle.dump(snpsdf, f, protocol=-1)
status=write_status('setting up snps...', 98, status)
inandf = DataFrame(index=metadf.index, data=inan, columns=mapnp[:,1])
inandf.to_pickle(preprocessed_data_paths[2])
status=write_status('done :)', 100, status)
print ''
else:
print "loading snps..."
snpsdf = read_pickle(preprocessed_data_paths[0])
print "loading metainfo..."
metadf = read_pickle(preprocessed_data_paths[1])
print "loading nan entries..."
inandf = read_pickle(preprocessed_data_paths[2])
snps = snpsdf.values
populations = metadf.population.values.astype('S3')
hapmap = dict(name=data_set,
description='The HapMap phase three SNP dataset - '
'1184 samples out of 11 populations. inan is a '
'boolean array, containing whether or not the '
'given entry is nan (nans are masked as '
'-128 in snps).',
snpsdf=snpsdf,
metadf=metadf,
snps=snps,
inan=inandf.values,
inandf=inandf,
populations=populations)
return hapmap
def singlecell(data_set='singlecell'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'singlecell.csv')
Y = read_csv(filename, header=0, index_col=0)
genes = Y.columns
labels = Y.index
# data = np.loadtxt(os.path.join(dir_path, 'singlecell.csv'), delimiter=",", dtype=str)
return data_details_return({'Y': Y, 'info' : "qPCR singlecell experiment in Mouse, measuring 48 gene expressions in 1-64 cell states. The labels have been created as in Guo et al. [2010]",
'genes': genes, 'labels':labels,
}, data_set)
def singlecell_rna_seq_islam(dataset='singlecell_islam'):
if not data_available(dataset):
download_data(dataset)
from pandas import read_csv, DataFrame, concat
dir_path = os.path.join(data_path, dataset)
filename = os.path.join(dir_path, 'GSE29087_L139_expression_tab.txt.gz')
data = read_csv(filename, sep='\t', skiprows=6, compression='gzip', header=None)
header1 = read_csv(filename, sep='\t', header=None, skiprows=5, nrows=1, compression='gzip')
header2 = read_csv(filename, sep='\t', header=None, skiprows=3, nrows=1, compression='gzip')
data.columns = np.concatenate((header1.ix[0, :], header2.ix[0, 7:]))
Y = data.set_index("Feature").ix[8:, 6:-4].T.astype(float)
# read the info .soft
filename = os.path.join(dir_path, 'GSE29087_family.soft.gz')
info = read_csv(filename, sep='\t', skiprows=0, compression='gzip', header=None)
# split at ' = '
info = DataFrame(info.ix[:,0].str.split(' = ').tolist())
# only take samples:
info = info[info[0].str.contains("!Sample")]
info[0] = info[0].apply(lambda row: row[len("!Sample_"):])
groups = info.groupby(0).groups
# remove 'GGG' from barcodes
barcode = info[1][groups['barcode']].apply(lambda row: row[:-3])
title = info[1][groups['title']]
title.index = barcode
title.name = 'title'
geo_accession = info[1][groups['geo_accession']]
geo_accession.index = barcode
geo_accession.name = 'geo_accession'
case_id = info[1][groups['source_name_ch1']]
case_id.index = barcode
case_id.name = 'source_name_ch1'
info = concat([title, geo_accession, case_id], axis=1)
labels = info.join(Y).source_name_ch1[:-4]
labels[labels=='Embryonic stem cell'] = "ES"
labels[labels=='Embryonic fibroblast'] = "MEF"
return data_details_return({'Y': Y,
'info': '92 single cells (48 mouse ES cells, 44 mouse embryonic fibroblasts and 4 negative controls) were analyzed by single-cell tagged reverse transcription (STRT)',
'genes': Y.columns,
'labels': labels,
'datadf': data,
'infodf': info}, dataset)
def singlecell_rna_seq_deng(dataset='singlecell_deng'):
if not data_available(dataset):
download_data(dataset)
from pandas import read_csv, isnull
dir_path = os.path.join(data_path, dataset)
# read the info .soft
filename = os.path.join(dir_path, 'GSE45719_series_matrix.txt.gz')
info = read_csv(filename, sep='\t', skiprows=0, compression='gzip', header=None, nrows=29, index_col=0)
summary = info.loc['!Series_summary'][1]
design = info.loc['!Series_overall_design']
# only take samples:
sample_info = read_csv(filename, sep='\t', skiprows=30, compression='gzip', header=0, index_col=0).T
sample_info.columns = sample_info.columns.to_series().apply(lambda row: row[len("!Sample_"):])
sample_info.columns.name = sample_info.columns.name[len("!Sample_"):]
sample_info = sample_info[['geo_accession', 'characteristics_ch1', 'description']]
sample_info = sample_info.iloc[:, np.r_[0:4, 5:sample_info.shape[1]]]
c = sample_info.columns.to_series()
c[1:4] = ['strain', 'cross', 'developmental_stage']
sample_info.columns = c
# get the labels right:
rep = re.compile('\(.*\)')
def filter_dev_stage(row):
if isnull(row):
row = "2-cell stage embryo"
if row.startswith("developmental stage: "):
row = row[len("developmental stage: "):]
if row == 'adult':
row += " liver"
row = row.replace(' stage ', ' ')
row = rep.sub(' ', row)
row = row.strip(' ')
return row
labels = sample_info.developmental_stage.apply(filter_dev_stage)
# Extract the tar file
filename = os.path.join(dir_path, 'GSE45719_Raw.tar')
with tarfile.open(filename, 'r') as files:
print "Extracting Archive {}...".format(files.name)
data = None
gene_info = None
message = ''
members = files.getmembers()
overall = len(members)
for i, file_info in enumerate(members):
f = files.extractfile(file_info)
inner = read_csv(f, sep='\t', header=0, compression='gzip', index_col=0)
print ' '*(len(message)+1) + '\r',
message = "{: >7.2%}: Extracting: {}".format(float(i+1)/overall, file_info.name[:20]+"...txt.gz")
print message,
if data is None:
data = inner.RPKM.to_frame()
data.columns = [file_info.name[:-18]]
gene_info = inner.Refseq_IDs.to_frame()
gene_info.columns = [file_info.name[:-18]]
else:
data[file_info.name[:-18]] = inner.RPKM
gene_info[file_info.name[:-18]] = inner.Refseq_IDs
# Strip GSM number off data index
rep = re.compile('GSM\d+_')
data.columns = data.columns.to_series().apply(lambda row: row[rep.match(row).end():])
data = data.T
# make sure the same index gets used
sample_info.index = data.index
# get the labels from the description
#rep = re.compile('fibroblast|\d+-cell|embryo|liver|early blastocyst|mid blastocyst|late blastocyst|blastomere|zygote', re.IGNORECASE)
sys.stdout.write(' '*len(message) + '\r')
sys.stdout.flush()
print
print "Read Archive {}".format(files.name)
return data_details_return({'Y': data,
'series_info': info,
'sample_info': sample_info,
'gene_info': gene_info,
'summary': summary,
'design': design,
'genes': data.columns,
'labels': labels,
}, dataset)
def swiss_roll_1000():
return swiss_roll(num_samples=1000)
def swiss_roll(num_samples=3000, data_set='swiss_roll'):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'swiss_roll_data.mat'))
Y = mat_data['X_data'][:, 0:num_samples].transpose()
return data_details_return({'Y': Y, 'X': mat_data['X_data'], 'info': "The first " + str(num_samples) + " points from the swiss roll data of Tennenbaum, de Silva and Langford (2001)."}, data_set)
def isomap_faces(num_samples=698, data_set='isomap_face_data'):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'face_data.mat'))
Y = mat_data['images'][:, 0:num_samples].transpose()
return data_details_return({'Y': Y, 'poses' : mat_data['poses'], 'lights': mat_data['lights'], 'info': "The first " + str(num_samples) + " points from the face data of Tennenbaum, de Silva and Langford (2001)."}, data_set)
def simulation_BGPLVM():
mat_data = scipy.io.loadmat(os.path.join(data_path, 'BGPLVMSimulation.mat'))
Y = np.array(mat_data['Y'], dtype=float)
S = np.array(mat_data['initS'], dtype=float)
mu = np.array(mat_data['initMu'], dtype=float)
#return data_details_return({'S': S, 'Y': Y, 'mu': mu}, data_set)
return {'Y': Y, 'S': S,
'mu' : mu,
'info': "Simulated test dataset generated in MATLAB to compare BGPLVM between python and MATLAB"}
def toy_rbf_1d(seed=default_seed, num_samples=500):
"""
Samples values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1.
:param seed: seed to use for random sampling.
:type seed: int
:param num_samples: number of samples to sample in the function (default 500).
:type num_samples: int
"""
np.random.seed(seed=seed)
num_in = 1
X = np.random.uniform(low= -1.0, high=1.0, size=(num_samples, num_in))
X.sort(axis=0)
rbf = GPy.kern.RBF(num_in, variance=1., lengthscale=np.array((0.25,)))
white = GPy.kern.White(num_in, variance=1e-2)
kernel = rbf + white
K = kernel.K(X)
y = np.reshape(np.random.multivariate_normal(np.zeros(num_samples), K), (num_samples, 1))
return {'X':X, 'Y':y, 'info': "Sampled " + str(num_samples) + " values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1."}
def toy_rbf_1d_50(seed=default_seed):
np.random.seed(seed=seed)
data = toy_rbf_1d()
indices = np.random.permutation(data['X'].shape[0])
indices = indices[0:50]
indices.sort(axis=0)
X = data['X'][indices, :]
Y = data['Y'][indices, :]
return {'X': X, 'Y': Y, 'info': "Subsamples the toy_rbf_sample with 50 values randomly taken from the original sample.", 'seed' : seed}
def toy_linear_1d_classification(seed=default_seed):
np.random.seed(seed=seed)
x1 = np.random.normal(-3, 5, 20)
x2 = np.random.normal(3, 5, 20)
X = (np.r_[x1, x2])[:, None]
return {'X': X, 'Y': sample_class(2.*X), 'F': 2.*X, 'seed' : seed}
def olivetti_glasses(data_set='olivetti_glasses', num_training=200, seed=default_seed):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
y = np.load(os.path.join(path, 'has_glasses.np'))
y = np.where(y=='y',1,0).reshape(-1,1)
faces = scipy.io.loadmat(os.path.join(path, 'olivettifaces.mat'))['faces'].T
np.random.seed(seed=seed)
index = np.random.permutation(faces.shape[0])
X = faces[index[:num_training],:]
Xtest = faces[index[num_training:],:]
Y = y[index[:num_training],:]
Ytest = y[index[num_training:]]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed' : seed, 'info': "ORL Faces with labels identifiying who is wearing glasses and who isn't. Data is randomly partitioned according to given seed. Presence or absence of glasses was labelled by James Hensman."}, 'olivetti_faces')
def olivetti_faces(data_set='olivetti_faces'):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
zip = zipfile.ZipFile(os.path.join(path, 'att_faces.zip'), 'r')
for name in zip.namelist():
zip.extract(name, path)
Y = []
lbls = []
for subject in range(40):
for image in range(10):
image_path = os.path.join(path, 'orl_faces', 's'+str(subject+1), str(image+1) + '.pgm')
from GPy.util import netpbmfile
Y.append(netpbmfile.imread(image_path).flatten())
lbls.append(subject)
Y = np.asarray(Y)
lbls = np.asarray(lbls)[:, None]
return data_details_return({'Y': Y, 'lbls' : lbls, 'info': "ORL Faces processed to 64x64 images."}, data_set)
def xw_pen(data_set='xw_pen'):
if not data_available(data_set):
download_data(data_set)
Y = np.loadtxt(os.path.join(data_path, data_set, 'xw_pen_15.csv'), delimiter=',')
X = np.arange(485)[:, None]
return data_details_return({'Y': Y, 'X': X, 'info': "Tilt data from a personalized digital assistant pen. Plot in original paper showed regression between time steps 175 and 275."}, data_set)
def download_rogers_girolami_data(data_set='rogers_girolami_data'):
if not data_available('rogers_girolami_data'):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar_file = os.path.join(path, 'firstcoursemldata.tar.gz')
tar = tarfile.open(tar_file)
print('Extracting file.')
tar.extractall(path=path)
tar.close()
def olympic_100m_men(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['male100']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic sprint times for 100 m men from 1896 until 2008. Example is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_100m_women(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['female100']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic sprint times for 100 m women from 1896 until 2008. Example is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_200m_women(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['female200']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic 200 m winning times for women from 1896 until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_200m_men(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['male200']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Male 200 m winning times for women from 1896 until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_400m_women(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['female400']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic 400 m winning times for women until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_400m_men(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['male400']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Male 400 m winning times for women until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_marathon_men(data_set='olympic_marathon_men'):
if not data_available(data_set):
download_data(data_set)
olympics = np.genfromtxt(os.path.join(data_path, data_set, 'olympicMarathonTimes.csv'), delimiter=',')
X = olympics[:, 0:1]
Y = olympics[:, 1:2]
return data_details_return({'X': X, 'Y': Y}, data_set)
def olympic_sprints(data_set='rogers_girolami_data'):
"""All olympics sprint winning times for multiple output prediction."""
X = np.zeros((0, 2))
Y = np.zeros((0, 1))
for i, dataset in enumerate([olympic_100m_men,
olympic_100m_women,
olympic_200m_men,
olympic_200m_women,
olympic_400m_men,
olympic_400m_women]):
data = dataset()
year = data['X']
time = data['Y']
X = np.vstack((X, np.hstack((year, np.ones_like(year)*i))))
Y = np.vstack((Y, time))
data['X'] = X
data['Y'] = Y
data['info'] = "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning."
return data_details_return({
'X': X,
'Y': Y,
'info': "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
'output_info': {
0:'100m Men',
1:'100m Women',
2:'200m Men',
3:'200m Women',
4:'400m Men',
5:'400m Women'}
}, data_set)
# def movielens_small(partNo=1,seed=default_seed):
# np.random.seed(seed=seed)
# fileName = os.path.join(data_path, 'movielens', 'small', 'u' + str(partNo) + '.base')
# fid = open(fileName)
# uTrain = np.fromfile(fid, sep='\t', dtype=np.int16).reshape((-1, 4))
# fid.close()
# maxVals = np.amax(uTrain, axis=0)
# numUsers = maxVals[0]
# numFilms = maxVals[1]
# numRatings = uTrain.shape[0]
# Y = scipy.sparse.lil_matrix((numFilms, numUsers), dtype=np.int8)
# for i in range(numUsers):
# ind = pb.mlab.find(uTrain[:, 0]==i+1)
# Y[uTrain[ind, 1]-1, i] = uTrain[ind, 2]
# fileName = os.path.join(data_path, 'movielens', 'small', 'u' + str(partNo) + '.test')
# fid = open(fileName)
# uTest = np.fromfile(fid, sep='\t', dtype=np.int16).reshape((-1, 4))
# fid.close()
# numTestRatings = uTest.shape[0]
# Ytest = scipy.sparse.lil_matrix((numFilms, numUsers), dtype=np.int8)
# for i in range(numUsers):
# ind = pb.mlab.find(uTest[:, 0]==i+1)
# Ytest[uTest[ind, 1]-1, i] = uTest[ind, 2]
# lbls = np.empty((1,1))
# lblstest = np.empty((1,1))
# return {'Y':Y, 'lbls':lbls, 'Ytest':Ytest, 'lblstest':lblstest}
def crescent_data(num_data=200, seed=default_seed):
"""
Data set formed from a mixture of four Gaussians. In each class two of the Gaussians are elongated at right angles to each other and offset to form an approximation to the crescent data that is popular in semi-supervised learning as a toy problem.
:param num_data: number of data points to be sampled (default is 200).
:type num_data: int
:param seed: random seed to be used for data generation.
:type seed: int
"""
np.random.seed(seed=seed)
sqrt2 = np.sqrt(2)
# Rotation matrix
R = np.array([[sqrt2 / 2, -sqrt2 / 2], [sqrt2 / 2, sqrt2 / 2]])
# Scaling matrices
scales = []
scales.append(np.array([[3, 0], [0, 1]]))
scales.append(np.array([[3, 0], [0, 1]]))
scales.append([[1, 0], [0, 3]])
scales.append([[1, 0], [0, 3]])
means = []
means.append(np.array([4, 4]))
means.append(np.array([0, 4]))
means.append(np.array([-4, -4]))
means.append(np.array([0, -4]))
Xparts = []
num_data_part = []
num_data_total = 0
for i in range(0, 4):
num_data_part.append(round(((i + 1) * num_data) / 4.))
num_data_part[i] -= num_data_total
part = np.random.normal(size=(num_data_part[i], 2))
part = np.dot(np.dot(part, scales[i]), R) + means[i]
Xparts.append(part)
num_data_total += num_data_part[i]
X = np.vstack((Xparts[0], Xparts[1], Xparts[2], Xparts[3]))
Y = np.vstack((np.ones((num_data_part[0] + num_data_part[1], 1)), -np.ones((num_data_part[2] + num_data_part[3], 1))))
return {'X':X, 'Y':Y, 'info': "Two separate classes of data formed approximately in the shape of two crescents."}
def creep_data(data_set='creep_rupture'):
"""Brun and Yoshida's metal creep rupture data."""
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar_file = os.path.join(path, 'creeprupt.tar')
tar = tarfile.open(tar_file)
print('Extracting file.')
tar.extractall(path=path)
tar.close()
all_data = np.loadtxt(os.path.join(data_path, data_set, 'taka'))
y = all_data[:, 1:2].copy()
features = [0]
features.extend(range(2, 31))
X = all_data[:, features].copy()
return data_details_return({'X': X, 'y': y}, data_set)
def cifar10_patches(data_set='cifar-10'):
"""The Candian Institute for Advanced Research 10 image data set. Code for loading in this data is taken from this Boris Babenko's blog post, original code available here: http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of-code"""
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'cifar-10-python.tar.gz')
if not data_available(data_set):
download_data(data_set)
import tarfile
# This code is from Boris Babenko's blog post.
# http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of-code
tfile = tarfile.open(filename, 'r:gz')
tfile.extractall(dir_path)
with open(os.path.join(dir_path, 'cifar-10-batches-py','data_batch_1'),'rb') as f:
data = pickle.load(f)
images = data['data'].reshape((-1,3,32,32)).astype('float32')/255
images = np.rollaxis(images, 1, 4)
patches = np.zeros((0,5,5,3))
for x in range(0,32-5,5):
for y in range(0,32-5,5):
patches = np.concatenate((patches, images[:,x:x+5,y:y+5,:]), axis=0)
patches = patches.reshape((patches.shape[0],-1))
return data_details_return({'Y': patches, "info" : "32x32 pixel patches extracted from the CIFAR-10 data by Boris Babenko to demonstrate k-means features."}, data_set)
def cmu_mocap_49_balance(data_set='cmu_mocap'):
"""Load CMU subject 49's one legged balancing motion that was used by Alvarez, Luengo and Lawrence at AISTATS 2009."""
train_motions = ['18', '19']
test_motions = ['20']
data = cmu_mocap('49', train_motions, test_motions, sample_every=4, data_set=data_set)
data['info'] = "One legged balancing motions from CMU data base subject 49. As used in Alvarez, Luengo and Lawrence at AISTATS 2009. It consists of " + data['info']
return data
def cmu_mocap_35_walk_jog(data_set='cmu_mocap'):
"""Load CMU subject 35's walking and jogging motions, the same data that was used by Taylor, Roweis and Hinton at NIPS 2007. but without their preprocessing. Also used by Lawrence at AISTATS 2007."""
train_motions = ['01', '02', '03', '04', '05', '06',
'07', '08', '09', '10', '11', '12',
'13', '14', '15', '16', '17', '19',
'20', '21', '22', '23', '24', '25',
'26', '28', '30', '31', '32', '33', '34']
test_motions = ['18', '29']
data = cmu_mocap('35', train_motions, test_motions, sample_every=4, data_set=data_set)
data['info'] = "Walk and jog data from CMU data base subject 35. As used in Tayor, Roweis and Hinton at NIPS 2007, but without their pre-processing (i.e. as used by Lawrence at AISTATS 2007). It consists of " + data['info']
return data
def cmu_mocap(subject, train_motions, test_motions=[], sample_every=4, data_set='cmu_mocap'):
"""Load a given subject's training and test motions from the CMU motion capture data."""
# Load in subject skeleton.
subject_dir = os.path.join(data_path, data_set)
# Make sure the data is downloaded.
all_motions = train_motions + test_motions
resource = cmu_urls_files(([subject], [all_motions]))
data_resources[data_set] = data_resources['cmu_mocap_full'].copy()
data_resources[data_set]['files'] = resource['files']
data_resources[data_set]['urls'] = resource['urls']
if resource['urls']:
download_data(data_set)
skel = GPy.util.mocap.acclaim_skeleton(os.path.join(subject_dir, subject + '.asf'))
# Set up labels for each sequence
exlbls = np.eye(len(train_motions))
# Load sequences
tot_length = 0
temp_Y = []
temp_lbls = []
for i in range(len(train_motions)):
temp_chan = skel.load_channels(os.path.join(subject_dir, subject + '_' + train_motions[i] + '.amc'))
temp_Y.append(temp_chan[::sample_every, :])
temp_lbls.append(np.tile(exlbls[i, :], (temp_Y[i].shape[0], 1)))
tot_length += temp_Y[i].shape[0]
Y = np.zeros((tot_length, temp_Y[0].shape[1]))
lbls = np.zeros((tot_length, temp_lbls[0].shape[1]))
end_ind = 0
for i in range(len(temp_Y)):
start_ind = end_ind
end_ind += temp_Y[i].shape[0]
Y[start_ind:end_ind, :] = temp_Y[i]
lbls[start_ind:end_ind, :] = temp_lbls[i]
if len(test_motions) > 0:
temp_Ytest = []
temp_lblstest = []
testexlbls = np.eye(len(test_motions))
tot_test_length = 0
for i in range(len(test_motions)):
temp_chan = skel.load_channels(os.path.join(subject_dir, subject + '_' + test_motions[i] + '.amc'))
temp_Ytest.append(temp_chan[::sample_every, :])
temp_lblstest.append(np.tile(testexlbls[i, :], (temp_Ytest[i].shape[0], 1)))
tot_test_length += temp_Ytest[i].shape[0]
# Load test data
Ytest = np.zeros((tot_test_length, temp_Ytest[0].shape[1]))
lblstest = np.zeros((tot_test_length, temp_lblstest[0].shape[1]))
end_ind = 0
for i in range(len(temp_Ytest)):
start_ind = end_ind
end_ind += temp_Ytest[i].shape[0]
Ytest[start_ind:end_ind, :] = temp_Ytest[i]
lblstest[start_ind:end_ind, :] = temp_lblstest[i]
else:
Ytest = None
lblstest = None
info = 'Subject: ' + subject + '. Training motions: '
for motion in train_motions:
info += motion + ', '
info = info[:-2]
if len(test_motions) > 0:
info += '. Test motions: '
for motion in test_motions:
info += motion + ', '
info = info[:-2] + '.'
else:
info += '.'
if sample_every != 1:
info += ' Data is sub-sampled to every ' + str(sample_every) + ' frames.'
return data_details_return({'Y': Y, 'lbls' : lbls, 'Ytest': Ytest, 'lblstest' : lblstest, 'info': info, 'skel': skel}, data_set)
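# Minimal usage sketch (motion numbers follow the helpers above):
#   data = cmu_mocap('35', train_motions=['01', '02'], test_motions=['18'], sample_every=4)
#   data['Y'].shape, data['lbls'].shape  # sub-sampled channels and one-hot sequence labels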
| bsd-3-clause |
jiangwen84/libmesh | doc/statistics/libmesh_svn.py | 7 | 4686 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Import stuff for working with dates
from datetime import datetime
from matplotlib.dates import date2num
# After selecting the date range, scroll down to the bottom to see the sums...
# At some point, the names of these data columns changed to:
# .) Read transactions
# .) Write transactions
# .) Write files
# But I think the numbers are still consistent.
# If there were no transactions of any kind for a time period (see Sep. 2013)
# sourceforge won't even generate a plot!
# The first month with statistics is October 2007
# Read Write Total files updated
data = [
'Oct 2007', 174, 93, 633,
'Nov 2007', 5348, 258, 1281,
'Dec 2007', 184, 44, 86,
'Jan 2008', 110, 31, 74,
'Feb 2008', 5297, 88, 240,
'Mar 2008', 248, 43, 102,
'Apr 2008', 219, 53, 637,
'May 2008', 147, 37, 100,
'Jun 2008', 5656, 45, 140,
'Jul 2008', 144, 52, 316,
'Aug 2008', 660, 56, 103,
'Sep 2008', 634, 72, 979,
'Oct 2008', 305, 39, 153,
'Nov 2008', 433, 39, 116,
'Dec 2008', 1, 29, 70,
'Jan 2009', 0, 55, 182,
'Feb 2009', 0, 52, 178,
'Mar 2009', 248, 22, 55,
'Apr 2009', 18652, 32, 79,
'May 2009', 3560, 15, 52,
'Jun 2009', 330, 22, 60,
'Jul 2009', 374, 30, 68,
'Aug 2009', 3587, 6, 13,
'Sep 2009', 3693, 30, 51,
'Oct 2009', 606, 50, 274,
'Nov 2009', 3085, 46, 155,
'Dec 2009', 7438, 27, 39,
'Jan 2010', 293, 37, 58,
'Feb 2010', 6846, 47, 99,
'Mar 2010', 1004, 77, 582,
'Apr 2010', 4048, 35, 51,
'May 2010', 76137, 15, 19,
'Jun 2010', 7109, 50, 142,
'Jul 2010', 5343, 15, 650,
'Aug 2010', 3501, 64, 185,
'Sep 2010', 5614, 58, 129,
'Oct 2010', 8778, 64, 256,
'Nov 2010', 26312, 42, 98,
'Dec 2010', 55776, 27, 79,
'Jan 2011', 1022, 28, 47,
'Feb 2011', 9185, 30, 230,
'Mar 2011', 5403, 105, 410,
'Apr 2011', 38179, 128, 338,
'May 2011', 10012, 71, 464,
'Jun 2011', 3995, 111, 459,
'Jul 2011', 46641, 109, 2585,
'Aug 2011', 71837, 61, 230,
'Sep 2011', 6966, 19, 42,
'Oct 2011', 58461, 36, 110,
'Nov 2011', 39408, 106, 346,
'Dec 2011', 3192, 73, 1217,
'Jan 2012', 17189, 58, 4240,
'Feb 2012', 75335, 180, 656,
'Mar 2012', 25472, 338, 1635,
'Apr 2012', 52424, 146, 1483,
'May 2012', 6936, 50, 477,
'Jun 2012', 82413, 121, 1135,
'Jul 2012', 3722, 185, 982,
'Aug 2012', 9582, 84, 279,
'Sep 2012', 125646, 166, 3130,
'Oct 2012', 4145, 185, 766,
'Nov 2012', 37326, 637, 8690,
'Dec 2012', 18856, 109, 293, # libmesh switched to github Dec 10, 2012
'Jan 2013', 10975, 0, 0,
'Feb 2013', 657, 0, 0,
'Mar 2013', 264, 0, 0,
'Apr 2013', 80, 0, 0,
'May 2013', 68, 0, 0,
'Jun 2013', 34, 0, 0,
'Jul 2013', 6, 0, 0,
'Aug 2013', 2, 0, 0,
'Sep 2013', 0, 0, 0,
'Oct 2013', 0, 0, 0,
'Nov 2013', 0, 0, 0, # SVN repository deleted from sf.net Nov 11, 2013
'Dec 2013', 0, 0, 0,
'Jan 2014', 0, 0, 0,
'Feb 2014', 0, 0, 0,
'Mar 2014', 0, 0, 0,
'Apr 2014', 0, 0, 0, # As of June 1, 2014 the site above no longer exists...
]
# Extract list of date strings
date_strings = data[0::4]
# Convert date strings into numbers
date_nums = []
for d in date_strings:
date_nums.append(date2num(datetime.strptime(d, '%b %Y')))
# Extract the total number of files updated
tot_files = data[3::4]
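# The flat list interleaves (date, read, write, files), so the remaining columns
# can be sliced out the same way if needed, e.g.
#   read_transactions = data[1::4]
#   write_transactions = data[2::4]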
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# Make the bar chart.
ax.bar(date_nums, tot_files, width=30, color='b')
# Create title
fig.suptitle('LibMesh SVN Files Updated/Month')
# Set tick labels at desired locations
xticklabels = ['Jan\n2008', 'Jan\n2009', 'Jan\n2010', 'Jan\n2011', 'Jan\n2012', 'Jan\n2013']
# Get numerical values for the tick labels
tick_nums = []
for x in xticklabels:
tick_nums.append(date2num(datetime.strptime(x, '%b\n%Y')))
ax.set_xticks(tick_nums)
ax.set_xticklabels(xticklabels)
# Make x-axis tick marks point outward
ax.get_xaxis().set_tick_params(direction='out')
# Set the xlimits
plt.xlim(date_nums[0], date_nums[-1]+30);
# Save as PDF
plt.savefig('libmesh_svn.pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
kazemakase/scikit-learn | sklearn/qda.py | 140 | 7682 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.lda.LDA: Linear discriminant analysis
"""
def __init__(self, priors=None, reg_param=0.):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
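            # S2 now holds the regularized variances along the class's principal
            # axes, i.e. the eigenvalues of (1 - reg_param) * Sigma_k + reg_param * I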
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
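            # project X - mean_k onto the class's principal axes and whiten by
            # 1/sqrt(variance); the squared norm below is the squared
            # Mahalanobis distance of each sample to class k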
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
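        # (subtracting the row-wise max before exponentiating avoids overflow;
        # the constant cancels in the normalization below)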
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
barricklab/bpopsim | src/python/muller_plot.py | 2 | 5279 | # Code by Aaron Reba
# 12-09-24
import argparse
import tempfile
import os
import matplotlib.pyplot as plt
import Image
import numpy
def check_adjacent(text_data, x, y):
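    """Return the 4-neighbour coordinates of (x, y) whose value in text_data
    differs from text_data[x][y], i.e. the boundary with other types."""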
checking = []
if x != 0:
checking.append((x - 1, y))
if x != text_data.shape[0] - 1:
checking.append((x + 1, y))
if y != 0:
checking.append((x, y - 1))
if y != text_data.shape[1] - 1:
checking.append((x, y + 1))
adjacent = []
for check in checking:
if text_data[x][y] != text_data[check[0]][check[1]]:
adjacent.append(check)
return adjacent
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', action='store', dest='file_name')
parser.add_argument('-o', '--output', action='store', dest='save_name')
parser.add_argument('-x', '--width', action='store', dest='width_expansion', default=1, type=int)
parser.add_argument('-y', '--height', action='store', dest='height_expansion', default=1, type=int)
results = parser.parse_args()
file_name = results.file_name
save_name = results.save_name
width_expansion = results.width_expansion
height_expansion = results.height_expansion
#make colors
color_list = []
for r in xrange(0, 256, 127):
for b in xrange(0, 256, 127):
for g in xrange(0, 256, 127):
color_list.append((r, g, b))
color_list.remove((0, 0, 0))
color_list.remove((127, 127, 127))
color_list.remove((254, 254, 254))
#temp = color_list[:3]
#color_list = temp
text_data = numpy.genfromtxt(file_name, dtype=None)
width, height = text_data.shape
plot = Image.new('RGB', (height * width_expansion, width * height_expansion))
unique_types = []
unique_locations = {} #indexed as unique
    #yields a list of 3 collections:
    #list of boundary coords of that type (cells with a differing neighbour)
    #set of neighbouring coords that are not of that type
    #list of all coords belonging to this type
#find unique types
for x in xrange(width):
for y in xrange(height):
this_type = text_data[x][y]
if this_type not in unique_types:
unique_types.append(this_type)
unique_locations[this_type] = [[], set(), []]
adjacent = check_adjacent(text_data, x, y)
if adjacent:
unique_locations[this_type][0].append((x, y))
unique_locations[this_type][1].update(adjacent)
unique_locations[this_type][2].append((x, y))
#while all types aren't touching
retry = True
while retry:
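        #greedy graph colouring: give each type the first colour that none of
        #its adjacent types already uses; if every colour clashes, report
        #'Need more colors.' and give up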
        color_map = {} #indexed as (x, y), yields rgb
color_index = 0
retry = False
#make color map
for unique in unique_types:
#if color_index loops back around to start_index, all colors
#have been checked and the current configuration is impossible.
start_index = color_index
#looping colors
for i in xrange(len(color_list)):
color = color_list[color_index]
#check to see if this type has any neighbors that are the
#same color
neighbor_position_list = unique_locations[unique][1]
for neighbor_position in neighbor_position_list:
if neighbor_position in color_map:
if color_map[neighbor_position] == color:
#neighbor with same color found,
break
else:
#a clean loop exit.
#no neighboring non-identical positions found that share
#same color. this is a good color choice. go on to the next
#set of uniques
#load colors into color_map
type_position_list = unique_locations[unique][2]
for type_position in type_position_list:
color_map[type_position] = color
break
color_index += 1
if color_index == len(color_list):
color_index = 0
if color_index == start_index:
#all colors have been tried, this combination will not
#work
retry = True
print 'Need more colors.'
return
break
else:
#clean exit. update color.
color_index += 1
if color_index == len(color_list):
color_index = 0
if retry:
break
else:
#a clean loop exit.
#a good combination has been found.
pass
#draw image
for y in xrange(height):
for x in xrange(width):
for h in xrange(height_expansion):
for w in xrange(width_expansion):
plot.putpixel(((height - y - 1) * width_expansion + w, x * height_expansion + h), color_map[(x, y)])
plot = plot.rotate(90)
plot.save(save_name)
main() | gpl-3.0 |
pkathail/magic | python/test/test.py | 1 | 4405 | #!/usr/bin/env python
from __future__ import print_function, division, absolute_import
import matplotlib as mpl
mpl.use("agg")
import magic
import numpy as np
import scprep
try:
import anndata
except (ImportError, SyntaxError):
# anndata not installed
pass
import os
data_path = os.path.join("..", "data", "test_data.csv")
if not os.path.isfile(data_path):
data_path = os.path.join("..", data_path)
scdata = scprep.io.load_csv(data_path, cell_names=False)
scdata = scprep.filter.filter_empty_cells(scdata)
scdata = scprep.filter.filter_empty_genes(scdata)
scdata = scprep.filter.filter_duplicates(scdata)
scdata_norm = scprep.normalize.library_size_normalize(scdata)
scdata_norm = scprep.transform.sqrt(scdata_norm)
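# shared test fixture: drop empty cells/genes and duplicate cells, then
# library-size normalize and square-root transform the counts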
def test_genes_str_int():
magic_op = magic.MAGIC(t="auto", decay=20, knn=10, verbose=False)
str_gene_magic = magic_op.fit_transform(scdata_norm, genes=["VIM", "ZEB1"])
int_gene_magic = magic_op.fit_transform(
scdata_norm, graph=magic_op.graph, genes=[-2, -1]
)
assert str_gene_magic.shape[0] == scdata_norm.shape[0]
np.testing.assert_array_equal(str_gene_magic, int_gene_magic)
def test_pca_only():
magic_op = magic.MAGIC(t="auto", decay=20, knn=10, verbose=False)
pca_magic = magic_op.fit_transform(scdata_norm, genes="pca_only")
assert pca_magic.shape[0] == scdata_norm.shape[0]
assert pca_magic.shape[1] == magic_op.n_pca
def test_all_genes():
magic_op = magic.MAGIC(t="auto", decay=20, knn=10, verbose=False, random_state=42)
int_gene_magic = magic_op.fit_transform(scdata_norm, genes=[-2, -1])
magic_all_genes = magic_op.fit_transform(scdata_norm, genes="all_genes")
assert scdata_norm.shape == magic_all_genes.shape
int_gene_magic2 = magic_op.transform(scdata_norm, genes=[-2, -1])
np.testing.assert_allclose(int_gene_magic, int_gene_magic2, rtol=0.015)
def test_all_genes_approx():
magic_op = magic.MAGIC(
t="auto", decay=20, knn=10, verbose=False, solver="approximate", random_state=42
)
int_gene_magic = magic_op.fit_transform(scdata_norm, genes=[-2, -1])
magic_all_genes = magic_op.fit_transform(scdata_norm, genes="all_genes")
assert scdata_norm.shape == magic_all_genes.shape
int_gene_magic2 = magic_op.transform(scdata_norm, genes=[-2, -1])
np.testing.assert_allclose(int_gene_magic, int_gene_magic2, atol=0.003, rtol=0.008)
def test_dremi():
magic_op = magic.MAGIC(t="auto", decay=20, knn=10, verbose=False)
# test DREMI: need numerical precision here
magic_op.set_params(random_state=42)
magic_op.fit(scdata_norm)
dremi = magic_op.knnDREMI("VIM", "ZEB1", plot=True)
np.testing.assert_allclose(dremi, 1.466004, atol=0.0000005)
def test_solver():
# Testing exact vs approximate solver
magic_op = magic.MAGIC(
t="auto", decay=20, knn=10, solver="exact", verbose=False, random_state=42
)
data_imputed_exact = magic_op.fit_transform(scdata_norm)
# should have exactly as many genes stored
assert magic_op.X_magic.shape[1] == scdata_norm.shape[1]
    # should be non-negative
assert np.all(data_imputed_exact >= 0)
magic_op = magic.MAGIC(
t="auto",
decay=20,
knn=10,
n_pca=150,
solver="approximate",
verbose=False,
random_state=42,
)
# magic_op.set_params(solver='approximate')
data_imputed_apprx = magic_op.fit_transform(scdata_norm)
# should have n_pca genes stored
assert magic_op.X_magic.shape[1] == 150
# make sure they're close-ish
np.testing.assert_allclose(data_imputed_apprx, data_imputed_exact, atol=0.15)
# make sure they're not identical
assert np.any(data_imputed_apprx != data_imputed_exact)
def test_anndata():
try:
anndata
except NameError:
# anndata not installed
return
scdata = anndata.read_csv(data_path)
fast_magic_operator = magic.MAGIC(
t="auto", solver="approximate", decay=None, knn=10, verbose=False
)
sc_magic = fast_magic_operator.fit_transform(scdata, genes="all_genes")
assert np.all(sc_magic.var_names == scdata.var_names)
assert np.all(sc_magic.obs_names == scdata.obs_names)
sc_magic = fast_magic_operator.fit_transform(scdata, genes=["VIM", "ZEB1"])
assert np.all(sc_magic.var_names.values == np.array(["VIM", "ZEB1"]))
assert np.all(sc_magic.obs_names == scdata.obs_names)
| gpl-2.0 |
lbishal/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of the 30 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First, clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
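# X is a noisy 2D spiral: rows are (t*cos(t), t*sin(t)) samples with added
# Gaussian noise, shape (n_samples, 2) after the transpose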
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters at the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
bigdataelephants/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 261 | 2836 | # Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
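    # hash 100000 consecutive integers into 10 bins and check that the observed
    # bin frequencies match the uniform expectation to two decimal places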
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
jcmcclurg/serverpower | unused/sleepdelay/powerGraph.py | 2 | 2105 | from pylab import *
import matplotlib.pyplot as plt
import matplotlib.animation as anim
import numpy as np
import sys
#import thread
class ydata(object):
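    """Rolling buffer of y samples that also tracks the running (min, max)
    limits used to rescale the live plot."""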
def __init__(self, y):
self.y = y
self.lims = (np.min(y),np.max(y))
def add(self, newy):
self.y = roll(self.y,-1)
self.y[-1] = newy
self.lims = (np.min([self.lims[0], newy]),np.max([self.lims[1], newy]))
ion()
n = 100
x = transpose(array([linspace(0,1,n)]))
y = zeros((n,1))
why = ydata(y)
#q = Queue.Queue()
close()
fig = figure()
line, = plot(x,y,'.-')
def update_line(i,ydat,line):
if i:
line.set_ydata(ydat.y)
ylim(ydat.lims)
return line,
def data_gen( ):
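    # frame generator for FuncAnimation: reads one value per line from stdin,
    # appends it to the rolling buffer and yields True when a redraw is needed
    # (False for lines that do not parse as floats)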
try:
a = " "
while a:
a = raw_input()
print a
try:
why.add(float(a))
yield True
except ValueError:
yield False
except EOFError:
print "input is done"
pass
except KeyboardInterrupt:
pass
except GeneratorExit:
pass
"""
i = False
while not q.empty():
try:
why.add(q.get_nowait())
i = True
except Queue.Empty:
break
yield i
"""
an = anim.FuncAnimation(fig, func=update_line, frames=data_gen, fargs=(why, line),
interval=1, blit=True, repeat=False)
plt.show(block=True)
#show()
print "done."
"""
def thrd(q):
try:
while True:
a = float(raw_input())
print a
q.put(a)
except:
print "except"
#pass
thread.start_new_thread(thrd, (q,))
# If there's input ready, do something, else do something
# else. Note timeout is zero so select won't block at all.
while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
l = sys.stdin.readline()
if l:
eye.i = float(l)
else: # an empty line means stdin has been closed
eye.i = -1
else:
i = 0
"""
#while True:
# time.sleep(0.1)
| gpl-2.0 |
pgleeson/TestArea | models/Parallel/pythonScripts/perf.py | 1 | 1733 | import matplotlib.pyplot as plt
print "Going to plot performance of simulations on Legion"
times_l = {}
times_l[1] = 118.76
times_l[2] = 41.56
times_l[4] = 19.19
times_l[8] = 9.57
times_l[16] = 5.23
times_l[24] = 4.08
times_l[32] = 3.13
times_l[40] = 3.19
times_l[48] = 2.72
times_l[64] = 2.16
times_s = {}
times_s[1] = 91.02
times_s[4] = 20.64
times_s[8] = 10.46
times_s[12] = 7.26
times_s[16] = 5.8
times_b = {}
times_b[1] = 187.27
times_b[2] = 85.88
times_b[3] = 40.56
times_b[4] = 29.8
times_e = {}
times_e[1] = 74.9
times_e[2] = 47.63
times_e[3] = 36.61
times_e[4] = 33.93
def getXvals(times):
x = times.keys()
x.sort()
return x
def getYvals(times):
x = times.keys()
x.sort()
y = []
for t in x:
y.append(times[t])
return y
times_s_t = {}
for i in times_s.keys():
times_s_t[i] = times_s[1]/i
times_l_t = {}
for i in times_l.keys():
times_l_t[i] = times_l[1]/i
times_b_t = {}
for i in times_b.keys():
times_b_t[i] = times_b[1]/i
times_e_t = {}
for i in times_e.keys():
times_e_t[i] = times_e[1]/i
lines = plt.loglog(getXvals(times_l_t), getYvals(times_l_t), 'r:', \
getXvals(times_l), getYvals(times_l), 'ro-', \
getXvals(times_s_t), getYvals(times_s_t), 'g:', \
getXvals(times_s), getYvals(times_s), 'go-', \
getXvals(times_b_t), getYvals(times_b_t), 'b:', \
getXvals(times_b), getYvals(times_b), 'bo-', \
getXvals(times_e_t), getYvals(times_e_t), 'k:', \
getXvals(times_e), getYvals(times_e), 'ko-')
plt.ylabel('Simulation time')
plt.xlabel('Number of processors')
#plt.axis([-3, 36, -10, 200])
print lines[0]
lines[0].set_label('Legion')
c = plt
print c.__class__
print c
print dir(c)
print plt.xticks()
plt.show()
| gpl-2.0 |
ishank08/scikit-learn | examples/plot_digits_pipe.py | 65 | 1652 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
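# 3 values of n_components x 3 values of C = 9 pipeline settings, each scored
# with GridSearchCV's default cross-validation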
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
JungeAlexander/cocoscore | tests/tagger/test_co_occurrence_score.py | 1 | 54674 | import numpy
import pandas
from pandas.util.testing import assert_frame_equal
from pytest import approx
from pytest import raises
import cocoscore.tagger.co_occurrence_score as co_occurrence_score
import cocoscore.tools.data_tools as dt
from cocoscore.ml.distance_scores import polynomial_decay_distance
from cocoscore.ml.fasttext_helpers import fasttext_fit_predict_default
def fasttext_function(train, valid, epochs, dim, bucket):
return fasttext_fit_predict_default(train, valid,
epochs=epochs,
dim=dim,
bucket=bucket)
class TestClass(object):
matches_file_path = 'tests/tagger/matches_file.tsv'
matches_file_same_type_path = 'tests/tagger/matches_file_same_type.tsv'
matches_document_level_comentions_file_path = 'tests/tagger/matches_file_document_level_comentions.tsv'
matches_file_single_matches_path = 'tests/tagger/matches_file_single_matches.tsv'
matches_file_cross_path = 'tests/tagger/matches_file_cross.tsv'
matches_file_cross_fantasy_types_path = 'tests/tagger/matches_file_cross_fantasy_types.tsv'
sentence_score_file_path = 'tests/tagger/sentence_scores_file.tsv'
paragraph_score_file_path = 'tests/tagger/paragraph_scores_file.tsv'
document_score_file_path = 'tests/tagger/document_scores_file.tsv'
paragraph_sentence_score_file_path = 'tests/tagger/paragraph_sentence_scores_file.tsv'
document_paragraph_sentence_score_file_path = 'tests/tagger/document_paragraph_sentence_scores_file.tsv'
document_paragraph_score_file_path = 'tests/tagger/document_paragraph_scores_file.tsv'
precedence_document_paragraph_sentence_score_file_path = \
'tests/tagger/precedence_document_paragraph_sentence_scores_file.tsv'
entity_file_path = 'tests/tagger/entities2.tsv.gz'
entity_fantasy_types_file_path = 'tests/tagger/entities2_fantasy_types.tsv.gz'
entity_file_same_type_path = 'tests/tagger/entities2_same_type.tsv.gz'
cos_cv_test_path = 'tests/ml/cos_simple_cv.txt'
def test_load_sentence_scores(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
assert {('--D', 'A'): {(1111, 1, 2): 0.9, (1111, 2, 3): 0.5,
(3333, 2, 2): 0.4, (3333, 2, 3): 0.44},
('B', 'C'): {(2222, 1, 1): 0}} == sentence_scores
def test_load_sentence_scores_score_cutoff(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path,
cutoff=0.5)
assert {('--D', 'A'): {(1111, 1, 2): 0.9, (1111, 2, 3): 0.5}} == sentence_scores
def test_load_paragraph_scores(self):
paragraph_scores = co_occurrence_score.load_score_file(self.paragraph_score_file_path)
assert {('--D', 'A'): {(1111, 1): 0.9, (1111, 2): 0.5,
(3333, 2): 0.4},
('B', 'C'): {(2222, 1): 0}} == paragraph_scores
def test_load_document_scores(self):
document_scores = co_occurrence_score.load_score_file(self.document_score_file_path)
assert {('--D', 'A'): {1111: 1,
3333: 2},
('B', 'C'): {2222: 3}} == document_scores
def test_weighted_counts_sentences(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
weighted_counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, None, None, None,
first_type=9606, second_type=-26,
document_weight=15.0, paragraph_weight=0,
sentence_weight=1.0)
assert {('--D', 'A'): 15.9 + 15.44,
('B', 'C'): 15,
'A': 15.9 + 15.44,
'--D': 15.9 + 15.44,
'B': 15,
'C': 15,
None: 15.9 + 15.44 + 15} == approx(weighted_counts)
def test_weighted_counts_sentences_paragraphs(self):
scores = co_occurrence_score.load_score_file(self.paragraph_sentence_score_file_path)
sentence_scores, paragraph_scores, _ = co_occurrence_score.split_scores(scores)
weighted_counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, paragraph_scores, None, None,
first_type=9606, second_type=-26,
document_weight=15.0, paragraph_weight=1.0,
sentence_weight=1.0)
assert {('--D', 'A'): 15.9 + 0.9 + 15.44 + 0.4,
('B', 'C'): 15,
'A': 15.9 + 0.9 + 15.44 + 0.4,
'--D': 15.9 + 0.9 + 15.44 + 0.4,
'B': 15,
'C': 15,
None: 15.9 + 0.9 + 15.44 + 0.4 + 15} == approx(weighted_counts)
def test_weighted_counts_paragraphs(self):
paragraph_scores = co_occurrence_score.load_score_file(self.paragraph_score_file_path)
weighted_counts = co_occurrence_score.get_weighted_counts(None, None, paragraph_scores, None, None,
first_type=9606, second_type=-26,
document_weight=15.0, paragraph_weight=1.0,
sentence_weight=1.0)
assert {('--D', 'A'): 15.0 + 0.9 + 15.0 + 0.4,
('B', 'C'): 15.0,
'A': 15.0 + 0.9 + 15.0 + 0.4,
'--D': 15.0 + 0.9 + 15.0 + 0.4,
'B': 15.0,
'C': 15.0,
None: 15.0 + 0.9 + 15.0 + 0.4 + 15.0} == approx(weighted_counts)
def test_weighted_counts_sentences_paragraphs_documents(self):
scores = co_occurrence_score.load_score_file(self.document_paragraph_sentence_score_file_path)
sentence_scores, paragraph_scores, document_scores = co_occurrence_score.split_scores(scores)
weighted_counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, paragraph_scores,
document_scores, None,
first_type=9606, second_type=-26,
document_weight=2.0, paragraph_weight=1.0,
sentence_weight=1.0)
assert {('--D', 'A'): 0.9 + 0.9 + 1 * 2 + 0.44 + 0.4 + 2 * 2,
('B', 'C'): 3 * 2,
'A': 0.9 + 0.9 + 1 * 2 + 0.44 + 0.4 + 2 * 2,
'--D': 0.9 + 0.9 + 1 * 2 + 0.44 + 0.4 + 2 * 2,
'B': 3 * 2,
'C': 3 * 2,
None: 0.9 + 0.9 + 1 * 2 + 0.44 + 0.4 + 2 * 2 + 3 * 2} == weighted_counts
def test_weighted_counts_documents(self):
document_scores = co_occurrence_score.load_score_file(self.document_score_file_path)
weighted_counts = co_occurrence_score.get_weighted_counts(None, None, None,
document_scores, None,
first_type=9606, second_type=-26,
document_weight=2.0, paragraph_weight=1.0,
sentence_weight=2.0)
assert {('--D', 'A'): 1 * 2 + 2 * 2,
('B', 'C'): 3 * 2,
'A': 1 * 2 + 2 * 2,
'--D': 1 * 2 + 2 * 2,
'B': 3 * 2,
'C': 3 * 2,
None: 1 * 2 + 2 * 2 + 3 * 2} == weighted_counts
def test_weighted_counts_paragraphs_documents(self):
paragraph_scores = co_occurrence_score.load_score_file(self.paragraph_score_file_path, )
document_scores = co_occurrence_score.load_score_file(self.document_score_file_path)
weighted_counts = co_occurrence_score.get_weighted_counts(None, None, paragraph_scores,
document_scores, None,
first_type=9606, second_type=-26,
document_weight=2.0, paragraph_weight=1.0,
sentence_weight=1.0)
assert {('--D', 'A'): 0.9 + 1 * 2. + 0.4 + 2 * 2.,
('B', 'C'): 3 * 2.,
'A': 0.9 + 1 * 2. + 0.4 + 2 * 2.,
'--D': 0.9 + 1 * 2. + 0.4 + 2 * 2.,
'B': 3 * 2.,
'C': 3 * 2.,
None: 0.9 + 1 * 2. + 0.4 + 2 * 2. + 3 * 2.} == approx(weighted_counts)
def test_co_occurrence_score_sentences(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
document_weight = 15.0
paragraph_weight = 0
weighting_exponent = 0.6
counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, None, None, None,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=1.0)
scores = co_occurrence_score.co_occurrence_score(None, self.sentence_score_file_path, None,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
weighting_exponent=weighting_exponent)
c_a_d = counts[('--D', 'A')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
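        # expected score: c_ab^w * (c_ab * c_total / (c_a * c_b))^(1 - w), i.e. a
        # weighted geometric mean of the raw co-mention weight and its enrichment
        # over the product of the marginals (w = weighting_exponent)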
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
def test_co_occurrence_score_sentences_paragraphs(self):
scores = co_occurrence_score.load_score_file(self.paragraph_sentence_score_file_path)
sentence_scores, paragraph_scores, _ = co_occurrence_score.split_scores(scores)
document_weight = 15.0
paragraph_weight = 1.0
weighting_exponent = 0.6
counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, paragraph_scores, None, None,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=1.0)
scores = co_occurrence_score.co_occurrence_score(None, self.paragraph_sentence_score_file_path, None,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
weighting_exponent=weighting_exponent)
c_a_d = counts[('--D', 'A')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
def test_co_occurrence_score_sentences_documents(self):
scores = co_occurrence_score.load_score_file(self.document_paragraph_sentence_score_file_path)
sentence_scores, paragraph_scores, document_scores = co_occurrence_score.split_scores(scores)
document_weight = 15.0
paragraph_weight = 1.0
weighting_exponent = 0.6
counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, paragraph_scores, document_scores, None,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=1.0)
scores = co_occurrence_score.co_occurrence_score(None, self.document_paragraph_sentence_score_file_path, None,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
weighting_exponent=weighting_exponent)
c_a_d = counts[('--D', 'A')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
def test_co_occurrence_score_precedence_sentences_paragraphs_documents(self):
scores = co_occurrence_score.load_score_file(self.precedence_document_paragraph_sentence_score_file_path)
sentence_scores, paragraph_scores, document_scores = co_occurrence_score.split_scores(scores)
document_weight = 2.0
paragraph_weight = 1.0
sentence_weight = 1.0
weighted_counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, paragraph_scores,
document_scores, None,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=sentence_weight,
ignore_scores=True)
weight_sum = document_weight + paragraph_weight + sentence_weight
assert {('B', 'C'): weight_sum,
'B': weight_sum,
'C': weight_sum,
None: weight_sum} == weighted_counts
def test_weighted_counts_sentences_only_diseases(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
weighted_counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, None, None, None,
first_type=9606, second_type=-26,
document_weight=15.0, paragraph_weight=0,
sentence_weight=1.0,
ignore_scores=True)
assert {('--D', 'A'): 32,
('B', 'C'): 16,
'A': 32,
'--D': 32,
'B': 16,
'C': 16,
None: 48} == weighted_counts
def test_co_occurrence_score_sentences_only_diseases(self):
scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
sentence_scores, _, _ = co_occurrence_score.split_scores(scores)
document_weight = 15.0
paragraph_weight = 0
weighting_exponent = 0.6
counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, None, None, None,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=1.0,
ignore_scores=True)
scores = co_occurrence_score.co_occurrence_score(None, self.sentence_score_file_path, None,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
weighting_exponent=weighting_exponent,
ignore_scores=True)
c_a_d = counts[('--D', 'A')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
def test_weighted_counts_matches_file(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
weighted_counts = co_occurrence_score.get_weighted_counts(self.matches_file_path, sentence_scores, None, None,
self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=15.0, paragraph_weight=0,
sentence_weight=1.0)
assert 15.9 + 15.44 + 15. == approx(weighted_counts[None]) # needed due to floating point strangeness
del weighted_counts[None]
assert {('--D', 'A'): 15.9 + 15.44,
('B', 'C'): 15.,
'A': 15.9 + 15.44,
'--D': 15.9 + 15.44,
'B': 15.,
'C': 15.} == weighted_counts
def test_co_occurrence_score_matches_file(self):
scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
sentence_scores, _, _ = co_occurrence_score.split_scores(scores)
document_weight = 15.0
paragraph_weight = 0
weighting_exponent = 0.6
counts = co_occurrence_score.get_weighted_counts(self.matches_file_path, sentence_scores, None, None,
self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=1.0)
scores = co_occurrence_score.co_occurrence_score(self.matches_file_path, self.sentence_score_file_path,
self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
weighting_exponent=weighting_exponent)
c_a_d = counts[('--D', 'A')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
def test_co_occurrence_score_matches_file_same_type(self):
scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
sentence_scores, _, _ = co_occurrence_score.split_scores(scores)
document_weight = 15.0
paragraph_weight = 0
weighting_exponent = 0.6
counts = co_occurrence_score.get_weighted_counts(self.matches_file_same_type_path, sentence_scores, None, None,
self.entity_file_same_type_path,
first_type=2, second_type=2,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=1.0)
scores = co_occurrence_score.co_occurrence_score(self.matches_file_same_type_path,
self.sentence_score_file_path,
self.entity_file_same_type_path,
first_type=2, second_type=2,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
weighting_exponent=weighting_exponent)
c_a_d = counts[('--D', 'A')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
def test_co_occurrence_score_matches_file_diseases(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
document_weight = 15.0
paragraph_weight = 0
sentence_weight = 1.0
weighting_exponent = 0.6
counts = co_occurrence_score.get_weighted_counts(self.matches_file_path, sentence_scores, None, None,
self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=1.0,
ignore_scores=True)
scores = co_occurrence_score.co_occurrence_score_diseases(self.matches_file_path,
self.entity_file_path,
document_weight=document_weight,
sentence_weight=sentence_weight)
c_a_d = counts[('--D', 'A')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
def test_weighted_counts_matches_document_level_comentions_file(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
weighted_counts = co_occurrence_score.get_weighted_counts(self.matches_document_level_comentions_file_path,
sentence_scores, None, None,
self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=15.0, paragraph_weight=0,
sentence_weight=1.0)
assert {('--D', 'A'): 15. + 15.44,
('B', 'C'): 15.,
'A': 15. + 15.44,
'--D': 15. + 15.44,
'B': 15.,
'C': 15.,
None: 15. + 15.44 + 15.} == weighted_counts
def test_co_occurrence_score_matches_document_level_comentions_file(self):
scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
sentence_scores, _, _ = co_occurrence_score.split_scores(scores)
document_weight = 15.0
paragraph_weight = 0
weighting_exponent = 0.6
counts = co_occurrence_score.get_weighted_counts(self.matches_document_level_comentions_file_path,
sentence_scores, None, None,
self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=1.0)
scores = co_occurrence_score.co_occurrence_score(self.matches_document_level_comentions_file_path,
self.sentence_score_file_path,
self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
weighting_exponent=weighting_exponent)
c_a_d = counts[('--D', 'A')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
def test_co_occurrence_score_matches_document_level_comentions_file_diseases(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
document_weight = 15.0
paragraph_weight = 0
weighting_exponent = 0.6
sentence_weight = 1.0
counts = co_occurrence_score.get_weighted_counts(self.matches_document_level_comentions_file_path,
sentence_scores, None, None, self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=sentence_weight,
ignore_scores=True)
scores = co_occurrence_score.co_occurrence_score_diseases(self.matches_document_level_comentions_file_path,
self.entity_file_path,
document_weight=document_weight,
sentence_weight=sentence_weight)
c_a_d = counts[('--D', 'A')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
def test_weighted_counts_matches_single_matches_file(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
weighted_counts = co_occurrence_score.get_weighted_counts(self.matches_file_single_matches_path,
sentence_scores, None, None,
self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=15.0, paragraph_weight=0,
sentence_weight=1.0)
assert 15.9 + 15.44 + 15. == approx(weighted_counts[None]) # needed due to floating point strangeness
del weighted_counts[None]
assert {('--D', 'A'): 15.9 + 15.44,
('B', 'C'): 15.,
'A': 15.9 + 15.44,
'--D': 15.9 + 15.44,
'B': 15.,
'C': 15.} == weighted_counts
def test_co_occurrence_score_matches_single_matches_file(self):
scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
sentence_scores, _, _ = co_occurrence_score.split_scores(scores)
document_weight = 15.0
paragraph_weight = 0
weighting_exponent = 0.6
counts = co_occurrence_score.get_weighted_counts(self.matches_file_single_matches_path,
sentence_scores, None, None,
self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=1.0)
scores = co_occurrence_score.co_occurrence_score(self.matches_file_single_matches_path,
self.sentence_score_file_path,
self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
weighting_exponent=weighting_exponent)
c_a_d = counts[('--D', 'A')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
def test_co_occurrence_score_matches_single_matches_file_diseases(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
document_weight = 15.0
paragraph_weight = 0
weighting_exponent = 0.6
sentence_weight = 1.0
counts = co_occurrence_score.get_weighted_counts(self.matches_file_single_matches_path,
sentence_scores, None, None, self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=sentence_weight,
ignore_scores=True)
scores = co_occurrence_score.co_occurrence_score_diseases(self.matches_file_path,
self.entity_file_path,
document_weight=document_weight,
sentence_weight=sentence_weight)
c_a_d = counts[('--D', 'A')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
def test_weighted_counts_matches_file_cross(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
weighted_counts = co_occurrence_score.get_weighted_counts(self.matches_file_cross_path, sentence_scores,
None, None,
self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=15.0, paragraph_weight=0,
sentence_weight=1.0)
assert 15.9 + 15.44 + 15. + 15. == approx(weighted_counts[None]) # needed due to float inaccuracy
del weighted_counts[None]
assert 15.9 + 15.44 + 15. == approx(weighted_counts['--D'])
del weighted_counts['--D']
assert {('--D', 'A'): 15.9 + 15.44,
('--D', 'B'): 15.,
('B', 'C'): 15.,
'A': 15.9 + 15.44,
'B': 15. + 15.,
'C': 15.} == weighted_counts
def test_co_occurrence_score_matches_file_cross(self):
scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
sentence_scores, _, _ = co_occurrence_score.split_scores(scores)
document_weight = 15.0
paragraph_weight = 0
weighting_exponent = 0.6
counts = co_occurrence_score.get_weighted_counts(self.matches_file_cross_path, sentence_scores, None, None,
self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=1.0)
scores = co_occurrence_score.co_occurrence_score(self.matches_file_cross_path, self.sentence_score_file_path,
self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
weighting_exponent=weighting_exponent)
c_a_d = counts[('--D', 'A')]
c_d_b = counts[('--D', 'B')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
s_d_b = c_d_b ** weighting_exponent * ((c_d_b * c_all) / (c_b * c_d)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
assert s_d_b == approx(scores[('--D', 'B')])
def test_co_occurrence_score_matches_file_cross_swap_types(self):
scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
sentence_scores, _, _ = co_occurrence_score.split_scores(scores)
document_weight = 15.0
paragraph_weight = 0
weighting_exponent = 0.6
counts = co_occurrence_score.get_weighted_counts(self.matches_file_cross_path, sentence_scores,
None, None,
self.entity_file_path,
first_type=-26, second_type=9606,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=1.0)
scores = co_occurrence_score.co_occurrence_score(self.matches_file_cross_path, self.sentence_score_file_path,
self.entity_file_path,
first_type=-26, second_type=9606,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
weighting_exponent=weighting_exponent)
c_a_d = counts[('--D', 'A')]
c_d_b = counts[('--D', 'B')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
s_d_b = c_d_b ** weighting_exponent * ((c_d_b * c_all) / (c_b * c_d)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
assert s_d_b == approx(scores[('--D', 'B')])
def test_co_occurrence_score_matches_file_cross_fantasy_types(self):
scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
sentence_scores, _, _ = co_occurrence_score.split_scores(scores)
document_weight = 15.0
paragraph_weight = 0
weighting_exponent = 0.6
counts = co_occurrence_score.get_weighted_counts(self.matches_file_cross_fantasy_types_path, sentence_scores,
None, None,
self.entity_fantasy_types_file_path,
first_type=1, second_type=2,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=1.0)
scores = co_occurrence_score.co_occurrence_score(self.matches_file_cross_fantasy_types_path,
self.sentence_score_file_path,
self.entity_fantasy_types_file_path,
first_type=1, second_type=2,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
weighting_exponent=weighting_exponent)
c_a_d = counts[('--D', 'A')]
c_d_b = counts[('--D', 'B')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
s_d_b = c_d_b ** weighting_exponent * ((c_d_b * c_all) / (c_b * c_d)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
assert s_d_b == approx(scores[('--D', 'B')])
def test_co_occurrence_score_matches_file_cross_diseases(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
document_weight = 15.0
paragraph_weight = 0
weighting_exponent = 0.6
sentence_weight = 1.0
counts = co_occurrence_score.get_weighted_counts(self.matches_file_cross_path, sentence_scores,
None, None,
self.entity_file_path,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=sentence_weight,
ignore_scores=True)
scores = co_occurrence_score.co_occurrence_score_diseases(self.matches_file_cross_path,
self.entity_file_path,
document_weight=document_weight,
sentence_weight=sentence_weight)
c_a_d = counts[('--D', 'A')]
c_d_b = counts[('--D', 'B')]
c_a = counts['A']
c_d = counts['--D']
c_all = counts[None]
s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
c_b_c = counts[('B', 'C')]
c_b = counts['B']
c_c = counts['C']
s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
s_d_b = c_d_b ** weighting_exponent * ((c_d_b * c_all) / (c_b * c_d)) ** (1 - weighting_exponent)
assert s_a_d == approx(scores[('--D', 'A')])
assert s_b_c == approx(scores[('B', 'C')])
assert s_d_b == approx(scores[('--D', 'B')])
def test_cocoscore_cv_independent_associations(self):
sentence_weight = 1
paragraph_weight = 1
document_weight = 1
cv_folds = 2
test_df = dt.load_data_frame(self.cos_cv_test_path, match_distance=True)
test_df['text'] = test_df['text'].apply(lambda s: s.strip().lower())
cv_results = co_occurrence_score.cv_independent_associations(test_df,
{'sentence_weight': sentence_weight,
'paragraph_weight': paragraph_weight,
'document_weight': document_weight,
},
cv_folds=cv_folds,
random_state=numpy.random.RandomState(3),
fasttext_epochs=5,
fasttext_bucket=1000,
fasttext_dim=20)
expected_col_names = [
'mean_test_score',
'stdev_test_score',
'mean_train_score',
'stdev_train_score',
'split_0_test_score',
'split_0_train_score',
'split_0_n_test',
'split_0_pos_test',
'split_0_n_train',
'split_0_pos_train',
'split_1_test_score',
'split_1_train_score',
'split_1_n_test',
'split_1_pos_test',
'split_1_n_train',
'split_1_pos_train',
]
cv_runs = 1
expected_values = [
[1.0] * cv_runs,
[0.0] * cv_runs,
[1.0] * cv_runs,
[0.0] * cv_runs,
[1.0] * cv_runs,
[1.0] * cv_runs,
[24] * cv_runs,
[0.5] * cv_runs,
[24] * cv_runs,
[0.5] * cv_runs,
[1.0] * cv_runs,
[1.0] * cv_runs,
[24] * cv_runs,
[0.5] * cv_runs,
[24] * cv_runs,
[0.5] * cv_runs,
]
expected_df = pandas.DataFrame({col: values for col, values in zip(expected_col_names, expected_values)},
columns=expected_col_names)
assert_frame_equal(cv_results, expected_df)
def test_cocoscore_cv_independent_associations_bad_param(self):
test_df = dt.load_data_frame(self.cos_cv_test_path, match_distance=True)
test_df['text'] = test_df['text'].apply(lambda s: s.strip().lower())
with raises(TypeError, match="got an unexpected keyword argument"):
_ = co_occurrence_score.cv_independent_associations(test_df, {'sentence_weightXXXX': 1,
'paragraph_weight': 1,
'document_weight': 1,
},
cv_folds=2,
random_state=numpy.random.RandomState(3),
fasttext_epochs=5,
fasttext_bucket=1000,
fasttext_dim=20,
constant_scoring='document')
def test_cocoscore_cv_independent_associations_bad_constant_scoring(self):
test_df = dt.load_data_frame(self.cos_cv_test_path, match_distance=True)
test_df['text'] = test_df['text'].apply(lambda s: s.strip().lower())
with raises(ValueError, match='Unknown constant_scoring parameter: documenti'):
_ = co_occurrence_score.cv_independent_associations(test_df, {'sentence_weight': 1,
'paragraph_weight': 1,
'document_weight': 1,
},
cv_folds=2,
random_state=numpy.random.RandomState(3),
fasttext_epochs=5,
fasttext_bucket=1000,
fasttext_dim=20,
constant_scoring='documenti')
def test_cocoscore_constant_sentence_scoring(self):
df = dt.load_data_frame(self.cos_cv_test_path, match_distance=True)
df['text'] = df['text'].apply(lambda s: s.strip().lower())
train_df = df.copy()
test_df = df.copy()
def nmdf(data_frame):
return polynomial_decay_distance(data_frame, 0, -2, 1)
train_scores, test_scores = co_occurrence_score._get_train_test_scores(train_df, test_df, fasttext_function,
fasttext_epochs=5, fasttext_dim=20,
fasttext_bucket=1000,
match_distance_function=nmdf,
constant_scoring='sentence')
sentence_matches = numpy.logical_and(df['sentence'] != -1, df['paragraph'] != -1)
non_sentence_matches = numpy.logical_not(sentence_matches)
for scores in (train_scores, test_scores):
assert (scores[sentence_matches] == 1).all()
assert (scores[non_sentence_matches] == -1).all()
def test_cocoscore_constant_paragraph_scoring(self):
df = dt.load_data_frame(self.cos_cv_test_path, match_distance=True)
df['text'] = df['text'].apply(lambda s: s.strip().lower())
train_df = df.copy()
test_df = df.copy()
def nmdf(data_frame):
return polynomial_decay_distance(data_frame, 0, -2, 1)
train_scores, test_scores = co_occurrence_score._get_train_test_scores(train_df, test_df, fasttext_function,
fasttext_epochs=5, fasttext_dim=20,
fasttext_bucket=1000,
match_distance_function=nmdf,
constant_scoring='paragraph')
paragraph_matches = numpy.logical_and(df['sentence'] == -1, df['paragraph'] != -1)
document_matches = numpy.logical_and(df['sentence'] == -1, df['paragraph'] == -1)
for scores in (train_scores, test_scores):
assert (scores[paragraph_matches] == 1).all()
assert (scores[document_matches] == -1).all()
def test_cocoscore_constant_document_scoring(self):
df = dt.load_data_frame(self.cos_cv_test_path, match_distance=True)
df['text'] = df['text'].apply(lambda s: s.strip().lower())
train_df = df.copy()
test_df = df.copy()
def nmdf(data_frame):
return polynomial_decay_distance(data_frame, 0, -2, 1)
train_scores, test_scores = co_occurrence_score._get_train_test_scores(train_df, test_df, fasttext_function,
fasttext_epochs=5, fasttext_dim=20,
fasttext_bucket=1000,
match_distance_function=nmdf,
constant_scoring='document')
paragraph_matches = numpy.logical_and(df['sentence'] == -1, df['paragraph'] != -1)
document_matches = numpy.logical_and(df['sentence'] == -1, df['paragraph'] == -1)
for scores in (train_scores, test_scores):
assert (scores[paragraph_matches] == -1).all()
assert (scores[document_matches] == 1).all()
def test_fit_score_default(self):
df = dt.load_data_frame(self.cos_cv_test_path, match_distance=True)
train_df = df.copy()
test_df = df.copy()
pairs = [('A', 'B'), ('C', 'D'), ('E', 'F'), ('G', 'H')]
train_scores, test_scores = co_occurrence_score.fit_score_default(train_df, test_df,
fasttext_epochs=5,
fasttext_dim=20,
fasttext_bucket=1000)
for pair in pairs:
assert train_scores[pair] > 0
assert test_scores[pair] > 0
| mit |
ianatpn/nupictest | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_fltkagg.py | 69 | 20839 | """
A backend for FLTK
Copyright: Gregory Lielens, Free Field Technologies SA and
John D. Hunter 2004
This code is released under the matplotlib license
"""
from __future__ import division
import os, sys, math
import fltk as Fltk
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib import rcParams, verbose
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import \
RendererBase, GraphicsContextBase, FigureManagerBase, FigureCanvasBase,\
NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import thread,time
Fl_running=thread.allocate_lock()
def Fltk_run_interactive():
global Fl_running
if Fl_running.acquire(0):
while True:
Fltk.Fl.check()
time.sleep(0.005)
else:
print "fl loop already running"
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord= {
cursors.HAND: Fltk.FL_CURSOR_HAND,
cursors.POINTER: Fltk.FL_CURSOR_ARROW,
cursors.SELECT_REGION: Fltk.FL_CURSOR_CROSS,
cursors.MOVE: Fltk.FL_CURSOR_MOVE
}
special_key={
Fltk.FL_Shift_R:'shift',
Fltk.FL_Shift_L:'shift',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
65515:'win',
65516:'win',
}
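# special_key maps FLTK keysyms for modifier keys to matplotlib key names;
# 65515/65516 are the left/right "super" (Windows-key) keysyms, reported here as 'win'.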
def error_msg_fltk(msg, parent=None):
Fltk.fl_message(msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def ishow():
"""
Show all the figures and enter the fltk mainloop in another thread
    This allows you to keep control of the interactive python session
    Warning: does not work under Windows
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
if show._needmain:
thread.start_new_thread(Fltk_run_interactive,())
show._needmain = False
def show():
"""
Show all the figures and enter the fltk mainloop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
    #mainloop; if an fltk program already exists there is no need to call this
    #threaded (and interactive) version
if show._needmain:
Fltk.Fl.run()
show._needmain = False
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Fltk.Fl_Double_Window(10,10,30,30)
canvas = FigureCanvasFltkAgg(figure)
window.end()
window.show()
window.make_current()
figManager = FigureManagerFltkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FltkCanvas(Fltk.Fl_Widget):
def __init__(self,x,y,w,h,l,source):
Fltk.Fl_Widget.__init__(self, 0, 0, w, h, "canvas")
self._source=source
self._oldsize=(None,None)
self._draw_overlay = False
self._button = None
self._key = None
def draw(self):
newsize=(self.w(),self.h())
if(self._oldsize !=newsize):
self._oldsize =newsize
self._source.resize(newsize)
self._source.draw()
t1,t2,w,h = self._source.figure.bbox.bounds
Fltk.fl_draw_image(self._source.buffer_rgba(0,0),0,0,int(w),int(h),4,0)
self.redraw()
def blit(self,bbox=None):
if bbox is None:
t1,t2,w,h = self._source.figure.bbox.bounds
else:
t1o,t2o,wo,ho = self._source.figure.bbox.bounds
t1,t2,w,h = bbox.bounds
x,y=int(t1),int(t2)
Fltk.fl_draw_image(self._source.buffer_rgba(x,y),x,y,int(w),int(h),4,int(wo)*4)
#self.redraw()
def handle(self, event):
x=Fltk.Fl.event_x()
y=Fltk.Fl.event_y()
yf=self._source.figure.bbox.height() - y
if event == Fltk.FL_FOCUS or event == Fltk.FL_UNFOCUS:
return 1
elif event == Fltk.FL_KEYDOWN:
ikey= Fltk.Fl.event_key()
if(ikey<=255):
self._key=chr(ikey)
else:
try:
self._key=special_key[ikey]
except:
self._key=None
FigureCanvasBase.key_press_event(self._source, self._key)
return 1
elif event == Fltk.FL_KEYUP:
FigureCanvasBase.key_release_event(self._source, self._key)
self._key=None
elif event == Fltk.FL_PUSH:
self.window().make_current()
if Fltk.Fl.event_button1():
self._button = 1
elif Fltk.Fl.event_button2():
self._button = 2
elif Fltk.Fl.event_button3():
self._button = 3
else:
self._button = None
if self._draw_overlay:
self._oldx=x
self._oldy=y
if Fltk.Fl.event_clicks():
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
else:
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
elif event == Fltk.FL_ENTER:
self.take_focus()
return 1
elif event == Fltk.FL_LEAVE:
return 1
elif event == Fltk.FL_MOVE:
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_DRAG:
self.window().make_current()
if self._draw_overlay:
self._dx=Fltk.Fl.event_x()-self._oldx
self._dy=Fltk.Fl.event_y()-self._oldy
Fltk.fl_overlay_rect(self._oldx,self._oldy,self._dx,self._dy)
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_RELEASE:
self.window().make_current()
if self._draw_overlay:
Fltk.fl_overlay_clear()
FigureCanvasBase.button_release_event(self._source, x, yf, self._button)
self._button = None
return 1
return 0
class FigureCanvasFltkAgg(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self,figure)
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self.canvas=FltkCanvas(0, 0, w, h, "canvas",self)
#self.draw()
def resize(self,size):
w, h = size
# compute desired figure size in inches
dpival = self.figure.dpi.get()
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch,hinch)
def draw(self):
FigureCanvasAgg.draw(self)
self.canvas.redraw()
def blit(self,bbox):
self.canvas.blit(bbox)
show = draw
def widget(self):
return self.canvas
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def destroy_figure(ptr,figman):
figman.window.hide()
Gcf.destroy(figman._num)
class FigureManagerFltkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The fltk.Toolbar
window : The fltk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
#Fltk container window
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window = window
self.window.size(w,h+30)
self.window_title="Figure %d" % num
self.window.label(self.window_title)
self.window.size_range(350,200)
self.window.callback(destroy_figure,self)
self.canvas = canvas
self._num = num
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2FltkAgg( canvas, self )
else:
self.toolbar = None
self.window.add_resizable(canvas.widget())
if self.toolbar:
self.window.add(self.toolbar.widget())
self.toolbar.update()
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
_focus = windowing.FocusManager()
self.canvas.draw()
self.window.redraw()
def set_window_title(self, title):
self.window_title=title
self.window.label(title)
class AxisMenu:
def __init__(self, toolbar):
self.toolbar=toolbar
self._naxes = toolbar.naxes
self._mbutton = Fltk.Fl_Menu_Button(0,0,50,10,"Axes")
self._mbutton.add("Select All",0,select_all,self,0)
self._mbutton.add("Invert All",0,invert_all,self,Fltk.FL_MENU_DIVIDER)
self._axis_txt=[]
self._axis_var=[]
for i in range(self._naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes, naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
self._mbutton.remove(i+2)
if(naxes):
self._axis_var=self._axis_var[:naxes-1]
self._axis_txt=self._axis_txt[:naxes-1]
else:
self._axis_var=[]
self._axis_txt=[]
self._naxes = naxes
set_active(0,self)
def widget(self):
return self._mbutton
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].value()]
return a
def set_active(ptr,amenu):
amenu.toolbar.set_active(amenu.get_indices())
def invert_all(ptr,amenu):
for a in amenu._axis_var:
if not a.value(): a.set()
set_active(ptr,amenu)
def select_all(ptr,amenu):
for a in amenu._axis_var:
a.set()
set_active(ptr,amenu)
class FLTKButton:
def __init__(self, text, file, command,argument,type="classic"):
file = os.path.join(rcParams['datapath'], 'images', file)
self.im = Fltk.Fl_PNM_Image(file)
size=26
if type=="repeat":
self.b = Fltk.Fl_Repeat_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="classic":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="light":
self.b = Fltk.Fl_Light_Button(0,0,size+20,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="pushed":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_UP_BOX)
self.b.down_box(Fltk.FL_DOWN_BOX)
self.b.type(Fltk.FL_TOGGLE_BUTTON)
self.tooltiptext=text+" "
self.b.tooltip(self.tooltiptext)
self.b.callback(command,argument)
self.b.image(self.im)
self.b.deimage(self.im)
self.type=type
def widget(self):
return self.b
class NavigationToolbar:
"""
    Public attributes
canvas - the FigureCanvas (FigureCanvasFltkAgg = customised fltk.Widget)
"""
def __init__(self, canvas, figman):
#xmin, xmax = canvas.figure.bbox.intervalx().get_bounds()
#height, width = 50, xmax-xmin
self.canvas = canvas
self.figman = figman
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bLeft = FLTKButton(
text="Left", file="stock_left.ppm",
command=pan,argument=(self,1,'x'),type="repeat")
self.bRight = FLTKButton(
text="Right", file="stock_right.ppm",
command=pan,argument=(self,-1,'x'),type="repeat")
self.bZoomInX = FLTKButton(
text="ZoomInX",file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'x'),type="repeat")
self.bZoomOutX = FLTKButton(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'x'),type="repeat")
self.bUp = FLTKButton(
text="Up", file="stock_up.ppm",
command=pan,argument=(self,1,'y'),type="repeat")
self.bDown = FLTKButton(
text="Down", file="stock_down.ppm",
command=pan,argument=(self,-1,'y'),type="repeat")
self.bZoomInY = FLTKButton(
text="ZoomInY", file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'y'),type="repeat")
self.bZoomOutY = FLTKButton(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'y'),type="repeat")
self.bSave = FLTKButton(
text="Save", file="stock_save_as.ppm",
command=save_figure, argument=self)
self._group.end()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
def pan(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.panx(direction)
else:
a.pany(direction)
base.figman.show()
def zoom(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.zoomx(direction)
else:
a.zoomy(direction)
base.figman.show()
def save_figure(ptr,base):
filetypes = base.canvas.get_supported_filetypes()
default_filetype = base.canvas.get_default_filetype()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
selected_filter = 0
filters = []
for i, (ext, name) in enumerate(sorted_filetypes):
filter = '%s (*.%s)' % (name, ext)
filters.append(filter)
if ext == default_filetype:
selected_filter = i
filters = '\t'.join(filters)
file_chooser=base._fc
file_chooser.filter(filters)
file_chooser.filter_value(selected_filter)
file_chooser.show()
while file_chooser.visible() :
Fltk.Fl.wait()
fname=None
if(file_chooser.count() and file_chooser.value(0) != None):
fname=""
(status,fname)=Fltk.fl_filename_absolute(fname, 1024, file_chooser.value(0))
if fname is None: # Cancel
return
#start from last directory
lastDir = os.path.dirname(fname)
file_chooser.directory(lastDir)
format = sorted_filetypes[file_chooser.filter_value()][0]
try:
base.canvas.print_figure(fname, format=format)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_fltk(msg)
class NavigationToolbar2FltkAgg(NavigationToolbar2):
"""
    Public attributes
canvas - the FigureCanvas
figman - the Figure manager
"""
def __init__(self, canvas, figman):
self.canvas = canvas
self.figman = figman
NavigationToolbar2.__init__(self, canvas)
self.pan_selected=False
self.zoom_selected=False
def set_cursor(self, cursor):
Fltk.fl_cursor(cursord[cursor],Fltk.FL_BLACK,Fltk.FL_WHITE)
def dynamic_update(self):
self.canvas.draw()
def pan(self,*args):
self.pan_selected=not self.pan_selected
self.zoom_selected = False
self.canvas.canvas._draw_overlay= False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.pan(self,args)
def zoom(self,*args):
self.zoom_selected=not self.zoom_selected
self.canvas.canvas._draw_overlay=self.zoom_selected
self.pan_selected = False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.zoom(self,args)
def configure_subplots(self,*args):
window = Fltk.Fl_Double_Window(100,100,480,240)
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasFltkAgg(toolfig)
window.end()
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
window.show()
canvas.show()
def _init_toolbar(self):
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = self.canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bHome = FLTKButton(
text="Home", file="home.ppm",
command=self.home,argument=self)
self.bBack = FLTKButton(
text="Back", file="back.ppm",
command=self.back,argument=self)
self.bForward = FLTKButton(
text="Forward", file="forward.ppm",
command=self.forward,argument=self)
self.bPan = FLTKButton(
text="Pan/Zoom",file="move.ppm",
command=self.pan,argument=self,type="pushed")
self.bZoom = FLTKButton(
text="Zoom to rectangle",file="zoom_to_rect.ppm",
command=self.zoom,argument=self,type="pushed")
self.bsubplot = FLTKButton( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots,argument=self,type="pushed")
self.bSave = FLTKButton(
text="Save", file="filesave.ppm",
command=save_figure, argument=self)
self._group.end()
self.message = Fltk.Fl_Output(0,0,w,8)
self._group.add_resizable(self.message)
self.update()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def set_message(self, s):
self.message.value(s)
FigureManager = FigureManagerFltkAgg
| gpl-3.0 |
kaylanb/SkinApp | machine_learn/Blob/machine_learn.py | 1 | 8778 | '''when this module is called, do ML and output 3 colm text file containing: prediction, answer, url'''
from numpy.random import rand
from numpy import ones, zeros, concatenate
import numpy as np
from pandas import read_csv, DataFrame, concat
# from sklearn.cross_validation import cross_val_score
from sklearn.ensemble import RandomForestClassifier
# from sklearn.ensemble import ExtraTreesClassifier
# from sklearn.ensemble import AdaBoostClassifier
# from sklearn.tree import DecisionTreeClassifier
class FacesAndLimbsTrainingSet():
def __init__(self,Food_df,Faces_df,SkinNoFaces_df):
self.Food= Food_df.ix[:251,:].copy()
self.People= self.Food.copy()
for i in np.arange(0,134):
self.People.ix[i,:]= Faces_df.ix[i,:].copy()
cnt=0
for i in np.arange(134,250):
self.People.ix[i,:]= SkinNoFaces_df.ix[cnt,:].copy()
cnt+=1
class NoLimbsTrainingSet():
def __init__(self,Food_df,Faces_df):
self.Food= Food_df.ix[:251,:].copy()
self.People= Faces_df.ix[:251,:].copy()
class NoFacesTrainingSet():
def __init__(self,Food_df,SkinNoFaces_df):
self.Food= Food_df.ix[:117,:].copy()
self.People= SkinNoFaces_df.ix[:117,:].copy()
class Team_or_Kay_Features():
def __init__(self,Food_df,Faces_df,SkinNoFaces_df):
self.FacesAndLimbs= FacesAndLimbsTrainingSet(Food_df,Faces_df,SkinNoFaces_df)
self.NoLims= NoLimbsTrainingSet(Food_df,Faces_df)
self.NoFaces= NoFacesTrainingSet(Food_df,SkinNoFaces_df)
class AddTeamCols():
def __init__(self, Food_KayF,Faces_KayF,SkinNoFaces_KayF,
Food_TeamF,Faces_TeamF,SkinNoFaces_TeamF):
self.Food= Food_KayF.copy()
cols= Food_TeamF.columns[2:12]
for col in cols:
self.Food[col]= Food_TeamF[col]
self.Faces= Faces_KayF.copy()
cols= Faces_TeamF.columns[2:12]
for col in cols:
self.Faces[col]= Faces_TeamF[col]
self.SkinNoFaces= SkinNoFaces_KayF.copy()
cols= SkinNoFaces_TeamF.columns[2:12]
for col in cols:
self.SkinNoFaces[col]= SkinNoFaces_TeamF[col]
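# The classes above just slice and combine the feature DataFrames: the *TrainingSet
# classes build matched Food/People samples for the different definitions of "People"
# (faces+limbs, faces only, skin without faces), and AddTeamCols appends the team's
# skin-map feature columns onto Kay's feature tables.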
def totp(ans):
return float( np.sum(ans.astype('bool')) )
def totn(ans):
return float( np.sum(ans.astype('bool') == False) )
def tp(predict,ans):
return float( len(np.where(predict.astype('bool') & ans.astype('bool'))[0]) )
def fp(predict,ans):
return float( len(np.where((predict.astype('bool')==False) & ans.astype('bool'))[0]) )
def fn(predict,ans):
return float( len(np.where((predict.astype('bool')) & (ans.astype('bool')==False))[0]) )
def precision(predict,ans):
prec= tp(predict,ans)/(tp(predict,ans) + fp(predict,ans))
tp_norm= tp(predict,ans)/totp(ans)
fp_norm= fp(predict,ans)/totp(ans)
print "tp/totp,fp/totp,precision= %f %f %f" % \
(tp_norm,fp_norm,prec)
return prec,tp_norm,fp_norm
def recall(predict,ans):
rec= tp(predict,ans)/(tp(predict,ans) + fn(predict,ans))
tp_norm= tp(predict,ans)/totp(ans)
fn_norm= fn(predict,ans)/totn(ans)
print "tp/totp,fn/totn,recall= %f %f %f" % \
(tp_norm,fn_norm,rec)
return rec,tp_norm,fn_norm
def fraction_correct(predict,ans):
tp= float( len(np.where(predict.astype('bool') & ans.astype('bool'))[0]) )
tn= float( len(np.where( (predict.astype('bool')==False) & (ans.astype('bool')==False) )[0]) )
return (tp+tn)/len(ans)
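# Note on the helpers above: fp() counts predicted-negative/actual-positive pairs and
# fn() counts predicted-positive/actual-negative pairs, i.e. the names are swapped
# relative to the usual convention, so precision() reduces to tp/totp and recall() to
# tp / (number of predicted positives).
# Tiny worked example (sketch): predict=np.array([1,1,0]), ans=np.array([1,0,0])
# gives tp=1, fp=0, fn=1, so precision()=1.0, recall()=0.5, fraction_correct()=2/3.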
def best_machine_learn_NoRandOrd(TrainX,TrainY,TestX,\
n_estim=100,min_samples_spl=2,scale=False):
forest1 = RandomForestClassifier(n_estimators=n_estim, max_depth=None,
min_samples_split=min_samples_spl, random_state=0,
compute_importances=True)
forest1.fit(TrainX,TrainY)
forestOut1 = forest1.predict(TestX)
# precision(forestOut1,TestY)
# recall(forestOut1,TestY)
# print sum(forestOut1 == TestY)/float(len(forestOut1))
# forest2 = ExtraTreesClassifier(n_estimators=n_estim, max_depth=None,
# min_samples_split=min_samples_spl, random_state=0,
# compute_importances=True)
# forest2.fit(TrainX,TrainY)
# forestOut2 = forest2.predict(TestX)
# precision(forestOut2,TestY)
# recall(forestOut2,TestY)
# print sum(forestOut2 == TestY)/float(len(forestOut2))
# forest3 = AdaBoostClassifier(n_estimators=n_estim, random_state=0)
# forest3.fit(TrainX,TrainY)
# forestOut3 = forest3.predict(TestX)
# precision(forestOut3,TestY)
# recall(forestOut3,TestY)
# print sum(forestOut3 == TestY)/float(len(forestOut3))
#most important features in each classifier
def ImpFeatures(forest,feature_list):
df= DataFrame()
df["importance"]=forest.feature_importances_
df.sort(columns="importance", inplace=True,ascending=False)
df["features"]= feature_list[df.index]
return df
# if importance:
# t_df= ImpFeatures(tree,Food.columns)
# f1_df= ImpFeatures(forest1,Food.columns)
# f2_df= ImpFeatures(forest2,Food.columns)
# #AdaBoostClassifier not have: compute_importances??
# print "tree\n",t_df.head()
# print "forest\n", f1_df.head()
# print "forest2\n",f2_df.head()
# return forestOut2,TestY,TestX,cTestP,cTestF,People_all,Food_all
return forest1,forestOut1
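# best_machine_learn_NoRandOrd(TrainX, TrainY, TestX) fits a RandomForestClassifier on
# the training data and returns (fitted_forest, predictions_on_TestX); the commented-out
# blocks above kept the ExtraTrees/AdaBoost variants and the feature-importance report.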
def Train_the_RandomForest():
Food_KayF = read_csv('csv_features/NewTraining_Food_everyones_KFeat_Toddmap.csv')
Faces_KayF = read_csv('csv_features/NewTraining_Faces_everyones_KFeat_Toddmap.csv')
SkinNoFaces_KayF = read_csv('csv_features/NewTraining_SkinNoFaces_everyones_KFeat_Toddmap.csv')
Food_TeamF = read_csv('csv_features/NewTraining_Food_everyones_TeamFeat_Toddmap.csv')
Faces_TeamF = read_csv('csv_features/NewTraining_Faces_everyones_TeamFeat_Toddmap.csv')
SkinNoFaces_TeamF = read_csv('csv_features/NewTraining_SkinNoFaces_everyones_TeamFeat_Toddmap.csv')
#team feature numbers for different definitions of Food,People
team= Team_or_Kay_Features(Food_TeamF,Faces_TeamF,SkinNoFaces_TeamF)
#kay feature numbers for different definitions of Food,People
kay= Team_or_Kay_Features(Food_KayF,Faces_KayF,SkinNoFaces_KayF)
#kay feature numbers + team feature number for skin maps for different definitions of Food,People
extend= AddTeamCols(Food_KayF,Faces_KayF,SkinNoFaces_KayF,
Food_TeamF,Faces_TeamF,SkinNoFaces_TeamF)
kay_extend= Team_or_Kay_Features(extend.Food,extend.Faces,extend.SkinNoFaces)
##
#make training and test sets
Food_all = kay_extend.NoLims.Food
People_all= kay_extend.NoLims.People
###
Food=Food_all.ix[:,2:]
People=People_all.ix[:,2:]
sh= Food.values.shape
max=int(sh[0]/2.)
TrainF= Food.values[0:max,:]
TestF = Food.values[max:,:]
#want urls in test set to find image user selects
TestF_URL= Food_all.URL.values[max:]
sh= People.values.shape
max=int(sh[0]/2.)
TrainP= People.values[0:max,:]
TestP = People.values[max:,:]
#want urls in test set to find image user selects
TestP_URL= People_all.URL.values[max:]
TrainX = concatenate([TrainP, TrainF])
TestX = concatenate([TestP, TestF])
#want urls in test set to find image user selects
TestX_URL = concatenate([TestP_URL, TestF_URL])
TrainY = concatenate([zeros(len(TrainP)), ones(len(TrainF))])
TestY = concatenate([zeros(len(TestP)), ones(len(TestF))])
scale=False
if scale:## SCALE X DATA
from sklearn import preprocessing
TrainX = preprocessing.scale(TrainX)
TestX = preprocessing.scale(TestX)
#run ML on Food vs. People train/test set of choice
RF = RandomForestClassifier(n_estimators=100, max_depth=None,
min_samples_split=2, random_state=0,
compute_importances=True)
RF.fit(TrainX,TrainY)
return RF, TestX,TestY,TestX_URL
def output_results():
(RF, TestX,TestY,TestX_URL)= Train_the_RandomForest()
Y_predict= RF.predict(TestX)
results_df= DataFrame()
results_df["url"]=TestX_URL
results_df["answer"]=TestY
results_df["predict"]=Y_predict
import pickle
fout = open("RandForestTrained.pickle", 'w')
pickle.dump(RF, fout)
fout.close()
fout = open("TestImageSet_predictions_answers.pickle", 'w')
pickle.dump(results_df, fout)
fout.close()
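    # To reuse these later (sketch): RF = pickle.load(open("RandForestTrained.pickle"))
    # and results_df = pickle.load(open("TestImageSet_predictions_answers.pickle")).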
# print np.where(Y_predict.astype('int') == TestY.astype('int'))[0].shape[0]/float(TestX_URL.size)
#
#
# url= TestX_URL[50]
# ind=np.where(TestX_URL == url)[0]
# if ind.size != 1: print "bad"
# else:
# print "good"
# ind=ind[0]
| bsd-3-clause |
arahuja/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4 (2 along each axis) by summing 2x2 pixel blocks
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
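# grid_to_graph returns a sparse adjacency matrix connecting each pixel to its
# neighbours; passing it as `connectivity` restricts Ward merges to spatially
# adjacent clusters, which is what keeps each segmented region in one piece.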
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
mengxn/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 8 | 42354 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
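# The fake FIFOQueue above just registers a queue runner on the graph, presumably so
# that the code paths that start and stop queue runners during fit/evaluate/predict
# get exercised by the tests that use this input_fn.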
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return const, const, control_flow_ops.group(train_op_1, training_op_2)
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, control_flow_ops.no_op()
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testModelFnArgs(self):
expected_param = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
def _argument_checker(features, labels, mode, params, config):
_, _ = features, labels
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertTrue(config.i_am_test)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(
model_fn=_argument_checker,
params=expected_param,
config=expected_config)
est.fit(input_fn=boston_input_fn, steps=1)
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables_lib.Variable(1., 'weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[None],
name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(
features, labels, {'examples': serialized_tf_example})
est.export_savedmodel(est.model_dir + '/export', serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError,
'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(model_fn=linear_model_fn,
model_dir='test_dir',
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass an empty array and expect it to remain empty after calling
    # fit and evaluate. This requires the implementation to copy the array
    # internally before adding any hooks.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
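# Illustrative note (comments only, added for orientation): the TF_CONFIG
# environment variable patched in these tests is a JSON blob shaped like
#   {"cluster": {"ps": ["host:port", ...], "worker": [...]},
#    "task": {"type": "worker", "index": 3}}
# RunConfig parses it, and _get_replica_device_setter then places variables on
# the parameter servers and ops on the worker, as the assertions above check.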
if __name__ == '__main__':
test.main()
| apache-2.0 |
DonBeo/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 30 | 4516 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Model selection with Probabilistic (PCA) and Factor Analysis (FA)
=================================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
mhdella/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
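# Hedged usage sketch (comments only, inputs invented): both strippers map
# accented characters onto their bare ASCII counterparts, e.g.
#   strip_accents_unicode(u'\xe9l\xe8ve')  # -> u'eleve'
#   strip_accents_ascii(u'\xe9l\xe8ve')    # -> u'eleve'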
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
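# Illustrative sketch (comments only, not executed): with ngram_range=(1, 2)
# and no stop words, _word_ngrams(["please", "call", "me"]) would yield
#   ["please", "call", "me", "please call", "call me"]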
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
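# Illustrative sketch (comments only): with ngram_range=(2, 2) the char_wb
# analyzer pads each word with spaces before slicing, so the single word
# "jumpy" produces [' j', 'ju', 'um', 'mp', 'py', 'y '].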
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that
can be of type string or bytes and are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
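# Minimal, hypothetical usage sketch for HashingVectorizer (comments only, so
# the module itself is unchanged; the documents are invented):
#   hv = HashingVectorizer(n_features=2 ** 10, norm='l2')
#   X = hv.transform(["the quick brown fox", "jumps over the lazy dog"])
#   # X is a (2, 1024) scipy.sparse matrix; no vocabulary_ is stored, which is
#   # why transform() works without a prior fit().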
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
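# Worked example (comments only): for the dense matrix
#   [[1, 0, 2],
#    [0, 0, 3]]
# _document_frequency returns [1, 0, 2], i.e. the number of rows (documents)
# in which each column (term) is non-zero.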
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that
can be of type string or bytes and are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more documents than high or in fewer
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
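# Illustrative sketch (comments only), assuming the default word analyzer:
# for raw_documents = ["apple banana apple", "banana cherry"] this builds
# vocabulary {'apple': 0, 'banana': 1, 'cherry': 2} and the 2 x 3 count matrix
#   [[2, 1, 0],
#    [0, 1, 1]]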
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
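# Worked example (comments only): with smooth_idf=True, a corpus of
# n_samples=4 documents and a term appearing in df=2 of them gives
#   idf = log((4 + 1) / (2 + 1)) + 1 = log(5 / 3) + 1 ~= 1.51
# while a term present in every document gets idf = log(1) + 1 = 1, so it is
# down-weighted but not erased.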
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that
can be of type string or bytes and are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
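# Minimal, hypothetical usage sketch for TfidfVectorizer (comments only; the
# corpus is invented):
#   corpus = ["the cat sat", "the dog sat", "the dog barked"]
#   vec = TfidfVectorizer()
#   X = vec.fit_transform(corpus)       # (3, n_terms) tf-idf weighted matrix
#   vec.get_feature_names()             # vocabulary terms, sorted
#   vec.transform(["the cat barked"])   # reuses the fitted vocabulary and idf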
| bsd-3-clause |
pavanvidem/tools-iuc | tools/table_compute/scripts/table_compute.py | 10 | 14161 | #!/usr/bin/env python3
"""
Table Compute tool - a wrapper around pandas with parameter input validation.
"""
__version__ = "0.9.2"
import csv
import math
from sys import argv
import numpy as np
import pandas as pd
from safety import Safety
if len(argv) == 2 and argv[1] == "--version":
print(__version__)
exit(-1)
# The import below should be generated in the same directory as
# the table_compute.py script.
# It is placed here so that the --version switch does not fail
import userconfig as uc # noqa: I100,I202
class Utils:
@staticmethod
def getOneValueMathOp(op_name):
"Returns a simple one value math operator such as log, sqrt, etc"
return getattr(math, op_name)
@staticmethod
def getVectorPandaOp(op_name):
"Returns a valid DataFrame vector operator"
return getattr(pd.DataFrame, op_name)
@staticmethod
def getTwoValuePandaOp(op_name, pd_obj):
"Returns a valid two value DataFrame or Series operator"
return getattr(type(pd_obj), "__" + op_name + "__")
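# Illustrative sketch (comments only): the three helpers above simply resolve
# user-supplied names to callables, e.g.
#   Utils.getOneValueMathOp("sqrt")          # -> math.sqrt
#   Utils.getVectorPandaOp("sum")            # -> pd.DataFrame.sum
#   Utils.getTwoValuePandaOp("lt", a_frame)  # -> pd.DataFrame.__lt__
# (a_frame stands for any pandas DataFrame instance.)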
@staticmethod
def readcsv(filedict, narm):
data = pd.read_csv(
filedict["file"],
header=filedict["header"],
index_col=filedict["row_names"],
keep_default_na=narm,
nrows=filedict["nrows"],
skipfooter=filedict["skipfooter"],
skip_blank_lines=filedict["skip_blank_lines"],
sep='\t'
)
# Fix whitespace issues in index or column names
data.columns = [col.strip() if type(col) is str else col
for col in data.columns]
data.index = [row.strip() if type(row) is str else row
for row in data.index]
return(data)
@staticmethod
def rangemaker(tab):
# e.g. "1:3,2:-2" specifies "1,2,3,2,1,0,-1,-2" to give [0,1,2,1,0,-1,-2]
# Positive indices are decremented by 1 to reference 0-base numbering
# Negative indices are unaltered, so that -1 refers to the last column
out = []
err_mess = None
for ranges in tab.split(","):
nums = ranges.split(":")
if len(nums) == 1:
numb = int(nums[0])
# Positive numbers get decremented.
# i.e. column "3" refers to index 2
# column "-1" still refers to index -1
if numb != 0:
out.append(numb if (numb < 0) else (numb - 1))
else:
err_mess = "Please do not use 0 as an index"
elif len(nums) == 2:
left, right = map(int, nums)
if 0 in (left, right):
err_mess = "Please do not use 0 as an index"
elif left < right:
if left > 0: # and right > 0 too
# 1:3 to 0,1,2
out.extend(range(left - 1, right))
elif right < 0: # and left < 0 too
# -3:-1 to -3,-2,-1
out.extend(range(left, right + 1))
elif left < 0 and right > 0:
# -2:2 to -2,-1,0,1
out.extend(range(left, 0))
out.extend(range(0, right))
elif right < left:
if right > 0: # and left > 0
# 3:1 to 2,1,0
out.extend(range(left - 1, right - 2, -1))
elif left < 0: # and right < 0
# -1:-3 to -1,-2,-3
out.extend(range(left, right - 1, -1))
elif right < 0 and left > 0:
# 2:-2 to 1,0,-1,-2
out.extend(range(left - 1, right - 1, -1))
else:
err_mess = "%s should not be equal or contain a zero" % nums
if err_mess:
print(err_mess)
return(None)
return(out)
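# Worked example (comments only): with 1-based, zero-free user input,
#   Utils.rangemaker("1:3,2:-2")  ->  [0, 1, 2, 1, 0, -1, -2]
# "1:3" expands to 0-based indices 0,1,2 and "2:-2" counts down through
# 1, 0, -1, -2.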
# Set decimal precision
pd.options.display.precision = uc.Default["precision"]
user_mode = uc.Default["user_mode"]
user_mode_single = None
out_table = None
params = uc.Data["params"]
if user_mode == "single":
# Read in TSV file
data = Utils.readcsv(uc.Data["tables"][0], uc.Default["narm"])
user_mode_single = params["user_mode_single"]
if user_mode_single == "precision":
# Useful for changing decimal precision on write out
out_table = data
elif user_mode_single == "select":
cols_specified = params["select_cols_wanted"]
rows_specified = params["select_rows_wanted"]
# Select all indexes if empty array of values
if cols_specified:
cols_specified = Utils.rangemaker(cols_specified)
else:
cols_specified = range(len(data.columns))
if rows_specified:
rows_specified = Utils.rangemaker(rows_specified)
else:
rows_specified = range(len(data))
# do not use duplicate indexes
# e.g. [2,3,2,5,5,4,2] to [2,3,5,4]
nodupes_col = not params["select_cols_unique"]
nodupes_row = not params["select_rows_unique"]
if nodupes_col:
cols_specified = [x for i, x in enumerate(cols_specified)
if x not in cols_specified[:i]]
if nodupes_row:
rows_specified = [x for i, x in enumerate(rows_specified)
if x not in rows_specified[:i]]
out_table = data.iloc[rows_specified, cols_specified]
elif user_mode_single == "filtersumval":
mode = params["filtersumval_mode"]
axis = params["filtersumval_axis"]
operation = params["filtersumval_op"]
compare_operation = params["filtersumval_compare"]
value = params["filtersumval_against"]
minmatch = params["filtersumval_minmatch"]
if mode == "operation":
# Perform axis operation
summary_op = Utils.getVectorPandaOp(operation)
axis_summary = summary_op(data, axis=axis)
# Perform vector comparison
compare_op = Utils.getTwoValuePandaOp(
compare_operation, axis_summary
)
axis_bool = compare_op(axis_summary, value)
elif mode == "element":
if operation.startswith("str_"):
data = data.astype("str")
value = str(value)
# Convert str_eq to eq
operation = operation[4:]
else:
value = float(value)
op = Utils.getTwoValuePandaOp(operation, data)
bool_mat = op(data, value)
axis_bool = np.sum(bool_mat, axis=axis) >= minmatch
out_table = data.loc[:, axis_bool] if axis == 0 else data.loc[axis_bool, :]
elif user_mode_single == "matrixapply":
# 0 - column, 1 - row
axis = params["matrixapply_dimension"]
# sd, mean, max, min, sum, median, summary
operation = params["matrixapply_op"]
if operation is None:
use_custom = params["matrixapply_custom"]
if use_custom:
custom_func = params["matrixapply_custom_func"]
def fun(vec):
"""Dummy Function"""
return vec
ss = Safety(custom_func, ['vec'], 'pd.Series')
fun_string = ss.generateFunction()
exec(fun_string) # SUPER DUPER SAFE...
out_table = data.apply(fun, axis)
else:
print("No operation given")
exit(-1)
else:
op = getattr(pd.DataFrame, operation)
out_table = op(data, axis)
elif user_mode_single == "element":
# lt, gt, ge, etc.
operation = params["element_op"]
bool_mat = None
if operation is not None:
if operation == "rowcol":
# Select all indexes if empty array of values
if "element_cols" in params:
cols_specified = Utils.rangemaker(params["element_cols"])
else:
cols_specified = range(len(data.columns))
if "element_rows" in params:
rows_specified = Utils.rangemaker(params["element_rows"])
else:
rows_specified = range(len(data))
# Inclusive selection:
# - True: Giving a row or column will match all elements in that row or column
# - False: Giving a row or column will match only elements that are in both the selected rows and columns
inclusive = params["element_inclusive"]
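# For example (hypothetical 3x3 table, rows_specified=[0], cols_specified=[1]):
#   inclusive=True  marks all of row 0 and all of column 1 as True
#   inclusive=False marks only the single cell at (0, 1) as True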
# Create a bool matrix (initialised to False) with selected
# rows and columns set to True
bool_mat = data.copy()
bool_mat[:] = False
if inclusive:
bool_mat.iloc[rows_specified, :] = True
bool_mat.iloc[:, cols_specified] = True
else:
bool_mat.iloc[rows_specified, cols_specified] = True
else:
op = Utils.getTwoValuePandaOp(operation, data)
value = params["element_value"]
try:
# Could be numeric
value = float(value)
except ValueError:
pass
# generate filter matrix of True/False values
bool_mat = op(data, value)
else:
# implement no filtering through a filter matrix filled with
# True values.
bool_mat = np.full(data.shape, True)
# Get the main processing mode
mode = params["element_mode"]
if mode == "replace":
replacement_val = params["element_replace"]
out_table = data.mask(
bool_mat,
data.where(bool_mat).applymap(
lambda x: replacement_val.format(elem=x)
)
)
elif mode == "modify":
mod_op = Utils.getOneValueMathOp(params["element_modify_op"])
out_table = data.mask(
bool_mat, data.where(bool_mat).applymap(mod_op)
)
elif mode == "scale":
scale_op = Utils.getTwoValuePandaOp(
params["element_scale_op"], data
)
scale_value = params["element_scale_value"]
out_table = data.mask(
bool_mat, scale_op(data.where(bool_mat), scale_value)
)
elif mode == "custom":
element_customop = params["element_customop"]
def fun(elem):
"""Dummy Function"""
return elem
ss = Safety(element_customop, ['elem'])
fun_string = ss.generateFunction()
exec(fun_string) # SUPER DUPER SAFE...
out_table = data.mask(
bool_mat, data.where(bool_mat).applymap(fun)
)
else:
print("No such element mode!", mode)
exit(-1)
elif user_mode_single == "fulltable":
general_mode = params["mode"]
if general_mode == "transpose":
out_table = data.T
elif general_mode == "melt":
melt_ids = params["MELT"]["melt_ids"]
melt_values = params["MELT"]["melt_values"]
out_table = pd.melt(data, id_vars=melt_ids, value_vars=melt_values)
elif general_mode == "pivot":
pivot_index = params["PIVOT"]["pivot_index"]
pivot_column = params["PIVOT"]["pivot_column"]
pivot_values = params["PIVOT"]["pivot_values"]
out_table = data.pivot(
index=pivot_index, columns=pivot_column, values=pivot_values
)
elif general_mode == "custom":
custom_func = params["fulltable_customop"]
def fun(tableau):
"""Dummy Function"""
return tableau
ss = Safety(custom_func, ['table'], 'pd.DataFrame')
fun_string = ss.generateFunction()
exec(fun_string) # SUPER DUPER SAFE...
out_table = fun(data)
else:
print("No such mode!", user_mode_single)
exit(-1)
elif user_mode == "multiple":
table_sections = uc.Data["tables"]
if not table_sections:
print("Multiple table sets not given!")
exit(-1)
reader_skip = uc.Default["reader_skip"]
# Data
table = []
# 1-based handlers for users "table1", "table2", etc.
table_names = []
# Actual 0-based references "table[0]", "table[1]", etc.
table_names_real = []
# Read and populate tables
for x, t_sect in enumerate(table_sections):
tmp = Utils.readcsv(t_sect, uc.Default["narm"])
table.append(tmp)
table_names.append("table" + str(x + 1))
table_names_real.append("table[" + str(x) + "]")
custom_op = params["fulltable_customop"]
ss = Safety(custom_op, table_names, 'pd.DataFrame')
fun_string = ss.generateFunction()
# Change the argument to table
fun_string = fun_string.replace("fun(table1):", "fun():")
# table1 -> table[0], table2 -> table[1], etc.
for name, name_real in zip(table_names, table_names_real):
fun_string = fun_string.replace(name, name_real)
fun_string = fun_string.replace("fun():", "fun(table):")
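# Sketch of the rewriting above for a hypothetical two-table expression
# "table1 + table2", assuming Safety.generateFunction emits something like
# "def fun(table1): return table1 + table2":
#   after the replacements the string becomes
#   "def fun(table): return table[0] + table[1]"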
exec(fun_string) # SUPER DUPER SAFE...
out_table = fun(table)
else:
print("No such mode!", user_mode)
exit(-1)
if not isinstance(out_table, (pd.DataFrame, pd.Series)):
print('The specified operation did not result in a table to return.')
raise RuntimeError(
'The operation did not generate a pd.DataFrame or pd.Series to return.'
)
out_parameters = {
"sep": "\t",
"float_format": "%%.%df" % pd.options.display.precision,
"header": uc.Default["out_headers_col"],
"index": uc.Default["out_headers_row"]
}
if user_mode_single not in ('matrixapply', None):
out_parameters["quoting"] = csv.QUOTE_NONE
out_table.to_csv(uc.Default["outtable"], **out_parameters)
| mit |
victorbergelin/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of NearestCentroid and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
SeanCameronConklin/aima-python | submissions/Hess/myNN.py | 13 | 1067 | import traceback
from sklearn.neural_network import MLPClassifier
from submissions.Hess import cars
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
guzzle = DataFrame()
guzzle.target = []
guzzle.data = []
car_list = cars.get_cars()
def guzzleTarget(city_mpg):
    # 1 = gas guzzler (below 14 MPG in the city), 0 = otherwise
    if city_mpg < 14:
        return 1
    return 0
for info in car_list:
    try:
        fuelCity = float(info['Fuel Information']['City mph']) # they misspelled mpg
        year = float(info['Identification']['Year'])
        guzzle.target.append(guzzleTarget(fuelCity))
        guzzle.data.append([fuelCity, year])
    except:
        traceback.print_exc()
guzzle.feature_names = [
    "City mph",
    "Year"
]
guzzle.target_names = [
    "New Car is < 14 MPG",
    "New Car is > 14 MPG"
]
mlpc = MLPClassifier(
solver='sgd',
learning_rate = 'adaptive',
)
Examples = {
'Guzzle': {
'frame': guzzle,
},
'GuzzleMLPC': {
'frame': guzzle,
'mlpc': mlpc
},
}
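# Minimal usage sketch (hypothetical, outside whatever harness consumes the
# Examples dict): fit the classifier on the frame built above and predict
# for a 12 MPG car from 2012.
# mlpc.fit(guzzle.data, guzzle.target)
# print(mlpc.predict([[12.0, 2012.0]]))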
| mit |
petebachant/seaborn | seaborn/tests/test_utils.py | 11 | 11537 | """Tests for plotting utilities."""
import warnings
import tempfile
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nose
import nose.tools as nt
from nose.tools import assert_equal, raises
import numpy.testing as npt
import pandas.util.testing as pdt
from distutils.version import LooseVersion
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
from pandas.util.testing import network
from ..utils import get_dataset_names, load_dataset
try:
from bs4 import BeautifulSoup
except ImportError:
BeautifulSoup = None
from .. import utils, rcmod
a_norm = np.random.randn(100)
def test_pmf_hist_basics():
"""Test the function to return barplot args for pmf hist."""
out = utils.pmf_hist(a_norm)
assert_equal(len(out), 3)
x, h, w = out
assert_equal(len(x), len(h))
# Test simple case
a = np.arange(10)
x, h, w = utils.pmf_hist(a, 10)
nose.tools.assert_true(np.all(h == h[0]))
def test_pmf_hist_widths():
"""Test histogram width is correct."""
x, h, w = utils.pmf_hist(a_norm)
assert_equal(x[1] - x[0], w)
def test_pmf_hist_normalization():
"""Test that output data behaves like a PMF."""
x, h, w = utils.pmf_hist(a_norm)
nose.tools.assert_almost_equal(sum(h), 1)
nose.tools.assert_less_equal(h.max(), 1)
def test_pmf_hist_bins():
"""Test bin specification."""
x, h, w = utils.pmf_hist(a_norm, 20)
assert_equal(len(x), 20)
def test_ci_to_errsize():
"""Test behavior of ci_to_errsize."""
cis = [[.5, .5],
[1.25, 1.5]]
heights = [1, 1.5]
actual_errsize = np.array([[.5, 1],
[.25, 0]])
test_errsize = utils.ci_to_errsize(cis, heights)
npt.assert_array_equal(actual_errsize, test_errsize)
def test_desaturate():
"""Test color desaturation."""
out1 = utils.desaturate("red", .5)
assert_equal(out1, (.75, .25, .25))
out2 = utils.desaturate("#00FF00", .5)
assert_equal(out2, (.25, .75, .25))
out3 = utils.desaturate((0, 0, 1), .5)
assert_equal(out3, (.25, .25, .75))
out4 = utils.desaturate("red", .5)
assert_equal(out4, (.75, .25, .25))
@raises(ValueError)
def test_desaturation_prop():
"""Test that pct outside of [0, 1] raises exception."""
utils.desaturate("blue", 50)
def test_saturate():
"""Test performance of saturation function."""
out = utils.saturate((.75, .25, .25))
assert_equal(out, (1, 0, 0))
def test_iqr():
"""Test the IQR function."""
a = np.arange(5)
iqr = utils.iqr(a)
assert_equal(iqr, 2)
class TestSpineUtils(object):
sides = ["left", "right", "bottom", "top"]
outer_sides = ["top", "right"]
inner_sides = ["left", "bottom"]
offset = 10
original_position = ("outward", 0)
offset_position = ("outward", offset)
def test_despine(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine()
for side in self.outer_sides:
nt.assert_true(~ax.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine(**dict(zip(self.sides, [True] * 4)))
for side in self.sides:
nt.assert_true(~ax.spines[side].get_visible())
plt.close("all")
def test_despine_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(ax=ax2)
for side in self.sides:
nt.assert_true(ax1.spines[side].get_visible())
for side in self.outer_sides:
nt.assert_true(~ax2.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax2.spines[side].get_visible())
plt.close("all")
def test_despine_with_offset(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.despine(ax=ax, offset=self.offset)
for side in self.sides:
is_visible = ax.spines[side].get_visible()
new_position = ax.spines[side].get_position()
if is_visible:
nt.assert_equal(new_position, self.offset_position)
else:
nt.assert_equal(new_position, self.original_position)
plt.close("all")
def test_despine_with_offset_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
if ax2.spines[side].get_visible():
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
else:
nt.assert_equal(ax2.spines[side].get_position(),
self.original_position)
plt.close("all")
def test_despine_trim_spines(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_xlim(.75, 3.25)
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
plt.close("all")
def test_despine_trim_inverted(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_ylim(.85, 3.15)
ax.invert_yaxis()
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
plt.close("all")
def test_despine_trim_noticks(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_yticks([])
utils.despine(trim=True)
nt.assert_equal(ax.get_yticks().size, 0)
def test_offset_spines_warns(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
utils.offset_spines(offset=self.offset)
nt.assert_true('deprecated' in str(w[0].message))
nt.assert_true(issubclass(w[0].category, UserWarning))
plt.close('all')
def test_offset_spines(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.offset_spines(offset=self.offset)
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.offset_position)
plt.close("all")
def test_offset_spines_specific_axes(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, (ax1, ax2) = plt.subplots(2, 1)
utils.offset_spines(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
plt.close("all")
def test_ticklabels_overlap():
rcmod.set()
f, ax = plt.subplots(figsize=(2, 2))
f.tight_layout() # This gets the Agg renderer working
assert not utils.axis_ticklabels_overlap(ax.get_xticklabels())
big_strings = "abcdefgh", "ijklmnop"
ax.set_xlim(-.5, 1.5)
ax.set_xticks([0, 1])
ax.set_xticklabels(big_strings)
assert utils.axis_ticklabels_overlap(ax.get_xticklabels())
x, y = utils.axes_ticklabels_overlap(ax)
assert x
assert not y
def test_categorical_order():
x = ["a", "c", "c", "b", "a", "d"]
y = [3, 2, 5, 1, 4]
order = ["a", "b", "c", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(x, order)
nt.assert_equal(out, order)
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
out = utils.categorical_order(np.array(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(pd.Series(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(y)
nt.assert_equal(out, [1, 2, 3, 4, 5])
out = utils.categorical_order(np.array(y))
nt.assert_equal(out, [1, 2, 3, 4, 5])
out = utils.categorical_order(pd.Series(y))
nt.assert_equal(out, [1, 2, 3, 4, 5])
if pandas_has_categoricals:
x = pd.Categorical(x, order)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.categories))
x = pd.Series(x)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.cat.categories))
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
x = ["a", np.nan, "c", "c", "b", "a", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
if LooseVersion(pd.__version__) >= "0.15":
def check_load_dataset(name):
ds = load_dataset(name, cache=False)
assert(isinstance(ds, pd.DataFrame))
def check_load_cached_dataset(name):
# Test the caching using a temporary file.
# With Python 3.2+, we could use the tempfile.TemporaryDirectory()
# context manager instead of this try...finally statement
tmpdir = tempfile.mkdtemp()
try:
# download and cache
ds = load_dataset(name, cache=True, data_home=tmpdir)
# use cached version
ds2 = load_dataset(name, cache=True, data_home=tmpdir)
pdt.assert_frame_equal(ds, ds2)
finally:
shutil.rmtree(tmpdir)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_get_dataset_names():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
names = get_dataset_names()
assert(len(names) > 0)
assert(u"titanic" in names)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
# does not get in effect, so we need to call explicitly
# yield check_load_dataset, name
check_load_dataset(name)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_cached_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
# does not get in effect, so we need to call explicitly
# yield check_load_dataset, name
check_load_cached_dataset(name)
| bsd-3-clause |
crisis-economics/housingModel | src/main/resources/calibration/code/SaleReprice.py | 2 | 7180 | # -*- coding: utf-8 -*-
"""
Defines several classes to study the reprice or price decrease behaviour of households trying to sell their houses. It
uses Zoopla data
@author: daniel, Adrian Carro
"""
import Datasets as ds
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import scipy.stats as stats
import math
class DiscountDistribution:
"""Class to collect and store the distribution of price discounts per month"""
# Number of months on the market to consider as sample (bins in the x axis for building the pdf)
x_size = 48 # 4 years
# For every month in the sample, countNoChange stores the number of properties not experiencing a drop in price
# between -90% and -0.2%
countNoChange = np.zeros(x_size)
# For every month in the sample, countTotal stores the number of properties in the data during that month
countTotal = np.zeros(x_size)
# For every month in the sample, changesByMonth stores a list of the logarithmic percent changes (in absolute value)
# in price of every property experiencing a drop in price between -90% and -0.2%
changesByMonth = [[] for i in range(x_size)]
# Need to implement an __init__ method
def __init__(self):
pass
# Record one listing with no change of price between start and end months
def record_no_change(self, start, end):
if end >= self.x_size:
end = self.x_size - 1
for month in range(start, end + 1):
self.countNoChange[month] += 1
self.countTotal[month] += 1
# Record one listing with a drop in price between -90% and -0.2% at month
def record_change(self, start, month, percent):
if -90 < percent < -0.2:
self.record_no_change(start, month - 1) # Record the listing as no change before month
if month < self.x_size: # Only record the change if month is within the sample
self.countTotal[month] += 1
self.changesByMonth[month].append(math.log(math.fabs(percent)))
else:
self.record_no_change(start, month)
# Probability that price will not change in a given month (given that the property is still on the market)
def probability_no_change(self):
return np.divide(self.countNoChange, self.countTotal)
# Probability that there will be no change per month, integrated over all months
def probability_no_change_all_time(self):
return self.probability_no_change().sum() / self.x_size
# Get a list of all changes, i.e., changesByMonth in a single list instead of a list of lists
def list_all_changes(self):
return [x for month in self.changesByMonth for x in month]
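# Minimal usage sketch of DiscountDistribution (hypothetical values):
#   d = DiscountDistribution()
#   d.record_no_change(0, 2)      # listed 3 months, price never dropped
#   d.record_change(0, 3, -5.0)   # 5% price drop in the 4th month on market
#   d.probability_no_change()     # per-month P(no price drop | still listed)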
class PropertyRecord:
"""Class to function as record of the most recent price, initial price and days on market for a given property"""
current_price = 0
initial_market_price = 0
days_on_market = 0
last_change_date = 0
def __init__(self, initial_date, initial_price):
self.current_price = initial_price
self.initial_market_price = initial_price
self.days_on_market = 0
self.last_change_date = datetime.strptime(initial_date, "%Y-%m-%d")
def update_price(self, date_string, price):
new_date = datetime.strptime(date_string, "%Y-%m-%d")
previous_days_on_market = self.days_on_market
new_days_on_market = self.days_on_market + (new_date - self.last_change_date).days
reduction = (price - self.current_price) * 100.0 / self.current_price
# Previous equation: Discounts were computed as price difference between current and previous price over
# initial price
# reduction = (price - self.current_price) * 100.0 / self.initial_market_price
self.current_price = price
self.days_on_market = new_days_on_market
self.last_change_date = new_date
return previous_days_on_market, new_days_on_market, reduction
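# Example of the values returned by update_price above: a hypothetical listing
# first seen at 200000 and relisted 45 days later at 190000 returns
# (previous_days_on_market=0, new_days_on_market=45, reduction=-5.0).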
def plot_probability(mat):
"""Plot a matrix mat as a colour plot, used for plotting a pdf"""
plt.figure(figsize=(10, 10))
im = plt.imshow(mat, origin='lower', cmap=plt.get_cmap("jet"))
plt.colorbar(im, orientation='horizontal')
plt.show()
def calculate_price_changes(filtered_zoopla_data):
"""Compute and return the discount distribution"""
distribution = DiscountDistribution()
dict_of_property_records = {}
for index, row in filtered_zoopla_data.iterrows():
# If listing is already at price_map...
if row["LISTING ID"] in dict_of_property_records:
# ...recover its PriceCalc object as last_record
last_record = dict_of_property_records[row["LISTING ID"]]
# ...store the PriceCalc object previous current_price as old_price
old_price = last_record.current_price
# ...update the PriceCalc object with the most recent information (day and price)
prev_days_on_market, new_days_on_market, reduction = last_record.update_price(row["DAY"], row["PRICE"])
# If price has not changed, then record the no change to the DiscountDistribution
if old_price == row["PRICE"]:
distribution.record_no_change(prev_days_on_market / 30, new_days_on_market / 30)
# Otherwise, record the change to the DiscountDistribution
else:
distribution.record_change(prev_days_on_market / 30, new_days_on_market / 30, reduction)
# Otherwise, add PriceCalc object of the listing to price_map
else:
dict_of_property_records[row["LISTING ID"]] = PropertyRecord(row["DAY"], row["PRICE"])
return distribution
# Read and filter data from Zoopla
data = ds.ZooplaMatchedDaily()
chunk = data.read(200000)
filtered_chunk = chunk[(chunk["MARKET"] == "SALE") & (chunk["PRICE"] > 0)][["LISTING ID", "DAY", "PRICE"]]
# Compute probability distribution of price discounts
dist = calculate_price_changes(filtered_chunk)
# Plot probability of no change per month on market
print "Average probability of no change per month"
print dist.probability_no_change().sum() / dist.probability_no_change().size
print "Probability of no change per month"
print dist.probability_no_change()
plt.figure()
plt.plot(dist.probability_no_change())
plt.xlabel("Months on market")
plt.ylabel("Probability of no price change")
# Plot average price discount per month on market
mean, sd = stats.norm.fit(dist.list_all_changes())
monthlyMeans = [stats.norm.fit(dist.changesByMonth[i])[0] for i in range(dist.x_size)]
print "Best mean and standard deviation of percentage change per month given change"
print mean, sd
print "Monthly Means"
print monthlyMeans
plt.figure()
plt.plot(monthlyMeans)
plt.xlabel("Months on market")
plt.ylabel("Percent discount")
# Plot probability distribution of price discounts (independent of month on market)
curve = [stats.norm.pdf(i * 0.05, mean, sd) for i in range(-35, 100)]
plt.figure()
plt.hist(dist.list_all_changes(), bins=50, density=True, label="Data")
plt.plot([i * 0.05 for i in range(-35, 100)], curve, label="Normal fit")
plt.xlabel("Percent discount")
plt.ylabel("Probability")
plt.legend()
plt.show()
| mit |
miguel-branco/pyrawcore | pyrawcore/csv/csv.py | 1 | 8850 | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2014
# Data Intensive Applications and Systems laboratory (DIAS)
# École Polytechnique Fédérale de Lausanne
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import collections
import os
import pandas
from ..core import get_option, Table
def get_chunk_schema(chunk):
schema = collections.OrderedDict()
for name in chunk.columns:
schema[str(name)] = chunk[name].dtype
return schema
base_path = get_option('files', 'base_path')
class Csv(Table):
CHUNK_SIZE = 10000
class Column(object):
def __init__(self, parent, column):
self.parent = parent
self.column = column
def __iter__(self):
schema = None
for chunk in pandas.read_csv(self.parent._get_path(), chunksize=self.parent.CHUNK_SIZE, usecols=[self.column], **self.parent.args):
chunk_schema = get_chunk_schema(chunk)
if not schema:
schema = chunk_schema
#elif schema != chunk_schema:
# raise RuntimeError('incompatible chunk schema')
for row in chunk.values:
yield row[0]
def __get_key(self, key):
if key < 0:
raise NotImplementedError('backward indexing not supported')
schema = None
for chunk in pandas.read_csv(self.parent._get_path(), chunksize=self.parent.CHUNK_SIZE, usecols=[self.column], **self.parent.args):
chunk_schema = get_chunk_schema(chunk)
if not schema:
schema = chunk_schema
#elif schema != chunk_schema:
# raise RuntimeError('incompatible chunk schema')
if key < self.parent.CHUNK_SIZE:
try:
return chunk.values[key][0]
except IndexError:
# Replace Pandas error message since it is confusing
raise IndexError('index out of range')
else:
key -= self.parent.CHUNK_SIZE
raise IndexError('index out of range')
def __get_slice(self, slice):
if slice.step:
raise NotImplementedError('slice step not supported')
start, stop = slice.start, slice.stop
if not start:
start = 0
if stop is not None and stop < start:
raise NotImplementedError('slice backward not supported')
schema = None
for chunk in pandas.read_csv(self.parent._get_path(), chunksize=self.parent.CHUNK_SIZE, usecols=[self.column], **self.parent.args):
chunk_schema = get_chunk_schema(chunk)
if not schema:
schema = chunk_schema
#elif schema != chunk_schema:
# raise RuntimeError('incompatible chunk schema')
if start < self.parent.CHUNK_SIZE and stop is not None and stop < self.parent.CHUNK_SIZE:
for row in chunk.values[start:stop]:
yield row[0]
return
elif start < self.parent.CHUNK_SIZE:
for row in chunk.values[start:]:
yield row[0]
start = 0
if stop is not None:
stop -= self.parent.CHUNK_SIZE
else:
start -= self.parent.CHUNK_SIZE
if stop is not None:
stop -= self.parent.CHUNK_SIZE
def __getitem__(self, key):
if isinstance(key, (int, long)):
return self.__get_key(key)
elif isinstance(key, slice):
return self.__get_slice(key)
raise ValueError('key is not an int, long or slice')
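# Sketch of the chunk-offset arithmetic in __get_slice above, assuming a
# CHUNK_SIZE of 3: a request for rows 4:8 subtracts CHUNK_SIZE while skipping
# chunk 0 (rows 0-2), is served as [1:] of chunk 1 (rows 4-5), and finishes
# as [0:2] of chunk 2 (rows 6-7).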
def __init__(self, path, args, columns_added=[], columns_hidden=[]):
super(Csv, self).__init__(columns_added=columns_added, columns_hidden=columns_hidden)
# TODO: Validate path, args, ...
self.path = path
self.args = args
def _get_path(self):
if base_path:
return os.path.join(base_path, self.path)
return self.path
@staticmethod
def from_json(payload):
return Csv(
payload['path'],
args=payload['args'],
columns_added=Table._decode_columns_added(payload),
columns_hidden=Table._decode_columns_hidden(payload))
def to_json(self):
return dict(
name='csv',
payload=dict(
path=self.path,
args=self.args,
columns_added=self._encode_columns_added(),
columns_hidden=self._encode_columns_hidden()))
def _get_iterator(self):
schema = None
for chunk in pandas.read_csv(self._get_path(), chunksize=self.CHUNK_SIZE, **self.args):
chunk_schema = get_chunk_schema(chunk)
if not schema:
schema = chunk_schema
#elif schema != chunk_schema:
# raise RuntimeError('incompatible chunk schema')
for row in chunk.values:
yield self._new_tuple(schema, row)
def _get_keys(self):
try:
chunk = next(iter(pandas.read_csv(self._get_path(), chunksize=self.CHUNK_SIZE, **self.args)))
except StopIteration:
return []
else:
return [name for name in chunk.columns]
def _get_key(self, key):
if key < 0:
raise NotImplementedError('backward indexing not supported')
schema = None
for chunk in pandas.read_csv(self._get_path(), chunksize=self.CHUNK_SIZE, **self.args):
chunk_schema = get_chunk_schema(chunk)
if not schema:
schema = chunk_schema
#elif schema != chunk_schema:
# raise RuntimeError('incompatible chunk schema')
if key < self.CHUNK_SIZE:
try:
return self._new_tuple(schema, chunk.values[key])
except IndexError:
# Replace Pandas error message since it is confusing
raise IndexError('index out of range')
else:
key -= self.CHUNK_SIZE
raise IndexError('index out of range')
def _get_slice(self, slice):
if slice.step:
raise NotImplementedError('slice step not supported')
start, stop = slice.start, slice.stop
if not start:
start = 0
if stop is not None and stop < start:
raise NotImplementedError('slice backward not supported')
schema = None
for chunk in pandas.read_csv(self._get_path(), chunksize=self.CHUNK_SIZE, **self.args):
chunk_schema = get_chunk_schema(chunk)
if not schema:
schema = chunk_schema
#elif schema != chunk_schema:
# raise RuntimeError('incompatible chunk schema')
if start < self.CHUNK_SIZE and stop is not None and stop < self.CHUNK_SIZE:
for row in chunk.values[start:stop]:
yield self._new_tuple(schema, row)
return
elif start < self.CHUNK_SIZE:
for row in chunk.values[start:]:
yield self._new_tuple(schema, row)
start = 0
if stop is not None:
stop -= self.CHUNK_SIZE
else:
start -= self.CHUNK_SIZE
if stop is not None:
stop -= self.CHUNK_SIZE
def _get_column(self, name):
return Csv.Column(self, name)
| mit |
brchiu/tensorflow | tensorflow/contrib/learn/python/learn/estimators/linear_test.py | 1 | 77825 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column_lib as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.platform import test
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import server_lib
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = base.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = base.Dataset(data=iris.data[ids], target=iris.target[ids])
return iris
class LinearClassifierTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearClassifier(
n_classes=3, feature_columns=cont_features),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age':
sparse_tensor.SparseTensor(
values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.sparse_column_with_hash_bucket('age', 2)
classifier = linear.LinearClassifier(
_joint_weight=True, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = feature_column_lib.real_valued_column('', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = linear.LinearClassifier(
n_classes=3,
feature_columns=[language_column],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100, 1], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [language_column, fc_core.numeric_column('age')]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column_lib.real_valued_column('', dimension=4)]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('linear/feature/weight', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertEqual(
4, len(classifier.get_variable_value('linear/feature/weight')))
self.assertEqual(
3, len(classifier.get_variable_value('linear/bias_weight')))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
def _optimizer():
return ftrl.FtrlOptimizer(learning_rate=0.1)
classifier = linear.LinearClassifier(
n_classes=3, optimizer=_optimizer, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, optimizer='Ftrl', feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32)
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = linear.LinearClassifier(
feature_columns=[feature_column_lib.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Tests the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([[1], [2]]), num_epochs=num_epochs),
}, constant_op.constant(
[[.7], [0]], dtype=dtypes.float32)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=sparse_features, config=config)
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([1]), num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out1_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
del classifier
classifier2 = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
out2_class = list(
classifier2.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out2_proba = list(
classifier2.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = linear.LinearClassifier(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_label_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean label should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': constant_op.constant([[20], [20], [20]]),
'weights': constant_op.constant([[100], [1], [1]]),
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = linear.LinearClassifier(
feature_columns=[age], weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertIn('linear/binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier_no_reg = linear.LinearClassifier(feature_columns=[language])
classifier_with_reg = linear.LinearClassifier(
feature_columns=[language],
optimizer=ftrl.FtrlOptimizer(
learning_rate=1.0, l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(input_fn=input_fn,
steps=100).evaluate(
input_fn=input_fn,
steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[1], [1], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier = linear.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2']),
'maintenance_cost': constant_op.constant([[500.0], [200.0]]),
'sq_footage': constant_op.constant([[800.0], [600.0]]),
'weights': constant_op.constant([[1.0], [1.0]])
}, constant_op.constant([[0], [1]])
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
# input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures
# where 2 1-dimensional dense features have been replaced by 1 2-dimensional
# feature.
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2']),
'dense_feature':
constant_op.constant([[500.0, 800.0], [200.0, 600.0]])
}, constant_op.constant([[0], [1]])
dense_feature = feature_column_lib.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'price': constant_op.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
'weights': constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
classifier = linear.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.4, 0.6, 0.3]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClassifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeaturesOOVWithNoOOVBuckets(self):
"""LinearClassifier with SDCAOptimizer with OOV features (-1 IDs)."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
# 'GB' is out of the vocabulary.
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_keys(
'country', keys=['US', 'CA', 'MK', 'IT', 'CN'])
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'language':
sparse_tensor.SparseTensor(
values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'country':
sparse_tensor.SparseTensor(
values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[0], [0], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = feature_column_lib.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_language], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerPartitionedVariables(self):
"""Tests LinearClassifier with SDCAOptimizer with partitioned variables."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id',
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2, axis=0))
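    # fixed_size_partitioner(num_shards=2) splits each weight variable into
    # two shards along axis 0, so this test exercises SDCA with partitioned
    # variables.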
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer,
config=config)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
print('all scores = {}'.format(scores))
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age':
constant_op.constant([[1], [2]]),
'language':
sparse_tensor.SparseTensor(
values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}, constant_op.constant([[1], [0]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearRegressor(feature_columns=cont_features),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearRegressor(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
regressor = linear.LinearRegressor(
feature_columns=cont_features,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = regressor.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = list(
regressor.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(
regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
  def testTrainSaveLoad(self):
    """Tests that a trained model can be saved and reloaded."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = linear.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = linear.LinearRegressor(
model_dir=model_dir, feature_columns=feature_columns)
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7),
feature_column_lib.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=feature_columns, config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
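    # y is a noisy linear function of x plus a constant offset of roughly 2;
    # the fitted weights should approximately recover `weights` (the
    # assertion below uses a loose rtol of 1).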
feature_columns = estimator.infer_real_valued_columns_from_input(x)
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
self.assertIn('linear//weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear//weight')
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor_weights.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'x': constant_op.constant(x),
'weights': constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant(y)
x_column = feature_column_lib.real_valued_column('x', dimension=3)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertIn('linear/x/weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear/x/weight')
self.assertAllClose(
[w[0] for w in weights], regressor_weights.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerPartitionedVariables(self):
"""Tests LinearRegressor with SDCAOptimizer with partitioned variables."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0,
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2, axis=0))
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer,
config=config)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant([[1.4], [-0.8], [2.6]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
variable_names = regressor.get_variable_names()
self.assertIn('linear/price/weight', variable_names)
self.assertIn('linear/country/weights', variable_names)
no_l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Regressor with L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
      All of the instances in this input only have the bias feature, and
      1/4 of the labels are positive. This means that the expected weight for
      the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
# place_holder is an empty column which is always 0 (absent), because
# LinearClassifier requires at least one column.
'place_holder':
constant_op.constant([[0.0]] * num_examples),
}, constant_op.constant(
          [[1 if i % 4 == 0 else 0] for i in range(num_examples)])
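    # Every fourth example is labeled 1, i.e. 10 positives out of 40, so the
    # label mean is 0.25 and the learned bias weight should be close to it.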
place_holder = feature_column_lib.real_valued_column('place_holder')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[place_holder], optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
Until b/29339026 is resolved, the bias gets regularized with the same
global value for the other columns, and so the expected weights get
shifted and are:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant(
[[x]
for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])
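    # The label pattern gives 40 positives out of 100 'a' examples and 20 out
    # of 100 'b' examples, i.e. the 0.4 / 0.2 positive rates described in the
    # docstring above.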
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], 0.0, err=0.05)
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
[[-1 if x % 10 == 0 else 0] for x in range(half)])
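    # Every tenth example in each half is labeled (+1 in the 'a' half, -1 in
    # the 'b' half), so the label means are +0.1 and -0.1, which matches the
    # expected weights a=0.1, b=-0.1 with a zero bias.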
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.1, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], -0.1, err=0.05)
class LinearEstimatorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearEstimator(feature_columns=cont_features,
head=head_lib.regression_head()),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearEstimator)
def testLinearRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(feature_columns=[age, language],
head=head_lib.regression_head())
linear_estimator.fit(input_fn=input_fn, steps=100)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=400)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testPoissonRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(
feature_columns=[age, language],
head=head_lib.poisson_regression_head())
linear_estimator.fit(input_fn=input_fn, steps=10)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=100)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
# Here loss of 2.1 implies a prediction of ~9.9998
self.assertLess(loss2, 2.1)
def testSDCANotSupported(self):
"""Tests that we detect error for SDCA."""
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
with self.assertRaises(ValueError):
linear.LinearEstimator(
head=head_lib.regression_head(label_dimension=1),
feature_columns=[maintenance_cost, sq_footage],
optimizer=sdca_optimizer,
_joint_weights=True)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = linear.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
treycausey/scikit-learn | sklearn/linear_model/tests/test_omp.py | 3 | 8168 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
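# G = np.dot(X.T, X) is the Gram matrix and Xy = np.dot(X.T, y); the
# orthogonal_mp_gram tests below work from these precomputed quantities
# instead of X and y.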
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
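    # gamma was generated with exactly n_nonzero_coefs (5) nonzero entries per
    # target, so OMP limited to 5 atoms should recover the exact support.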
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
assert_warns(DeprecationWarning, omp.fit, X, y[:, 0], Gram=G, Xy=Xy[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
assert_warns(DeprecationWarning, omp.fit, X, y, Gram=G, Xy=Xy)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_scaling_with_gram():
omp1 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
fit_intercept=False, normalize=False)
omp2 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
fit_intercept=True, normalize=False)
omp3 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
fit_intercept=False, normalize=True)
f, w = assert_warns, DeprecationWarning
f(w, omp1.fit, X, y, Gram=G)
f(w, omp1.fit, X, y, Gram=G, Xy=Xy)
f(w, omp2.fit, X, y, Gram=G)
f(w, omp2.fit, X, y, Gram=G, Xy=Xy)
f(w, omp3.fit, X, y, Gram=G)
f(w, omp3.fit, X, y, Gram=G, Xy=Xy)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
RayMick/scikit-learn | sklearn/metrics/regression.py | 175 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
    multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError("y_true and y_pred have different number of outputs "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
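    # output_errors holds the per-output (optionally sample-weighted) mean
    # squared error; `multioutput` controls how these values are aggregated.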
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
        A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
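    # numerator is Var(y_true - y_pred) and denominator is Var(y_true), both
    # optionally sample-weighted; per output the score is
    # 1 - numerator / denominator.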
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights to np.average results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        Default value corresponds to 'variance_weighted', but
        will be changed to 'uniform_average' in future versions.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; having a constant
    # y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
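# Illustrative usage sketch (not part of the library code); values are
# approximate and use the multioutput example from the docstring above:
#   r2_score([[0.5, 1], [-1, 1], [7, -6]], [[0, 2], [-1, 2], [8, -5]],
#            multioutput='raw_values')
#   # -> roughly array([0.965, 0.908]), one score per output
#   r2_score(..., multioutput='uniform_average')
#   # -> roughly 0.937, the unweighted mean of the per-output scores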
| bsd-3-clause |
macks22/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
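# Optional check (not part of the original example): the generated signal has
# only 10 nonzero coefficients, so the fitted models should be similarly sparse.
#   print("Lasso: %d nonzero coefficients" % np.sum(lasso.coef_ != 0))
#   print("Elastic Net: %d nonzero coefficients" % np.sum(enet.coef_ != 0))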
| bsd-3-clause |
ThomasSweijen/yadesolute2 | doc/sphinx/conf.py | 3 | 27794 | # -*- coding: utf-8 -*-
#
# Yade documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 16 21:49:34 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# relevant posts to sphinx ML
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/b4fbc8d31d230fc4
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/118598245d5f479b
#####################
## custom yade roles
#####################
##
## http://docutils.sourceforge.net/docs/howto/rst-roles.html
import sys, os, re
from docutils import nodes
from sphinx import addnodes
from sphinx.roles import XRefRole
import docutils
#
# needed for creating hyperlink targets.
# it should be cleaned up and unified for both LaTeX and HTML via
# the pending_xref node which gets resolved to real link target
# by sphinx automatically once all docs have been processed.
#
# xrefs: http://groups.google.com/group/sphinx-dev/browse_thread/thread/d719d19307654548
#
#
import __builtin__
if 'latex' in sys.argv: __builtin__.writer='latex'
elif 'html' in sys.argv: __builtin__.writer='html'
elif 'epub' in sys.argv: __builtin__.writer='epub'
else: raise RuntimeError("Must have either 'latex' or 'html' on the command line (hack for reference styles)")
def yaderef_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yref:`` role, by making hyperlink to yade.wrapper.*. It supports :yref:`Link text<link target>` syntax, like usual hyperlinking roles."
id=rawtext.split(':',2)[2][1:-1]
txt=id; explicitText=False
m=re.match('(.*)\s*<(.*)>\s*',id)
if m:
explicitText=True
txt,id=m.group(1),m.group(2)
id=id.replace('::','.')
#node=nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='http://beta.arcig.cz/~eudoxos/yade/doxygen/?search=%s'%id,**options)
#node=nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='yade.wrapper.html#yade.wrapper.%s'%id,**options)
return [mkYrefNode(id,txt,rawtext,role,explicitText,lineno,options)],[]
def yadesrc_role(role,rawtext,lineno,inliner,options={},content=[]):
"Handle the :ysrc:`` role, making hyperlink to git repository webpage with that path. Supports :ysrc:`Link text<file/name>` syntax, like usual hyperlinking roles. If target ends with ``/``, it is assumed to be a directory."
id=rawtext.split(':',2)[2][1:-1]
txt=id
m=re.match('(.*)\s*<(.*)>\s*',id)
if m:
txt,id=m.group(1),m.group(2)
return [nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='https://github.com/yade/trunk/blob/master/%s'%id)],[] ### **options should be passed to nodes.reference as well
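# Example reST usage of the roles defined above (illustrative targets; they
# must actually exist in the generated yade.wrapper docs / source tree to resolve):
#   :yref:`Omega`
#   :yref:`the simulation loop<Omega.run>`
#   :ysrc:`core/Omega.hpp`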
# map modules to their html (rst) filenames. Used for sub-modules, where e.g. SpherePack is yade._packSphere.SpherePack, but is documented from yade.pack.rst
moduleMap={
'yade._packPredicates':'yade.pack',
'yade._packSpheres':'yade.pack',
'yade._packObb':'yade.pack'
}
class YadeXRefRole(XRefRole):
#def process_link
def process_link(self, env, refnode, has_explicit_title, title, target):
print 'TARGET:','yade.wrapper.'+target
return '[['+title+']]','yade.wrapper.'+target
def mkYrefNode(target,text,rawtext,role,explicitText,lineno,options={}):
"""Create hyperlink to yade target. Targets starting with literal 'yade.' are absolute, but the leading 'yade.' will be stripped from the link text. Absolute tergets are supposed to live in page named yade.[module].html, anchored at #yade.[module2].[rest of target], where [module2] is identical to [module], unless mapped over by moduleMap.
Other targets are supposed to live in yade.wrapper (such as c++ classes)."""
writer=__builtin__.writer # to make sure not shadowed by a local var
import string
if target.startswith('yade.'):
module='.'.join(target.split('.')[0:2])
module2=(module if module not in moduleMap.keys() else moduleMap[module])
if target==module: target='' # to reference the module itself
uri=('%%%s#%s'%(module2,target) if writer=='latex' else '%s.html#%s'%(module2,target))
if not explicitText and module!=module2:
text=module2+'.'+'.'.join(target.split('.')[2:])
text=string.replace(text,'yade.','',1)
elif target.startswith('external:'):
exttarget=target.split(':',1)[1]
if not explicitText: text=exttarget
target=exttarget if '.' in exttarget else 'module-'+exttarget
uri=(('%%external#%s'%target) if writer=='latex' else 'external.html#%s'%target)
else:
uri=(('%%yade.wrapper#yade.wrapper.%s'%target) if writer=='latex' else 'yade.wrapper.html#yade.wrapper.%s'%target)
#print writer,uri
if 0:
refnode=addnodes.pending_xref(rawtext,reftype=role,refexplicit=explicitText,reftarget=target)
#refnode.line=lineno
#refnode+=nodes.literal(rawtext,text,classes=['ref',role])
return [refnode],[]
#ret.rawtext,reftype=role,
else:
return nodes.reference(rawtext,docutils.utils.unescape(text),refuri=uri,**options)
#return [refnode],[]
def ydefault_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :ydefault:`something` role. fixSignature handles it now in the member signature itself, this merely expands to nothing."
return [],[]
def yattrtype_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yattrtype:`something` role. fixSignature handles it now in the member signature itself, this merely expands to nothing."
return [],[]
# FIXME: should return readable representation of bits of the number (yade.wrapper.AttrFlags enum)
def yattrflags_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yattrflags:`something` role. fixSignature handles it now in the member signature itself."
return [],[]
from docutils.parsers.rst import roles
def yaderef_role_2(type,rawtext,text,lineno,inliner,options={},content=[]): return YadeXRefRole()('yref',rawtext,text,lineno,inliner,options,content)
roles.register_canonical_role('yref', yaderef_role)
roles.register_canonical_role('ysrc', yadesrc_role)
roles.register_canonical_role('ydefault', ydefault_role)
roles.register_canonical_role('yattrtype', yattrtype_role)
roles.register_canonical_role('yattrflags', yattrflags_role)
## http://sphinx.pocoo.org/config.html#confval-rst_epilog
rst_epilog = """
.. |yupdate| replace:: *(auto-updated)*
.. |ycomp| replace:: *(auto-computed)*
.. |ystatic| replace:: *(static)*
"""
import collections
def customExclude(app, what, name, obj, skip, options):
if name=='clone':
if 'Serializable.clone' in str(obj): return False
return True
#escape crash on non iterable __doc__ in some qt object
if hasattr(obj,'__doc__') and obj.__doc__ and not isinstance(obj.__doc__, collections.Iterable): return True
if hasattr(obj,'__doc__') and obj.__doc__ and ('|ydeprecated|' in obj.__doc__ or '|yhidden|' in obj.__doc__): return True
#if re.match(r'\b(__init__|__reduce__|__repr__|__str__)\b',name): return True
if name.startswith('_'):
if name=='__init__':
# skip boost classes with parameterless ctor (arg1=implicit self)
if obj.__doc__=="\n__init__( (object)arg1) -> None": return True
# skip undocumented ctors
if not obj.__doc__: return True
# skip default ctor for serializable, taking dict of attrs
if obj.__doc__=='\n__init__( (object)arg1) -> None\n\nobject __init__(tuple args, dict kwds)': return True
#for i,l in enumerate(obj.__doc__.split('\n')): print name,i,l,'##'
return False
return True
return False
def isBoostFunc(what,obj):
return what=='function' and obj.__repr__().startswith('<Boost.Python.function object at 0x')
def isBoostMethod(what,obj):
"I don't know how to distinguish boost and non-boost methods..."
return what=='method' and obj.__repr__().startswith('<unbound method ');
def replaceLaTeX(s):
# replace single non-escaped dollars $...$ by :math:`...`
# then \$ by single $
s=re.sub(r'(?<!\\)\$([^\$]+)(?<!\\)\$',r'\ :math:`\1`\ ',s)
return re.sub(r'\\\$',r'$',s)
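# For instance (illustrative): replaceLaTeX(r'stiffness $k_n$ costs \$5')
# returns r'stiffness \ :math:`k_n`\  costs $5' -- unescaped $...$ becomes an
# inline :math: role, while escaped dollars are turned into literal dollar signs.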
def fixSrc(app,docname,source):
source[0]=replaceLaTeX(source[0])
def fixDocstring(app,what,name,obj,options,lines):
# remove empty default roles, which is not properly interpreted by docutils parser
for i in range(0,len(lines)):
lines[i]=lines[i].replace(':ydefault:``','')
lines[i]=lines[i].replace(':yattrtype:``','')
lines[i]=lines[i].replace(':yattrflags:``','')
#lines[i]=re.sub(':``',':` `',lines[i])
# remove signature of boost::python function docstring, which is the first line of the docstring
if isBoostFunc(what,obj):
l2=boostFuncSignature(name,obj)[1]
# we must replace lines one by one (in-place) :-|
# knowing that l2 is always shorter than lines (l2 is docstring with the signature stripped off)
for i in range(0,len(lines)):
lines[i]=l2[i] if i<len(l2) else ''
elif isBoostMethod(what,obj):
l2=boostFuncSignature(name,obj)[1]
for i in range(0,len(lines)):
lines[i]=l2[i] if i<len(l2) else ''
# LaTeX: replace $...$ by :math:`...`
# must be done after calling boostFuncSignature which uses original docstring
for i in range(0,len(lines)): lines[i]=replaceLaTeX(lines[i])
def boostFuncSignature(name,obj,removeSelf=False):
"""Scan docstring of obj, returning tuple of properly formatted boost python signature
(first line of the docstring) and the rest of docstring (as list of lines).
The rest of docstring is stripped of 4 leading spaces which are automatically
added by boost.
removeSelf will attempt to remove the first argument from the signature.
"""
doc=obj.__doc__
if doc==None: # not a boost method
return None,None
nname=name.split('.')[-1]
docc=doc.split('\n')
if len(docc)<2: return None,docc
doc1=docc[1]
# functions with weird docstring, likely not documented by boost
if not re.match('^'+nname+r'(.*)->.*$',doc1):
return None,docc
if doc1.endswith(':'): doc1=doc1[:-1]
strippedDoc=doc.split('\n')[2:]
# check if all lines are padded
allLinesHave4LeadingSpaces=True
for l in strippedDoc:
if l.startswith(' '): continue
allLinesHave4LeadingSpaces=False; break
# remove the padding if so
if allLinesHave4LeadingSpaces: strippedDoc=[l[4:] for l in strippedDoc]
for i in range(len(strippedDoc)):
# fix signatures inside docstring (one function with multiple signatures)
strippedDoc[i],n=re.subn(r'([a-zA-Z_][a-zA-Z0-9_]*\() \(object\)arg1(, |)',r'\1',strippedDoc[i].replace('->','→'))
	# inspect docstring after mangling
if 'getViscoelasticFromSpheresInteraction' in name and False:
print name
print strippedDoc
print '======================'
for l in strippedDoc: print l
print '======================'
sig=doc1.split('(',1)[1]
if removeSelf:
# remove up to the first comma; if no comma present, then the method takes no arguments
# if [ precedes the comma, add it to the result (ugly!)
try:
ss=sig.split(',',1)
if ss[0].endswith('['): sig='['+ss[1]
else: sig=ss[1]
except IndexError:
# grab the return value
try:
sig=') -> '+sig.split('->')[-1]
#if 'Serializable' in name: print 1000*'#',name
except IndexError:
sig=')'
return '('+sig,strippedDoc
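# Sketch of the return value (hypothetical boost-generated docstring):
# for obj.__doc__ == "\nrun( (object)arg1, (int)nSteps) -> None:\n    body",
# boostFuncSignature('Omega.run', obj) returns roughly
#   ('( (object)arg1, (int)nSteps) -> None', ['body'])
# and with removeSelf=True the leading (object)arg1 is stripped:
#   ('( (int)nSteps) -> None', ['body'])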
def fixSignature(app, what, name, obj, options, signature, return_annotation):
#print what,name,obj,signature#,dir(obj)
if what=='attribute':
doc=unicode(obj.__doc__)
ret=''
m=re.match('.*:ydefault:`(.*?)`.*',doc)
if m:
typ=''
#try:
# clss='.'.join(name.split('.')[:-1])
# instance=eval(clss+'()')
# typ='; '+getattr(instance,name.split('.')[-1]).__class__.__name__
# if typ=='; NoneType': typ=''
#except TypeError: ##no registered converted
# typ=''
dfl=m.group(1)
m2=re.match(r'\s*\(\s*\(\s*void\s*\)\s*\"(.*)\"\s*,\s*(.*)\s*\)\s*',dfl)
if m2: dfl="%s, %s"%(m2.group(2),m2.group(1))
if dfl!='': ret+=' (='+dfl+'%s)'%typ
else: ret+=' (=uninitalized%s)'%typ
#m=re.match('.*\[(.{,8})\].*',doc)
#m=re.match('.*:yunit:`(.?*)`.*',doc)
#if m:
# units=m.group(1)
# print '@@@@@@@@@@@@@@@@@@@@@',name,units
# ret+=' ['+units+']'
return ret,None
elif what=='class':
ret=[]
if len(obj.__bases__)>0:
base=obj.__bases__[0]
while base.__module__!='Boost.Python':
ret+=[base.__name__]
if len(base.__bases__)>0: base=base.__bases__[0]
else: break
if len(ret):
return ' (inherits '+u' → '.join(ret)+')',None
else: return None,None
elif isBoostFunc(what,obj):
sig=boostFuncSignature(name,obj)[0] or ' (wrapped c++ function)'
return sig,None
elif isBoostMethod(what,obj):
sig=boostFuncSignature(name,obj,removeSelf=True)[0]
return sig,None
#else: print what,name,obj.__repr__()
#return None,None
from sphinx import addnodes
def parse_ystaticattr(env,attr,attrnode):
m=re.match(r'([a-zA-Z0-9_]+)\.(.*)\(=(.*)\)',attr)
if not m:
print 100*'@'+' Static attribute %s not matched'%attr
attrnode+=addnodes.desc_name(attr,attr)
klass,name,default=m.groups()
#attrnode+=addnodes.desc_type('static','static')
attrnode+=addnodes.desc_name(name,name)
plist=addnodes.desc_parameterlist()
if default=='': default='unspecified'
plist+=addnodes.desc_parameter('='+default,'='+default)
attrnode+=plist
attrnode+=addnodes.desc_annotation(' [static]',' [static]')
return klass+'.'+name
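# Example reST usage of the 'ystaticattr' description unit registered in
# setup() below (class and attribute names are illustrative only):
#   .. ystaticattr:: MyEngine.stiffness(=1e9)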
#############################
## set tab size
###################
## http://groups.google.com/group/sphinx-dev/browse_thread/thread/35b8071ffe9a8feb
def setup(app):
from sphinx.highlighting import lexers
from pygments.lexers.compiled import CppLexer
lexers['cpp'] = CppLexer(tabsize=3)
lexers['c++'] = CppLexer(tabsize=3)
from pygments.lexers.agile import PythonLexer
lexers['python'] = PythonLexer(tabsize=3)
app.connect('source-read',fixSrc)
app.connect('autodoc-skip-member',customExclude)
app.connect('autodoc-process-signature',fixSignature)
app.connect('autodoc-process-docstring',fixDocstring)
app.add_description_unit('ystaticattr',None,objname='static attribute',indextemplate='pair: %s; static method',parse_node=parse_ystaticattr)
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#
# HACK: change ipython console regexp from ipython_console_highlighting.py
import re
sys.path.append(os.path.abspath('.'))
import yade.config
if 1:
if yade.runtime.ipython_version<12:
import ipython_directive as id
else:
if 12<=yade.runtime.ipython_version<13:
import ipython_directive012 as id
else:
import ipython_directive013 as id
#The next four lines are for compatibility with IPython 0.13.1
ipython_rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
ipython_rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
ipython_promptin ='Yade [%d]:'
ipython_promptout=' -> [%d]: '
ipython_cont_spaces=' '
#For IPython <=0.12, the following lines are used
id.rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
id.rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
id.rgxcont=re.compile(r'(?: +)\.\.+:\s?(.*)\s*')
id.fmtin ='Yade [%d]:'
id.fmtout =' -> [%d]: ' # for some reason, out and cont must have the trailing space
id.fmtcont=' .\D.: '
id.rc_override=dict(prompt_in1="Yade [\#]:",prompt_in2=" .\D.:",prompt_out=r" -> [\#]: ")
if yade.runtime.ipython_version<12:
id.reconfig_shell()
import ipython_console_highlighting as ich
ich.IPythonConsoleLexer.input_prompt = re.compile("(Yade \[[0-9]+\]: )")
ich.IPythonConsoleLexer.output_prompt = re.compile("(( -> |Out)|\[[0-9]+\]: )")
ich.IPythonConsoleLexer.continue_prompt = re.compile("\s+\.\.\.+:")
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.graphviz',
'sphinx.ext.viewcode',
'sphinx.ext.inheritance_diagram',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives',
#'matplotlib.sphinxext.mathmpl',
'ipython_console_highlighting',
'youtube',
'sphinx.ext.todo',
]
if yade.runtime.ipython_version<12:
extensions.append('ipython_directive')
else:
if 12<=yade.runtime.ipython_version<13:
extensions.append('ipython_directive012')
else:
extensions.append('ipython_directive013')
# the sidebar extension
if False:
if writer=='html':
extensions+=['sphinx.ext.sidebar']
sidebar_all=True
sidebar_relling=True
#sidebar_abbrev=True
sidebar_tocdepth=3
## http://trac.sagemath.org/sage_trac/attachment/ticket/7549/trac_7549-doc_inheritance_underscore.patch
# GraphViz includes dot, neato, twopi, circo, fdp.
graphviz_dot = 'dot'
inheritance_graph_attrs = { 'rankdir' : 'BT' }
inheritance_node_attrs = { 'height' : 0.5, 'fontsize' : 12, 'shape' : 'oval' }
inheritance_edge_attrs = {}
my_latex_preamble=r'''
\usepackage{euler} % must be loaded before fontspec for the whole doc (below); this must be kept for pngmath, however
\usepackage{hyperref}
\usepackage{amsmath}
\usepackage{amsbsy}
%\usepackage{mathabx}
\usepackage{underscore}
\usepackage[all]{xy}
% Metadata of the pdf output
\hypersetup{pdftitle={Yade Documentation}}
\hypersetup{pdfauthor={V. Smilauer, E. Catalano, B. Chareyre, S. Dorofeenko, J. Duriez, A. Gladky, J. Kozicki, C. Modenese, L. Scholtes, L. Sibille, J. Stransky, K. Thoeni}}
% symbols
\let\mat\boldsymbol % matrix
\let\vec\boldsymbol % vector
\let\tens\boldsymbol % tensor
\def\normalized#1{\widehat{#1}}
\def\locframe#1{\widetilde{#1}}
% timestep
\def\Dt{\Delta t}
\def\Dtcr{\Dt_{\rm cr}}
% algorithm complexity
\def\bigO#1{\ensuremath{\mathcal{O}(#1)}}
% variants for greek symbols
\let\epsilon\varepsilon
\let\theta\vartheta
\let\phi\varphi
% shorthands
\let\sig\sigma
\let\eps\epsilon
% variables at different points of time
\def\prev#1{#1^-}
\def\pprev#1{#1^\ominus}
\def\curr#1{#1^{\circ}}
\def\nnext#1{#1^\oplus}
\def\next#1{#1^+}
% shorthands for geometry
\def\currn{\curr{\vec{n}}}
\def\currC{\curr{\vec{C}}}
\def\uT{\vec{u}_T}
\def\curruT{\curr{\vec{u}}_T}
\def\prevuT{\prev{\vec{u}}_T}
\def\currn{\curr{\vec{n}}}
\def\prevn{\prev{\vec{n}}}
% motion
\def\pprevvel{\pprev{\dot{\vec{u}}}}
\def\nnextvel{\nnext{\dot{\vec{u}}}}
\def\curraccel{\curr{\ddot{\vec{u}}}}
\def\prevpos{\prev{\vec{u}}}
\def\currpos{\curr{\vec{u}}}
\def\nextpos{\next{\vec{u}}}
\def\curraaccel{\curr{\dot{\vec{\omega}}}}
\def\pprevangvel{\pprev{\vec{\omega}}}
\def\nnextangvel{\nnext{\vec{\omega}}}
\def\loccurr#1{\curr{\locframe{#1}}}
\def\numCPU{n_{\rm cpu}}
\DeclareMathOperator{\Align}{Align}
\DeclareMathOperator{\sign}{sgn}
% sorting algorithms
\def\isleq#1{\currelem{#1}\ar@/^/[ll]^{\leq}}
\def\isnleq#1{\currelem{#1}\ar@/^/[ll]^{\not\leq}}
\def\currelem#1{\fbox{$#1$}}
\def\sortSep{||}
\def\sortInv{\hbox{\phantom{||}}}
\def\sortlines#1{\xymatrix@=3pt{#1}}
\def\crossBound{||\mkern-18mu<}
'''
pngmath_latex_preamble=r'\usepackage[active]{preview}'+my_latex_preamble
pngmath_use_preview=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index-toctree'
# General information about the project.
project = u'Yade'
copyright = u'2009, Václav Šmilauer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = yade.config.version
# The full version, including alpha/beta/rc tags.
release = yade.config.revision
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['yade.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'stickysidebar':'true','collapsiblesidebar':'true','rightsidebar':'false'}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'fig/yade-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'fig/yade-favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static-html']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_index='index.html'
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = { 'index':'index.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Yadedoc'
# -- Options for LaTeX output --------------------------------------------------
my_maketitle=r'''
\begin{titlepage}
\begin{flushright}
\hrule{}
% Upper part of the page
\begin{flushright}
\includegraphics[width=0.15\textwidth]{yade-logo.png}\par
\end{flushright}
\vspace{20 mm}
\text{\sffamily\bfseries\Huge Yade Documentation}\\
\vspace{5 mm}
\vspace{70 mm}
\begin{sffamily}\bfseries\Large
V\'{a}clav \v{S}milauer, Emanuele Catalano, Bruno Chareyre, Sergei Dorofeenko, Jerome Duriez, Anton Gladky, Janek Kozicki, Chiara Modenese, Luc Scholt\`{e}s, Luc Sibille, Jan Str\'{a}nsk\'{y}, Klaus Thoeni
\end{sffamily}
\vspace{20 mm}
\hrule{}
\vfill
% Bottom of the page
\textit{\Large Release '''\
+yade.config.revision\
+r''', \today}
\end{flushright}
\end{titlepage}
\text{\sffamily\bfseries\LARGE Authors}\\
\\
\text{\sffamily\bfseries\Large V\'{a}clav \v{S}milauer}\\
\text{\sffamily\Large University of Innsbruck}\\
\\
\text{\sffamily\bfseries\Large Emanuele Catalano}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Bruno Chareyre}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Sergei Dorofeenko}\\
\text{\sffamily\Large IPCP RAS, Chernogolovka}\\
\\
\text{\sffamily\bfseries\Large Jerome Duriez}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Anton Gladky}\\
\text{\sffamily\Large TU Bergakademie Freiberg}\\
\\
\text{\sffamily\bfseries\Large Janek Kozicki}\\
\text{\sffamily\Large Gdansk University of Technology - lab. 3SR Grenoble University }\\
\\
\text{\sffamily\bfseries\Large Chiara Modenese}\\
\text{\sffamily\Large University of Oxford}\\
\\
\text{\sffamily\bfseries\Large Luc Scholt\`{e}s}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Luc Sibille}\\
\text{\sffamily\Large University of Nantes, lab. GeM}\\
\\
\text{\sffamily\bfseries\Large Jan Str\'{a}nsk\'{y}}\\
\text{\sffamily\Large CVUT Prague}\\
\\
\text{\sffamily\bfseries\Large Klaus Thoeni}
\text{\sffamily\Large The University of Newcastle (Australia)}\\
\text{\sffamily\bfseries\large Citing this document}\\
In order to let users cite Yade consistently in publications, we provide a list of bibliographic references for the different parts of the documentation. This way of acknowledging Yade is also a way to make developments and documentation of Yade more attractive for researchers, who are evaluated on the basis of citations of their work by others. We therefore kindly ask users to cite Yade as accurately as possible in their papers, as explained in http://yade-dem/doc/citing.html.
'''
latex_elements=dict(
papersize='a4paper',
fontpkg=r'''
\usepackage{euler}
\usepackage{fontspec,xunicode,xltxtra}
%\setmainfont[BoldFont={LMRoman10 Bold}]{CMU Concrete} %% CMU Concrete must be installed by hand as otf
''',
utf8extra='',
fncychap='',
preamble=my_latex_preamble,
footer='',
inputenc='',
fontenc='',
maketitle=my_maketitle,
)
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index-toctree', 'Yade.tex', u'Yade Documentation',
u'Václav Šmilauer', 'manual'),
('index-toctree_manuals', 'YadeManuals.tex', u'Yade Tutorial and Manuals',
u'Václav Šmilauer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'fig/yade-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| gpl-2.0 |
gamahead/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/polar.py | 69 | 20981 | import math
import numpy as npy
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.artist import kwdocd
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class PolarAxes(Axes):
"""
A polar graph projection, where the input dimensions are *theta*, *r*.
Theta starts pointing east and goes anti-clockwise.
"""
name = 'polar'
class PolarTransform(Transform):
"""
The base polar transform. This handles projection *theta* and
*r* into Cartesian coordinate space *x* and *y*, but does not
perform the ultimate affine transformation into the correct
position.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new polar transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved polar space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, tr):
xy = npy.zeros(tr.shape, npy.float_)
t = tr[:, 0:1]
r = tr[:, 1:2]
x = xy[:, 0:1]
y = xy[:, 1:2]
x[:] = r * npy.cos(t)
y[:] = r * npy.sin(t)
return xy
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
t = vertices[:, 0:1]
t[t != (npy.pi * 2.0)] %= (npy.pi * 2.0)
if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]:
return Path(self.transform(vertices), path.codes)
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return PolarAxes.InvertedPolarTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
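    # Illustrative example (not part of matplotlib): the non-affine step maps
    # (theta, r) rows to Cartesian (x, y), e.g.
    #   PolarAxes.PolarTransform(75).transform(npy.array([[npy.pi / 2.0, 2.0]]))
    # returns approximately [[0.0, 2.0]].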
class PolarAffine(Affine2DBase):
"""
The affine part of the polar projection. Scales the output so
that maximum radius rests on the edge of the axes circle.
"""
def __init__(self, scale_transform, limits):
u"""
*limits* is the view limit of the data. The only part of
its bounds that is used is ymax (for the radius maximum).
The theta range is always fixed to (0, 2\u03c0).
"""
Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None
def get_matrix(self):
if self._invalid:
limits_scaled = self._limits.transformed(self._scale_transform)
ymax = limits_scaled.ymax
affine = Affine2D() \
.scale(0.5 / ymax) \
.translate(0.5, 0.5)
self._mtx = affine.get_matrix()
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class InvertedPolarTransform(Transform):
"""
The inverse of the polar transform, mapping Cartesian
coordinate space *x* and *y* back to *theta* and *r*.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:]
r = npy.sqrt(x*x + y*y)
theta = npy.arccos(x / r)
theta = npy.where(y < 0, 2 * npy.pi - theta, theta)
return npy.concatenate((theta, r), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return PolarAxes.PolarTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class ThetaFormatter(Formatter):
u"""
Used to format the *theta* tick labels. Converts the
native unit of radians into degrees and adds a degree symbol
(\u00b0).
"""
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % ((x / npy.pi) * 180.0)
else:
# we use unicode, rather than mathtext with \circ, so
# that it will work correctly with any arbitrary font
# (assuming it has a degree sign), whereas $5\circ$
# will only work correctly with one of the supported
# math fonts (Computer Modern and STIX)
return u"%0.0f\u00b0" % ((x / npy.pi) * 180.0)
class RadialLocator(Locator):
"""
Used to locate radius ticks.
Ensures that all ticks are strictly positive. For all other
tasks, it delegates to the base
:class:`~matplotlib.ticker.Locator` (which may be different
        depending on the scale of the *r*-axis).
"""
def __init__(self, base):
self.base = base
def __call__(self):
ticks = self.base()
return [x for x in ticks if x > 0]
def autoscale(self):
return self.base.autoscale()
def pan(self, numsteps):
return self.base.pan(numsteps)
def zoom(self, direction):
return self.base.zoom(direction)
def refresh(self):
return self.base.refresh()
RESOLUTION = 75
def __init__(self, *args, **kwargs):
"""
Create a new Polar Axes for a polar plot.
"""
self._rpad = 0.05
self.resolution = kwargs.pop('resolution', self.RESOLUTION)
Axes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
__init__.__doc__ = Axes.__init__.__doc__
def cla(self):
Axes.cla(self)
self.title.set_y(1.05)
self.xaxis.set_major_formatter(self.ThetaFormatter())
angles = npy.arange(0.0, 360.0, 45.0)
self.set_thetagrids(angles)
self.yaxis.set_major_locator(self.RadialLocator(self.yaxis.get_major_locator()))
self.grid(rcParams['polaraxes.grid'])
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
def _set_lim_and_transforms(self):
self.transAxes = BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = TransformWrapper(IdentityTransform())
# A (possibly non-linear) projection on the (already scaled) data
self.transProjection = self.PolarTransform(self.resolution)
# An affine transformation on the data, generally to limit the
# range of the axes
self.transProjectionAffine = self.PolarAffine(self.transScale, self.viewLim)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = self.transScale + self.transProjection + \
(self.transProjectionAffine + self.transAxes)
# This is the transform for theta-axis ticks. It is
# equivalent to transData, except it always puts r == 1.0 at
# the edge of the axis circle.
self._xaxis_transform = (
self.transProjection +
self.PolarAffine(IdentityTransform(), Bbox.unit()) +
self.transAxes)
# The theta labels are moved from radius == 0.0 to radius == 1.1
self._theta_label1_position = Affine2D().translate(0.0, 1.1)
self._xaxis_text1_transform = (
self._theta_label1_position +
self._xaxis_transform)
self._theta_label2_position = Affine2D().translate(0.0, 1.0 / 1.1)
self._xaxis_text2_transform = (
self._theta_label2_position +
self._xaxis_transform)
# This is the transform for r-axis ticks. It scales the theta
# axis so the gridlines from 0.0 to 1.0, now go from 0.0 to
# 2pi.
self._yaxis_transform = (
Affine2D().scale(npy.pi * 2.0, 1.0) +
self.transData)
# The r-axis labels are put at an angle and padded in the r-direction
self._r_label1_position = Affine2D().translate(22.5, self._rpad)
self._yaxis_text1_transform = (
self._r_label1_position +
Affine2D().scale(1.0 / 360.0, 1.0) +
self._yaxis_transform
)
self._r_label2_position = Affine2D().translate(22.5, self._rpad)
self._yaxis_text2_transform = (
self._r_label2_position +
Affine2D().scale(1.0 / 360.0, 1.0) +
self._yaxis_transform
)
def get_xaxis_transform(self):
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'center', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'center', 'center'
def get_yaxis_transform(self):
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
return self._yaxis_text1_transform, 'center', 'center'
def get_yaxis_text2_transform(self, pad):
return self._yaxis_text2_transform, 'center', 'center'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def set_rmax(self, rmax):
self.viewLim.y1 = rmax
angle = self._r_label1_position.to_values()[4]
self._r_label1_position.clear().translate(
angle, rmax * self._rpad)
self._r_label2_position.clear().translate(
angle, -rmax * self._rpad)
def get_rmax(self):
return self.viewLim.ymax
def set_yscale(self, *args, **kwargs):
Axes.set_yscale(self, *args, **kwargs)
self.yaxis.set_major_locator(
self.RadialLocator(self.yaxis.get_major_locator()))
set_rscale = Axes.set_yscale
set_rticks = Axes.set_yticks
def set_thetagrids(self, angles, labels=None, frac=None,
**kwargs):
"""
Set the angles at which to place the theta grids (these
gridlines are equal along the theta dimension). *angles* is in
degrees.
*labels*, if not None, is a ``len(angles)`` list of strings of
the labels to use at each angle.
If *labels* is None, the labels will be ``fmt %% angle``
*frac* is the fraction of the polar axes radius at which to
place the label (1 is the edge). Eg. 1.05 is outside the axes
and 0.95 is inside the axes.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
angles = npy.asarray(angles, npy.float_)
self.set_xticks(angles * (npy.pi / 180.0))
if labels is not None:
self.set_xticklabels(labels)
if frac is not None:
self._theta_label1_position.clear().translate(0.0, frac)
self._theta_label2_position.clear().translate(0.0, 1.0 / frac)
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()
set_thetagrids.__doc__ = cbook.dedent(set_thetagrids.__doc__) % kwdocd
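    # Typical call (illustrative), e.g. with ax = figure().add_subplot(111, polar=True):
    #   ax.set_thetagrids(npy.arange(0.0, 360.0, 45.0), frac=1.1)
    # places labelled theta gridlines every 45 degrees just outside the axes circle.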
def set_rgrids(self, radii, labels=None, angle=None, rpad=None, **kwargs):
"""
Set the radial locations and labels of the *r* grids.
The labels will appear at radial distances *radii* at the
given *angle* in degrees.
*labels*, if not None, is a ``len(radii)`` list of strings of the
labels to use at each radius.
If *labels* is None, the built-in formatter will be used.
*rpad* is a fraction of the max of *radii* which will pad each of
the radial labels in the radial direction.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
radii = npy.asarray(radii)
rmin = radii.min()
if rmin <= 0:
raise ValueError('radial grids must be strictly positive')
self.set_yticks(radii)
if labels is not None:
self.set_yticklabels(labels)
if angle is None:
angle = self._r_label1_position.to_values()[4]
if rpad is not None:
self._rpad = rpad
rmax = self.get_rmax()
self._r_label1_position.clear().translate(angle, self._rpad * rmax)
self._r_label2_position.clear().translate(angle, -self._rpad * rmax)
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return self.yaxis.get_ticklines(), self.yaxis.get_ticklabels()
set_rgrids.__doc__ = cbook.dedent(set_rgrids.__doc__) % kwdocd
def set_xscale(self, scale, *args, **kwargs):
if scale != 'linear':
raise NotImplementedError("You can not set the xscale on a polar plot.")
def set_xlim(self, *args, **kargs):
# The xlim is fixed, no matter what you do
self.viewLim.intervalx = (0.0, npy.pi * 2.0)
def format_coord(self, theta, r):
"""
Return a format string formatting the coordinate using Unicode
characters.
"""
theta /= math.pi
# \u03b8: lower-case theta
# \u03c0: lower-case pi
# \u00b0: degree symbol
return u'\u03b8=%0.3f\u03c0 (%0.3f\u00b0), r=%0.3f' % (theta, theta * 180.0, r)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself. For a polar plot,
this should always be 1.0
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return True if this axes support the zoom box
"""
return False
def start_pan(self, x, y, button):
angle = self._r_label1_position.to_values()[4] / 180.0 * npy.pi
mode = ''
if button == 1:
epsilon = npy.pi / 45.0
t, r = self.transData.inverted().transform_point((x, y))
if t >= angle - epsilon and t <= angle + epsilon:
mode = 'drag_r_labels'
elif button == 3:
mode = 'zoom'
self._pan_start = cbook.Bunch(
rmax = self.get_rmax(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
r_label_angle = self._r_label1_position.to_values()[4],
x = x,
y = y,
mode = mode
)
def end_pan(self):
del self._pan_start
def drag_pan(self, button, key, x, y):
p = self._pan_start
if p.mode == 'drag_r_labels':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
# Deal with theta
dt0 = t - startt
dt1 = startt - t
if abs(dt1) < abs(dt0):
dt = abs(dt1) * sign(dt0) * -1.0
else:
dt = dt0 * -1.0
dt = (dt / npy.pi) * 180.0
rpad = self._r_label1_position.to_values()[5]
self._r_label1_position.clear().translate(
p.r_label_angle - dt, rpad)
self._r_label2_position.clear().translate(
p.r_label_angle - dt, -rpad)
elif p.mode == 'zoom':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
dr = r - startr
# Deal with r
scale = r / startr
self.set_rmax(p.rmax / scale)
# These are a couple of aborted attempts to project a polar plot using
# cubic bezier curves.
# def transform_path(self, path):
# twopi = 2.0 * npy.pi
# halfpi = 0.5 * npy.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = npy.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = npy.ceil(maxtd / halfpi)
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# vertices = self.transform(vertices)
# result = npy.zeros((len(vertices) * 3 - 2, 2), npy.float_)
# codes = mpath.Path.CURVE4 * npy.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((npy.sqrt(2.0) - 1.0) / 3.0)
# kappa = 0.5
# p0 = vertices[0:-1]
# p1 = vertices[1: ]
# x0 = p0[:, 0:1]
# y0 = p0[:, 1: ]
# b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0)
# a0 = y0 - b0*x0
# x1 = p1[:, 0:1]
# y1 = p1[:, 1: ]
# b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1)
# a1 = y1 - b1*x1
# x = -(a0-a1) / (b0-b1)
# y = a0 + b0*x
# xk = (x - x0) * kappa + x0
# yk = (y - y0) * kappa + y0
# result[1::3, 0:1] = xk
# result[1::3, 1: ] = yk
# xk = (x - x1) * kappa + x1
# yk = (y - y1) * kappa + y1
# result[2::3, 0:1] = xk
# result[2::3, 1: ] = yk
# result[3::3] = p1
# print vertices[-2:]
# print result[-2:]
# return mpath.Path(result, codes)
# twopi = 2.0 * npy.pi
# halfpi = 0.5 * npy.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = npy.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = npy.ceil(maxtd / halfpi)
# print "interpolate", interpolate
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# result = npy.zeros((len(vertices) * 3 - 2, 2), npy.float_)
# codes = mpath.Path.CURVE4 * npy.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((npy.sqrt(2.0) - 1.0) / 3.0)
# tkappa = npy.arctan(kappa)
# hyp_kappa = npy.sqrt(kappa*kappa + 1.0)
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# r0 = vertices[0:-1, 1]
# r1 = vertices[1: , 1]
# td = npy.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# td_scaled = td / (npy.pi * 0.5)
# rd = r1 - r0
# r0kappa = r0 * kappa * td_scaled
# r1kappa = r1 * kappa * td_scaled
# ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled
# result[1::3, 0] = t0 + (tkappa * td_scaled)
# result[1::3, 1] = r0*hyp_kappa
# # result[1::3, 1] = r0 / npy.cos(tkappa * td_scaled) # npy.sqrt(r0*r0 + ravg_kappa*ravg_kappa)
# result[2::3, 0] = t1 - (tkappa * td_scaled)
# result[2::3, 1] = r1*hyp_kappa
# # result[2::3, 1] = r1 / npy.cos(tkappa * td_scaled) # npy.sqrt(r1*r1 + ravg_kappa*ravg_kappa)
# result[3::3, 0] = t1
# result[3::3, 1] = r1
# print vertices[:6], result[:6], t0[:6], t1[:6], td[:6], td_scaled[:6], tkappa
# result = self.transform(result)
# return mpath.Path(result, codes)
# transform_path_non_affine = transform_path
| gpl-3.0 |
Phobia0ptik/ThinkStats2 | code/populations.py | 68 | 2609 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
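# Quick usage sketch (not part of the original script); assumes the census CSV
# is available in the working directory:
#   pops = ReadData()
#   print('largest place (in thousands):', pops.max())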
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
| gpl-3.0 |
quheng/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 176 | 2169 | from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
    # The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
    # The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
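# Illustrative sketch (not part of the test suite): FunctionTransformer is
# typically composed with an estimator in a Pipeline, e.g.
#   from sklearn.pipeline import Pipeline
#   from sklearn.linear_model import LinearRegression
#   model = Pipeline([('log1p', FunctionTransformer(np.log1p)),
#                     ('reg', LinearRegression())])
#   model.fit(X, y)  # X, y stand in for the user's data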
| bsd-3-clause |
bolmez/Class-HARK | cstwMPC/MakeCSTWfigsForSlides.py | 3 | 4139 | '''
This module / script makes some fairly simple figures used in a version of the slides.
All Booleans at the top of SetupParamsCSTW should be set to False, as this module
imports cstwMPC; there's no need to actually do anything but load the model.
'''
from cstwMPC import *
import matplotlib.pyplot as plt
plot_range = (0.0,30.0)
points = 200
m = np.linspace(plot_range[0],plot_range[1],points)
InfiniteType(a_size=16)
InfiniteType.update()
thorn = 1.0025*0.99325/(1.01)
mTargFunc = lambda x : (1 - thorn)*x + thorn
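# Interpretive note (an assumption, not from the original script): thorn is
# built from the growth factor 1.0025, the discount factor 0.99325 and the
# interest factor 1.01, and mTargFunc(m) = (1 - thorn)*m + thorn appears to be
# the consumption level at which cash-on-hand m is expected to stay constant,
# so its intersection with the consumption function marks the target m.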
mystr = lambda number : "{:.3f}".format(number)
mystrx = lambda number : "{:.0f}".format(number)
def epaKernel(X):
K = 0.75*(1.0 - X**2.0)
K[np.abs(X) > 1] = 0
return K
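# Note (added): epaKernel is the Epanechnikov kernel, K(u) = 0.75*(1 - u**2)
# for |u| <= 1 and 0 otherwise; doCforBetaEquals below uses it for a kernel
# density estimate of the simulated cash-on-hand distribution.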
def doCforBetaEquals(beta,m):
InfiniteType(beta=beta);
InfiniteType.solve();
InfiniteType.unpack_cFunc()
c = InfiniteType.cFunc[0](m)
InfiniteType.beta = Params.beta_guess
InfiniteType.simulateCSTWc()
m_hist = InfiniteType.m_history
m_temp = np.reshape(m_hist[100:Params.sim_periods,:],((Params.sim_periods-100)*Params.sim_pop_size,1))
n = m_temp.size
h = m[2] - m[0]
m_dist = np.zeros(m.shape) + np.nan
for j in range(m.size):
x = (m_temp - m[j])/h
m_dist[j] = np.sum(epaKernel(x))/(n*h)
print('did beta= ' + str(beta))
return c, m_dist
c_array = np.zeros((17,points)) + np.nan
pdf_array = np.zeros((17,points)) + np.nan
for b in range(17):
beta = 0.978 + b*0.001
c_array[b,], pdf_array[b,] = doCforBetaEquals(beta,m)
for b in range(17):
beta = 0.978 + b*0.001
highest = np.max(pdf_array[b,])
scale = 1.5/highest
scale = 4.0
plt.ylim(0,2.5)
plt.plot(m,scale*pdf_array[b,],'-c')
plt.fill_between(m,np.zeros(m.shape),scale*pdf_array[b,],facecolor='c',alpha=0.5)
plt.plot(m,mTargFunc(m),'-r')
plt.plot(m,c_array[b,],'-k',linewidth=1.5)
plt.text(10,2.2,r'$\beta=$' + str(beta),fontsize=20)
plt.xlabel(r'Cash on hand $m_t$',fontsize=14)
plt.ylabel(r'Consumption $c_t$',fontsize=14)
plt.savefig('./Figures/mDistBeta0' + mystrx(1000*beta) + '.pdf')
plt.show()
plt.plot(m,c_array[12,],'-k',linewidth=1.5)
plt.ylim(0,1.25)
plt.xlim(0,15)
plt.xlabel(r'Cash on hand $m_t$',fontsize=14)
plt.ylabel(r'Consumption $c_t$',fontsize=14)
plt.savefig('./Figures/ConFunc.pdf')
plt.plot(m,mTargFunc(m),'-r')
plt.plot(np.array([9.95,9.95]),np.array([0,1.5]),'--k')
plt.savefig('./Figures/mTargBase.pdf')
plt.fill_between(m,np.zeros(m.shape),scale*2*pdf_array[12,],facecolor='c',alpha=0.5)
plt.savefig('./Figures/mDistBase.pdf')
plt.show()
InfiniteType(beta=0.99);
InfiniteType.solve();
InfiniteType.unpack_cFunc()
m_new = np.linspace(0,15,points)
kappa_vec = InfiniteType.cFunc[0].derivative(m_new)
plt.plot(m_new,kappa_vec,'-k',linewidth=1.5)
plt.xlim(0,15)
plt.ylim(0,1.02)
plt.xlabel(r'Cash on hand $m_t$',fontsize=14)
plt.ylabel(r'Marginal consumption $\kappa_t$',fontsize=14)
plt.savefig('./Figures/kappaFuncBase.pdf')
plt.plot(np.array([9.95,9.95]),np.array([0,1.5]),'--k')
plt.fill_between(m,np.zeros(m.shape),scale*2*pdf_array[12,],facecolor='c',alpha=0.5)
plt.savefig('./Figures/mDistVsKappa.pdf')
plt.show()
plt.plot(m,mTargFunc(m),'-r')
plt.ylim(0,2.5)
plt.xlim(0,30)
for b in range(17):
plt.plot(m,c_array[b,],'-k',linewidth=1.5)
#idx = np.sum(c_array[b,] - mTargFunc(m) < 0)
#mTarg = m[idx]
#plt.plot(np.array([mTarg,mTarg]),np.array([0,2.5]),'--k')
plt.plot(m,mTargFunc(m),'-r')
plt.xlabel(r'Cash on hand $m_t$',fontsize=14)
plt.ylabel(r'Consumption $c_t$',fontsize=14)
plt.savefig('./Figures/ManycFuncs.pdf')
plt.show()
InfiniteType(beta=0.98);
InfiniteType.solve();
InfiniteType.unpack_cFunc()
m_new = np.linspace(0,15,points)
kappa_vec = InfiniteType.cFunc[0].derivative(m_new)
plt.plot(m_new,kappa_vec,'-k',linewidth=1.5)
plt.xlim(0,15)
plt.ylim(0,1.02)
plt.xlabel(r'Cash on hand $m_t$',fontsize=14)
plt.ylabel(r'Marginal consumption $\kappa_t$',fontsize=14)
plt.savefig('./Figures/kappaFuncLowBeta.pdf')
plt.fill_between(m,np.zeros(m.shape),scale*0.33*pdf_array[2,],facecolor='c',alpha=0.5)
plt.savefig('./Figures/mDistVsKappaLowBeta.pdf')
plt.show()
| apache-2.0 |
ternaus/kaggle_otto | src/cross_fold_submission.py | 1 | 1156 | #!/usr/bin/env python
from __future__ import division
__author__ = 'Vladimir Iglovikov'
from sklearn import cross_validation
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
import pandas as pd
train = pd.read_csv('../data/train.csv')
test = pd.read_csv('../data/test.csv')
target = train["target"].values
training = train.drop(["id", 'target'], 1).values
testing = test.drop("id", 1)
skf = cross_validation.StratifiedKFold(target, n_folds=10, random_state=42)
params = {'n_estimators': 1000,
'n_jobs': -1}
ind = 1
for train_index, test_index in skf:
X_train, y_train = training[train_index], target[train_index]
clf = RandomForestClassifier(**params)
fit = clf.fit(X_train, y_train)
prediction_1 = fit.predict_proba(training[test_index])
print(log_loss(target[test_index], prediction_1))
prediction_2 = fit.predict_proba(testing.values)
submission = pd.DataFrame(prediction_2)
submission.columns = ["Class_" + str(i) for i in range(1, 10)]
submission["id"] = test["id"]
submission.to_csv("rf_1000_cv10_ind{ind}.csv".format(ind=ind), index=False)
ind += 1
| mit |
reichelu/copasul | src/copasul_preproc.py | 1 | 61579 |
# author: Uwe Reichel, Budapest, 2016
import os
import mylib as myl
import pandas as pd
import numpy as np
import scipy as si
import sigFunc as sif
import sys
import re
import copy as cp
### main ######################################
# adds:
# .myFileIdx
# .file_stem
# .ii - input idx (from lists in fsys, starting with 1)
# .preproc
# .glob -> [[on off]...]
# .loc -> [[on off center]...]
# .f0 -> [[time f0]...]
# .bv -> f0BaseValue
# main preproc bracket
# IN:
# copa
# (logFileHandle)
# OUT: (ii=fileIdx, i=channelIdx, j=segmentIdx,
# k=myTierNameIndex, l=myBoundaryIndex)
# +['config']
# +['data'][ii][i]['fsys'][indicesInConfig[Fsys]Lists]
# ['f0'|'aud'|'glob'|'loc'|'bnd']['dir'|'stm'|'ext'|'typ']
# ['tier*']
# ['f0']['t'|'y'|'bv']
# ['glob'][j]['t'|'to'|'ri'|'lab'|'tier']
# ['loc'][j]['t'|'to'|'ri'|'lab_ag'|'lab_acc'|'tier_ag'|'tier_acc']
# ['bnd'][k][l]['t'|'to'|'lab'|'tier']
# ['gnl_f0'][k][l]['t'|'to'|'lab'|'tier']
# ['gnl_en'][k][l]['t'|'to'|'lab'|'tier']
# ['rhy_f0'][k][l]['t'|'to'|'lab'|'tier']
# ['rhy_en'][k][l]['t'|'to'|'lab'|'tier']
def pp_main(copa,f_log_in=''):
global f_log
f_log = f_log_in
myLog("DOING: preprocessing ...")
# detach config
opt = cp.deepcopy(copa['config'])
# ff['f0'|'aud'|'annot'|'pulse'] list of full/path/files
ff = pp_file_collector(opt)
# over files
for ii in range(len(ff['f0'])):
#print(ff['annot'][ii]) #!c
copa['data'][ii]={}
# f0 and annotation file content
f0_dat = myl.input_wrapper(ff['f0'][ii],opt['fsys']['f0']['typ'])
annot_dat = myl.input_wrapper(ff['annot'][ii],opt['fsys']['annot']['typ'])
# over channels
for i in range(opt['fsys']['nc']):
myLog("\tfile {}, channel {}".format(myl.stm(ff['f0'][ii]), i+1))
copa['data'][ii][i]={}
copa = pp_channel(copa,opt,ii,i,f0_dat,annot_dat,ff,f_log)
# f0 semitone conversion by grouping factor
copa = pp_f0_st_wrapper(copa)
return copa
# f0 semitone conversion by grouping factor
# IN:
# copa
# OUT:
# copa with converted f0 values in ['data'][myFi][myCi]['f0']['y']
# REMARKS:
# groupingValues not differentiated across groupingVariables of different
# channels
# e.g. base_prct_grp.1 = 'spk1' (e.g. ='abc')
# base_prct_grp.2 = 'spk2' (e.g. ='abc')
# -> 1 base value is calculated for 'abc' and not 2 for spk1_abc and
# spk2_abc, respectively
def pp_f0_st_wrapper(copa):
opt = copa['config']
## do nothing
# ('base_prct_grp' is deleted in copasul_init.py if not
# needed, incl. the case that base_prct==0)
if 'base_prct_grp' not in opt['preproc']:
return copa
## 1. collect f0 for each grouping level
# over file/channel idx
# fg[myGrpLevel] = concatenatedF0
fg = {}
# lci[myChannelIdx] = myGroupingVar
lci = copa['config']['preproc']['base_prct_grp']
for ii in myl.numkeys(copa['data']):
for i in myl.numkeys(copa['data'][ii]):
fg = pp_f0_grp_fg(fg,copa,ii,i,lci)
## 2. base value for each grouping level
#bv[myGrpLevel] = myBaseValue
bv = pp_f0_grp_bv(fg,opt)
## 3. group-wise semitone conversion
for ii in myl.numkeys(copa['data']):
for i in myl.numkeys(copa['data'][ii]):
copa = pp_f0_grp_st(copa,ii,i,bv,lci,opt)
return copa
# towards grp-wise semitone conversion: fg update
# IN:
# fg: fg[myGrpLevel] = concatenatedF0
# copa
# ii: fileIdx
# i: channelIdx
# lci: copa['config']['base_prct_grp']
# OUT:
# fg: updated
def pp_f0_grp_fg(fg,copa,ii,i,lci):
z = copa['data'][ii][i]
# channel-related grouping factor level
x = z['grp'][lci[i]]
if x not in fg:
fg[x] = myl.ea()
fg[x] = np.append(fg[x],z['f0']['y'])
return fg
# calculate base value for each grouping level
# IN:
# fg: fg[myGrpLevel] = concatenatedF0
# opt
# OUT:
# bv[myGrpLevel] = myBaseValue
def pp_f0_grp_bv(fg,opt):
# base prct
b = opt['preproc']['base_prct']
bv = {}
for x in fg:
yi = myl.find(fg[x],'>',0)
cbv, b = pp_bv(fg[x][yi],opt)
bv[x] = cbv
return bv
# grp-wise semitone conversion
# IN:
# copa
# ii: fileIdx
# i: channelIdx
# bv: bv[myGrpLevel]=myBaseValue
# lci: copa['config']['base_prct_grp']
# opt
# OUT:
# copa with st-transformed f0
def pp_f0_grp_st(copa,ii,i,bv,lci,opt):
# grp value
gv = copa['data'][ii][i]['grp'][lci[i]]
y = copa['data'][ii][i]['f0']['y']
yr, z = pp_semton(y,opt,bv[gv])
copa['data'][ii][i]['f0']['y'] = yr
copa['data'][ii][i]['f0']['bv'] = bv[gv]
return copa
# standalone to modify only grouping fields in copa dict
# (via pp_main all subsequent processing would be overwritten)
# hacky call from copasul.py; opt['navigate']['do_export']
# for cases where json config did contain faulty fsys.grp settings
def pp_grp_wrapper(copa):
for ii in myl.numkeys(copa['data']):
for i in myl.numkeys(copa['data'][ii]):
copa = pp_grp(copa,ii,i)
return copa
# file x channel-wise filling of copa['data']
# IN:
# copa
# opt: copa['config']
# ii - fileIdx
# i - channelIdx
# f0_dat - f0 file content
# annot_dat - annotation file content
# ff - file system dict by pp_file_collector()
# f_log_in - log file handle, for calls from augmentation
# environment (without pp_main())
# OUT:
# copa
# +['data'][ii][i]['fsys'][indicesInConfig[Fsys]Lists]
# ['f0'|'aud'|'glob'|'loc'|'bnd']['dir'|'stm'|'ext'|'typ']
# ['tier']
# ['f0']['t'|'y'|'bv']
# ['glob'][j]['t'|'to'|'ri'|'lab'|'tier']
# ['loc'][j]['t'|'to'|'ri'|'lab_ag'|'lab_acc'|'tier_ag'|'tier_acc']
# ['bnd'][k][l]['t'|'to'|'lab'|'tier']
# ['gnl_f0'][k][l]['t'|'to'|'lab'|'tier']
# ['gnl_en'][k][l]['t'|'to'|'lab'|'tier']
# ['rhy_f0'][k][l]['t'|'to'|'lab'|'tier']
# ['rhy_en'][k][l]['t'|'to'|'lab'|'tier']
# for one input file's channel i
def pp_channel(copa,opt,ii,i,f0_dat,annot_dat,ff,f_log_in=''):
global f_log
f_log = f_log_in
## fsys subdict
# ['i'] - file idx
# ['f0|aud|annot|glob|loc|bnd|...']['stm|...']
copa['data'][ii][i]['fsys'] = pp_fsys(opt['fsys'],ff,ii,i)
## grouping
copa = pp_grp(copa,ii,i)
## F0 (1) ########################
# if embedded in augmentation f0 preproc already done
if 'skip_f0' in opt:
t = copa['data'][ii][i]['f0']['t']
y = copa['data'][ii][i]['f0']['y']
f0 = np.concatenate((t[:,None],y[:,None]),axis=1)
f0_ut = f0
else:
f0, f0_ut = pp_read_f0(f0_dat,opt['fsys']['f0'],i)
## chunk ########################
# list of tiernames for channel i (only one element for chunk)
tn = pp_tiernames(opt['fsys'],'chunk','tier',i)
if len(tn)>0:
stm = copa['data'][ii][i]['fsys']['chunk']['stm']
chunk, chunk_ut, lab_chunk = pp_read(annot_dat,opt['fsys']['chunk'],tn[0],stm,'chunk')
else:
chunk = myl.ea()
# no chunks -> file
if len(chunk)==0:
chunk = np.asarray([[f0[0,0],f0[-1,0]]])
chunk_ut = np.asarray([[f0_ut[0,0],f0_ut[-1,0]]])
lab_chunk = opt['fsys']['label']['chunk']
## glob #########################
tn = pp_tiernames(opt['fsys'],'glob','tier',i)
if len(tn)>0:
stm = copa['data'][ii][i]['fsys']['glob']['stm']
glb, glb_ut, lab_glb = pp_read(annot_dat,opt['fsys']['chunk'],tn[0],stm,'glob')
else:
glb = myl.ea()
# point -> segment tier
if len(glb)>0 and np.size(glb,1)==1:
glb, glb_ut, lab_glb = pp_point2segment(glb,glb_ut,lab_glb,chunk,chunk_ut,opt['fsys']['chunk'])
# no glob segs -> chunks
if len(glb)==0:
glb=cp.deepcopy(chunk)
glb_ut=cp.deepcopy(chunk_ut)
lab_glb=cp.deepcopy(lab_chunk)
## loc ##########################
tn_loc = set()
tn_acc = pp_tiernames(opt['fsys'],'loc','tier_acc',i)
tn_ag = pp_tiernames(opt['fsys'],'loc','tier_ag',i)
stm = copa['data'][ii][i]['fsys']['loc']['stm']
if len(tn_ag)>0:
loc_ag, loc_ag_ut, lab_loc_ag = pp_read(annot_dat,opt['fsys']['chunk'],tn_ag[0],stm,'loc')
tn_loc.add(tn_ag[0])
else:
loc_ag, loc_ag_ut, lab_loc_ag = pp_read_empty()
if len(tn_acc)>0:
loc_acc, loc_acc_ut, lab_loc_acc = pp_read(annot_dat,opt['fsys']['chunk'],tn_acc[0],stm,'loc')
tn_loc.add(tn_acc[0])
else:
loc_acc, loc_acc_ut, lab_loc_acc = pp_read_empty()
loc, loc_ut = myl.ea(2)
# [[on off center]...]
if (len(loc_ag)>0 and len(loc_acc)>0):
# assigning corresponding ag and acc items
loc,loc_ut,lab_ag,lab_acc = pp_loc_merge(loc_ag_ut,lab_loc_ag,
loc_acc_ut,lab_loc_acc,
opt['preproc'])
# [[on off]...]
elif len(loc_ag)>0:
loc = loc_ag
loc_ut = loc_ag_ut
lab_ag = lab_loc_ag
lab_acc = []
# [[center]...]
elif len(loc_acc)>0:
loc = loc_acc
loc_ut = loc_acc_ut
lab_ag = []
lab_acc = lab_loc_acc
# no loc segs
if len(loc)==0:
lab_ag = []
lab_acc = []
## F0 (2) ################################
## preproc + filling copa.f0 #############
if 'skip_f0' not in opt:
f0, t, y, bv = pp_f0_preproc(f0,glb[-1][1],opt)
copa['data'][ii][i]['f0'] = {'t':t, 'y':y, 'bv':bv}
else:
# horiz extrapolation only to sync f0 and glob
# for embedding in augment
f0 = pp_zp(f0,glb[-1][1],opt,True)
copa['data'][ii][i]['f0'] = {'t':f0[:,0], 'y':f0[:,1]}
## error?
if np.max(y)==0:
myLog("ERROR! {} contains only zeros that will cause trouble later on.\nPlease remove f0, audio and annotation file from data and re-start the analysis.".format(ff['f0'][ii]),True)
## sync onsets of glob and loc to f0 #####
if len(glb)>0:
glb[0,0] = np.max([glb[0,0],f0[0,0]])
glb_ut[0,0] = np.max([glb_ut[0,0],f0[0,0]])
if len(loc)>0:
loc[0,0] = np.max([loc[0,0],f0[0,0]])
loc_ut[0,0] = np.max([loc_ut[0,0],f0[0,0]])
if len(chunk)>0:
chunk[0,0] = np.max([chunk[0,0],f0[0,0]])
chunk_ut[0,0] = np.max([chunk_ut[0,0],f0[0,0]])
# for warnings
fstm = copa['data'][ii][i]['fsys']['annot']['stm']
## copa.chunk ############################
copa['data'][ii][i]['chunk'] = {}
jj, bad_j, good_j = 0, np.asarray([]).astype(int), np.asarray([]).astype(int)
for j in range(len(chunk)):
if too_short('chunk',chunk[j,],fstm):
bad_j = np.append(bad_j,j)
continue
good_j = np.append(good_j,j)
copa['data'][ii][i]['chunk'][jj] = {}
copa['data'][ii][i]['chunk'][jj]['t'] = chunk[j,]
copa['data'][ii][i]['chunk'][jj]['to'] = chunk_ut[j,]
if len(lab_chunk)>0:
copa['data'][ii][i]['chunk'][jj]['lab'] = lab_chunk[j]
else:
copa['data'][ii][i]['chunk'][jj]['lab'] = ''
jj+=1
if len(bad_j)>0:
chunk = chunk[good_j,]
chunk_ut = chunk_ut[good_j,]
## copa.glob ############################
copa['data'][ii][i]['glob'] = {}
jj, bad_j, good_j = 0, np.asarray([]).astype(int), np.asarray([]).astype(int)
for j in range(len(glb)):
if too_short('glob',glb[j,],fstm):
bad_j = np.append(bad_j,j)
continue
good_j = np.append(good_j,j)
copa['data'][ii][i]['glob'][jj] = {}
copa['data'][ii][i]['glob'][jj]['t'] = glb[j,]
copa['data'][ii][i]['glob'][jj]['to'] = glb_ut[j,]
copa['data'][ii][i]['glob'][jj]['ri'] = np.array([]).astype(int)
if len(lab_glb)>0:
copa['data'][ii][i]['glob'][jj]['lab'] = lab_glb[j]
else:
copa['data'][ii][i]['glob'][jj]['lab'] = ''
jj+=1
if len(bad_j)>0:
glb = glb[good_j,]
glb_ut = glb_ut[good_j,]
# within-chunk position of glb
rci = pp_apply_along_axis(pp_link,1,glb,chunk)
for j in myl.idx(glb):
is_init, is_fin = pp_initFin(rci,j)
copa['data'][ii][i]['glob'][j]['is_init_chunk'] = is_init
copa['data'][ii][i]['glob'][j]['is_fin_chunk'] = is_fin
copa['data'][ii][i]['glob'][j]['tier']=tn[0]
## copa.loc #############################
copa['data'][ii][i]['loc'] = {}
jj, bad_j, good_j = 0, np.asarray([]).astype(int), np.asarray([]).astype(int)
# [center...] to sync gnl feats if required by opt['preproc']['loc_sync']
loc_t = myl.ea()
# row-wise application: uniformly 3 time stamps: [[on off center]...]
# - midpoint in case no accent is given
# - symmetric window around accent in case no AG is given
loc = pp_apply_along_axis(pp_loc,1,loc,opt)
# link each idx in loc.t to idx in glob.t
# index in ri: index of locseg; value in ri: index of globseg
ri = pp_apply_along_axis(pp_link,1,loc,glb)
# ... same for loc.t to chunk.t
rci = pp_apply_along_axis(pp_link,1,loc,chunk)
# over segments [[on off center] ...]
for j in myl.idx(loc):
# no parenting global segment -> skip
if ri[j] < 0:
bad_j = np.append(bad_j,j)
continue
# strict layer loc limit (not crossing glb boundaries)
locSl = pp_slayp(loc[j,:],glb[ri[j],:])
# [on off] of normalization window (for gnl features)
# not crossing globseg, not smaller than locSl[1:2]
loc_tn = pp_loc_w_nrm(loc[j,:],glb[ri[j],:],opt)
if too_short('loc',locSl,fstm):
bad_j = np.append(bad_j,j)
continue
good_j = np.append(good_j,j)
copa['data'][ii][i]['loc'][jj] = {}
copa['data'][ii][i]['loc'][jj]['ri'] = ri[j]
#### position of local segment in global one and in chunk
# 'is_fin', 'is_init', both 'yes' or 'no'
is_init, is_fin = pp_initFin(ri,j)
#### same for within chunk position
is_init_chunk, is_fin_chunk = pp_initFin(rci,j)
copa['data'][ii][i]['loc'][jj]['is_init'] = is_init
copa['data'][ii][i]['loc'][jj]['is_fin'] = is_fin
copa['data'][ii][i]['loc'][jj]['is_init_chunk'] = is_init_chunk
copa['data'][ii][i]['loc'][jj]['is_fin_chunk'] = is_fin_chunk
if len(tn_ag)>0:
copa['data'][ii][i]['loc'][jj]['tier_ag'] = tn_ag[0]
else:
copa['data'][ii][i]['loc'][jj]['tier_ag'] = ''
if len(tn_acc)>0:
copa['data'][ii][i]['loc'][jj]['tier_acc'] = tn_acc[0]
else:
copa['data'][ii][i]['loc'][jj]['tier_acc'] = ''
#### labels
if len(lab_ag)>0:
copa['data'][ii][i]['loc'][jj]['lab_ag'] = lab_ag[j]
else:
copa['data'][ii][i]['loc'][jj]['lab_ag'] = ''
if len(lab_acc)>0:
copa['data'][ii][i]['loc'][jj]['lab_acc'] = lab_acc[j]
else:
copa['data'][ii][i]['loc'][jj]['lab_acc'] = ''
copa['data'][ii][i]['loc'][jj]['t'] = locSl
copa['data'][ii][i]['loc'][jj]['to'] = loc_ut[j,:]
copa['data'][ii][i]['loc'][jj]['tn'] = loc_tn
loc_t = np.append(loc_t,locSl[2])
if (ri[j]>-1):
copa['data'][ii][i]['glob'][ri[j]]['ri'] = np.concatenate((copa['data'][ii][i]['glob'][ri[j]]['ri'],[jj]),axis=0)
jj+=1
if len(bad_j)>0:
loc = loc[good_j,]
loc_ut = loc_ut[good_j,]
### bnd, gnl_*, rhy_* input #############################
# additional tier index layer, since features can be derived
# from several tiers
# copa['data'][ii][i][bnd|gnl_*|rhy_*][tierIdx][segmentIdx]
# (as opposed to chunk, glob, loc)
# keys: tierNameIdx in opt, values: t, ot, lab, tier
# in accent augment embedding feature sets will be time-synced
# to loc segments (see copasul_augment.aug_prep_copy())
if 'loc_sync' not in opt['preproc']:
doSync = False
else:
doSync = opt['preproc']['loc_sync']
# over feature set (bnd etc)
for ft in myl.lists('bgd'):
if ft not in opt['fsys']:
continue
r = {} # becomes copa['data'][ii][i][ft]
# tier idx
k=0
lab_pau = opt['fsys'][ft]['lab_pau']
## over tiers for channel i
for tn in pp_tiernames(opt['fsys'],ft,'tier',i):
# pp_tiernames overgeneralizes in some contexts -> skip
# non-existent names
if not pp_tier_in_annot(tn,annot_dat,opt['fsys'][ft]['typ']):
continue
tx, to, lab = pp_read(annot_dat,opt['fsys'][ft],tn,'','bdg')
# time to intervals (analysis + norm windows)
# t_nrm: local normalization window limited by chunk boundaries
# t_trend: windows from chunk onset to boundary, and
# from boundary to chunk offset
t, t_nrm, t_trend = pp_t2i(tx,ft,opt,chunk)
r[k] = {}
jj, bad_j, good_j = 0, np.asarray([]).astype(int), np.asarray([]).astype(int)
# for sync, use each loc interval only once
blocked_i = {}
## over segment index
for a in myl.idx(lab):
# skip too short segments until sync with locseg is required
# that will be checked right below
if (too_short(tn,t[a,:],fstm) and
((not doSync) or (tn not in tn_loc))):
bad_j = np.append(bad_j,a)
continue
if (doSync and (tn in tn_loc)):
mfi = myl.find_interval(loc_t,t[a,:])
if len(mfi) == 0:
bad_j = np.append(bad_j,a)
continue
# all mfi indices already consumed?
all_consumed=True
for ij in range(len(mfi)):
if mfi[ij] not in blocked_i:
ijz=ij
all_consumed=False
blocked_i[mfi[ij]]=True
if not all_consumed:
break
if all_consumed:
bad_j = np.append(bad_j,a)
continue
good_j = np.append(good_j,a)
r[k][jj] = {'tier':tn,'t':t[a,:],'to':to[a,:],
'tn':t_nrm[a,:],'tt':t_trend[a,:],
'lab':lab[a]}
jj+=1
if len(bad_j)>0:
t = t[good_j,]
tx = tx[good_j,]
### position in glob and chunk segments
# links to parent segments (see above loc, or glb)
ri = pp_apply_along_axis(pp_link,1,tx,glb)
rci = pp_apply_along_axis(pp_link,1,tx,chunk)
# over segment idx
for j in myl.idx(tx):
is_init, is_fin = pp_initFin(ri,j)
is_init_chunk, is_fin_chunk = pp_initFin(rci,j)
r[k][j]['is_init'] = is_init
r[k][j]['is_fin'] = is_fin
r[k][j]['is_init_chunk'] = is_init_chunk
r[k][j]['is_fin_chunk'] = is_fin_chunk
# rates of tier_rate entries for each segment
# (all rate tiers of same channel as tn)
if re.search('^rhy_',ft):
#...['tier_rate'] -> npArray of time values (1- or 2-dim)
tt = pp_tier_time_to_tab(annot_dat,opt['fsys'][ft]['tier_rate'],i,lab_pau)
# add rate[myTier] = myRate
# ri[myTier] = list of reference idx
# segment index
# (too short segments already removed from t)
for a in range(len(t)):
r[k][a]['rate']={}
r[k][a]['ri']={}
# over rate tiers
for b in tt:
rate, ri = pp_rate_in_interval(tt[b],r[k][a]['t'])
r[k][a]['rate'][b] = rate
r[k][a]['ri'][b] = ri
k+=1
copa['data'][ii][i][ft] = r
if ft == 'rhy_f0':
copa['data'][ii][i]['rate'] = pp_f_rate(annot_dat,opt,ft,i)
#sys.exit() #!
return copa
# checks in which position a segment/time stamp X lies within its
# parent segment Y (+/- initial, +/- final)
# IN:
# ri: list of reverse indices (index in list: index of X in its tier,
# value: index of Y in parent tier)
# j: index of current X
# OUT:
# is_init: 'yes'|'no' X is in initial position in Y
# is_fin: 'yes'|'no' X is in final position in Y
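# Example: ri = [0,0,1,1] ->
#   j=0: is_init='yes', is_fin='no'   (first X in parent segment 0)
#   j=1: is_init='no',  is_fin='yes'  (last X in parent segment 0)
#   j=3: is_init='no',  is_fin='yes'  (last X in parent segment 1)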
def pp_initFin(ri,j):
# does not belong to any parent segment
if ri[j] < 0:
return 'no', 'no'
# initial?
if j==0 or ri[j-1] != ri[j]:
is_init='yes'
else:
is_init='no'
# final?
if j==len(ri)-1 or ri[j+1] != ri[j]:
is_fin='yes'
else:
is_fin='no'
return is_init, is_fin
# transforms glob point into segment tier
# - points are considered to be right segment boundaries
# - segments do not cross chunk boundaries
# - pause labeled points are skipped
# IN:
# pnt: [[timePoint]...] of global segment right boundaries
# pnt_ut [[timePoint]...] same with orig time
# pnt_lab: [label ...] f.a. timePoint
# chunk [[on off] ...] of chunks not to be crossed
# chunk_ut [[on off] ...] same with orig time
# opt with key 'lab_pau':
# OUT:
# seg [[on off]...] from pnt
# seg_ut [[on off]...] from pnt_ut
# seg_lab [lab ...] from pnt_lab
def pp_point2segment(pnt,pnt_ut,pnt_lab,chunk,chunk_ut,opt): #!g
# output
seg, seg_ut, seg_lab = myl.ea(), myl.ea(), []
# phrase onset, current chunk idx
t_on, t_on_ut, c_on = chunk[0,0], chunk_ut[0,0], 0
for i in myl.idx_a(len(pnt)):
# pause -> only shift onset
if pp_is_pau(pnt_lab[i],opt['lab_pau']):
t_on, t_on_ut = pnt[i,0], pnt_ut[i,0]
c_on = myl.first_interval(t_on,chunk)
continue
# current offset
t_off, t_off_ut = pnt[i,0], pnt_ut[i,0]
# if needed right shift onset to chunk start
c_off = myl.first_interval(t_off,chunk)
if min(c_on,c_off)>-1 and c_off > c_on:
t_on, t_on_ut = chunk[c_off,0], chunk_ut[c_off,0]
# update output
seg = myl.push(seg,[t_on, t_off])
seg_ut = myl.push(seg_ut,[t_on_ut, t_off_ut])
seg_lab.append(pnt_lab[i])
# update time stamps
t_on = t_off
t_on_ut = t_off_ut
c_on = c_off
return seg, seg_ut, seg_lab
# normalization window for local segment
# - centered on locseg center
# - limited by parenting glb segment
# - not smaller than loc segment
# IN:
# loc - current local segment [on off center]
# glb - current global segment [on off]
# opt - copa['config']
# OUT:
# tn - normalization window [on off]
def pp_loc_w_nrm(loc,glb,opt):
# special window for loc?
if (('loc' in opt['preproc']) and ('nrm_win' in opt['preproc']['loc'])):
w = opt['preproc']['loc']['nrm_win']/2
else:
w = opt['preproc']['nrm_win']/2
c = loc[2]
tn = np.asarray([c-w,c+w])
tn[0] = max([min([tn[0],loc[0]]),glb[0]])
tn[1] = min([max([tn[1],loc[1]]),glb[1]])
return myl.cellwise(myl.trunc2,tn)
# signals and log-warns if segment is too short
# IN:
# type of segment 'chunk|glob|...'
# seg row ([on off] or [on off center])
# fileStem for log warning
# OUT:
# True|False if too short
# warning message in log file
def too_short(typ,seg,fstm):
if ((seg[1] <= seg[0]) or
(len(seg)>2 and (seg[2] < seg[0] or seg[2] > seg[1]))):
myLog("WARNING! {}: {} segment too short: {} {}. Segment skipped.".format(fstm,typ,seg[0],seg[1]))
return True
return False
def rm_too_short(typ,dat,fstm):
d = dat[:,1]-dat[:,0]
bad_i = myl.find(d,'<=',0)
if len(bad_i)>0:
good_i = myl.find(d,'>',0)
myLog("WARNING! {}: file contains too short {} segments, which were removed.".format(fstm,typ))
dat = dat[good_i,]
return dat
# F0 preprocessing:
# - zero padding
# - outlier identification
# - interpolation over voiceless segments and outliers
# - smoothing
# - semitone transform
# IN:
# f0: [[t f0]...]
# t_max: max time to which contour is needed
# opt: copa['config']
# OUT:
# f0: [[t zero-padded]...]
# t: time vector
# y: preprocessed f0 vector
# bv: f0 base value (Hz)
def pp_f0_preproc(f0,t_max,opt):
# zero padding
f0 = pp_zp(f0,t_max,opt)
# detach time and f0
t,y = f0[:,0], f0[:,1]
# do nothing with zero-only segments
# (-> will be reported as error later on)
if np.max(y)==0:
return f0, t, y, 1
# setting outlier to 0
y = sif.pp_outl(y,opt['preproc']['out'])
# interpolation over 0
y = sif.pp_interp(y,opt['preproc']['interp'])
# smoothing
if 'smooth' in opt['preproc']:
y = sif.pp_smooth(y,opt['preproc']['smooth'])
# <0 -> 0
y[myl.find(y,'<',0)]=0
# semitone transform, base ref value (in Hz)
# later by calculating the base value over a grp factor (e.g. spk)
if 'base_prct_grp' in opt['preproc']:
bv = -1
else:
y, bv = pp_semton(y,opt)
return f0, t, y, bv
# merging AG segment and ACC event tiers to n x 3 array [[on off center]...]
# opt['preproc']['loc_align']='skip': only keeping AGs and ACC for which exactly 1 ACC is within AG
# 'left': if >1 acc in ag keeping first one
# 'right': if >1 acc in ag keeping last one
# IN:
# ag: nx2 [[on off]...] of AGs
# lab_ag: list of AG labels
# acc: mx1 [[timeStamp]...]
# lab_acc: list of ACC labels
# opt: opt['preproc']
# OUT:
# d: ox3 [[on off center]...] ox3, %.2f trunc times
# d_ut: same not trunc'd
# lag: list of AG labels
# lacc: list of ACC labels
def pp_loc_merge(ag,lab_ag,acc,lab_acc,opt):
d = myl.ea()
lag = []
lacc = []
for i in range(len(ag)):
j = myl.find_interval(acc,ag[i,:])
jj = -1
#!aa
#if len(j)>1:
# print('err > 1')
# myl.stopgo()
#elif len(j)<1:
# print('err < 1')
# myl.stopgo()
if len(j)==1:
jj = j[0]
elif len(j)>1 and opt['loc_align'] != 'skip':
if opt['loc_align']=='left':
jj = j[0]
elif opt['loc_align']=='right':
jj = j[-1]
if jj < 0:
continue
d = myl.push(d,[ag[i,0],ag[i,1],acc[jj]])
lag.append(lab_ag[i])
lacc.append(lab_acc[jj])
return myl.cellwise(myl.trunc2,d),d,lag,lacc
# grouping values from filename
# IN:
# copa
# ii fileIdx
# i channelIdx
# OUT:
# +['data'][ii][i]['grp'][myVar] = myVal
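# Example (hypothetical values): stem 'abc_f_23' with fsys.grp.sep='_' and
# fsys.grp.lab=['code','sex','age'] yields grp={'code':'abc','sex':'f','age':'23'};
# empty label entries skip the corresponding stem part.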
def pp_grp(copa,ii,i):
copa['data'][ii][i]['grp']={}
# grouping options
opt = copa['config']['fsys']['grp']
if len(opt['lab'])>0:
myStm = copa['data'][ii][i]['fsys'][opt['src']]['stm']
g = re.split(opt['sep'], myStm)
for j in myl.idx_a(len(g)):
if j >= len(opt['lab']):
myLog("ERROR! {} cannot be split into grouping values".format(myStm),True)
lab = opt['lab'][j]
if len(lab)==0: continue
copa['data'][ii][i]['grp'][lab]=g[j]
return copa
# robustness wrapper (for non-empty lists only)
def pp_apply_along_axis(fun,dim,var,opt):
if len(var)>0:
return np.apply_along_axis(fun,dim,var,opt)
return []
# file level rates
# IN:
# tg - annot file dict
# opt - full config dict (the feature-set sub-config is read from opt['fsys'][ft])
# ft - featureSetName
# i - channelIdx
# OUT:
# rate - dict for rate of myRateTier events/intervals in file
def pp_f_rate(tg,opt,ft,i):
fsys = opt['fsys'][ft]
lab_pau = fsys['lab_pau']
rate={}
# over tier_rate names
if 'tier_rate' not in fsys:
return rate
# tier names for resp channel
tn_opt = {'ignore_sylbnd':True}
for rt in pp_tiernames(opt['fsys'],ft,'tier_rate',i,tn_opt):
if rt in rate: continue
# hacky workaround since pp_tiernames() also outputs tier names not in TextGrid
if rt not in tg['item_name']:
continue
if rt not in tg['item_name']:
# if called by augmentation
if 'sloppy' in opt:
myLog("WARNING! Annotation file does not (yet) contain tier {} which is required by the tier_rate element for feature set {}. Might be added by augmentation. If not this missing tier will result in an error later on.".format(rt,ft))
continue
else:
myLog("ERROR! Annotation file does not (yet) contain tier {} which is required by the tier_rate element for feature set {}.".format(rt,ft),True)
t = tg['item'][tg['item_name'][rt]]
# file duration
l = tg['head']['xmax']-tg['head']['xmin']
if 'intervals' in t:
sk = 'intervals'
fld = 'text'
else:
sk = 'points'
fld = 'mark'
# empty tier
if sk not in t:
rate[rt] = 0
else:
n=0
for k in myl.numkeys(t[sk]):
if pp_is_pau(t[sk][k][fld],lab_pau): continue
n += 1
rate[rt] = n/l
return rate
# gives interval or event rate within on and offset in bnd
# IN:
# t ndarray 1-dim for events, 2-dim for intervals
# bnd [on off]
# OUT:
# r - rate
# ri - ndarray indices of contained segments
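# Example (interval tier): for bnd=[1.0,2.0], an interval [0.8,1.2] overlapping
# the window contributes only its overlapping fraction (0.2/0.4 = 0.5) to n,
# i.e. partially contained intervals are counted proportionally.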
def pp_rate_in_interval(t,bnd):
l = bnd[1]-bnd[0]
# point tier
if myl.ndim(t)==1:
i = myl.find_interval(t,bnd)
n = len(i)
# interval tier
else:
i = myl.intersect(myl.find(t[:,0],'<',bnd[1]),
myl.find(t[:,1],'>',bnd[0]))
n = len(i)
# partial intervals within bnd
if n>0:
if t[i[0],0]<bnd[0]:
n -= (1-((t[i[0],1]-bnd[0])/(t[i[0],1]-t[i[0],0])))
if t[i[-1],1]>bnd[1]:
n -= (1-((bnd[1]-t[i[-1],0])/(t[i[-1],1]-t[i[-1],0])))
n = max([n,0])
return n/l, i
# returns time info of tiers as table
# IN:
# f - textGrid dict
# rts - list of tiernames to be processed
# ci - channelIdx
# lab_pau - pause label
# OUT:
# tt[myTier] = ndarray of time stamps, resp. on- offsets
# REMARKS:
# as in pp_read() pause intervals are skipped, thus output of both functions is in sync
def pp_tier_time_to_tab(tg,rts,ci,lab_pau):
tt={}
# over tier names for which event rate is to be determined
for rt in rts:
x = rt
if rt not in tg['item_name']:
##!!ci crt = "{}_{}".format(rt,ci)
crt = "{}_{}".format(rt,int(ci+1))
if crt not in tg['item_name']:
myLog("WARNING! Tier {} does not exist. Cannot determine event rates for this tier.".format(rt))
continue
else:
x = crt
tt[x] = myl.ea()
t = tg['item'][tg['item_name'][x]]
if 'intervals' in t:
for i in myl.numkeys(t['intervals']):
if pp_is_pau(t['intervals'][i]['text'],lab_pau): continue
tt[x]=myl.push(tt[x],np.asarray([t['intervals'][i]['xmin'],t['intervals'][i]['xmax']]))
elif 'points' in t:
for i in myl.numkeys(t['points']):
tt[x]=myl.push(tt[x],t['points'][i]['time'])
tt[x] = myl.cellwise(myl.trunc2,tt[x])
return tt
# returns list of tiernames from fsys of certain TYP in certain channel idx
# IN:
# fsys subdict (=copa['config']['fsys'] or
# copa['config']['fsys']['augment'])
# fld subfield: 'chunk', 'syl', 'glob', 'loc', 'rhy_*', etc
# typ tierType: 'tier', 'tier_ag', 'tier_acc', 'tier_rate', 'tier_out_stm' ...
# ci channelIdx
# tn_opt <{}> caller-customized options
# 'ignore_sylbnd' TRUE
# OUT:
# tn listOfTierNames for channel ci in fsys[fld][typ]
# Remarks:
# - tn elements are either given in fsys as full names or as stems
# (for the latter: e.g. *['tier_out_stm']*, ['chunk']['tier'])
# - In the latter case the full name 'myTierName_myChannelIdx' has been
# added to the fsys['channel'] keys in copasul_analysis and
# is added as full name to tn
# - tn DOES NOT check, whether its elements are contained in annotation file
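# Example: for an entry 'syl' and channel index ci=1 (i.e. second channel), the
# candidates 'syl', 'syl_2' (and 'syl_bnd_2' unless 'ignore_sylbnd' is set) are
# checked against fsys['channel'] and returned if registered for that channel.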
def pp_tiernames(fsys,fld,typ,ci,tn_opt={}):
tn = []
if ((fld not in fsys) or (typ not in fsys[fld])):
return tn
# over tiernames
xx = cp.deepcopy(fsys[fld][typ])
if type(xx) is not list:
xx = [xx]
for x in xx:
# append channel idx for tiers generated in augmentation step
# add bnd infix for syllable augmentation
xc = "{}_{}".format(x,int(ci+1))
if 'ignore_sylbnd' not in tn_opt.keys():
xbc = "{}_bnd_{}".format(x,int(ci+1))
yy = [x,xc,xbc]
else:
yy = [x,xc]
for y in yy:
if ((y in fsys['channel']) and (fsys['channel'][y]==ci)):
tn.append(y)
if (fld == 'glob' and typ == 'tier' and ('syl' in fsys) and ('tier_out_stm' in fsys['syl'])):
add = "{}_bnd_{}".format(fsys['syl']['tier_out_stm'],int(ci+1))
if add not in tn:
tn.append(add)
elif (fld == 'loc' and typ == 'tier_acc' and ('syl' in fsys) and ('tier_out_stm' in fsys['syl'])):
add = "{}_{}".format(fsys['syl']['tier_out_stm'],int(ci+1))
if add not in tn:
tn.append(add)
return tn
### returns input file lists
# IN:
# option dict
# OUT:
# ff['f0'|'aud'|'annot'|'pulse'] list of full/path/files
# only defined for those keys,
# where files are available
# checks for list lengths
def pp_file_collector(opt):
ff={}
for x in myl.lists('afa'):
if x not in opt['fsys']:
continue
f = myl.file_collector(opt['fsys'][x]['dir'],
opt['fsys'][x]['ext'])
if len(f)>0 or x=='annot':
ff[x]=f
# length check
# file lists must have length 0 or equal length
# at least one list must have length > 0
# annotation files can be generated from scratch
# in this case stems of f0 (or aud) files are taken over
for xy in ['f0', 'aud', 'annot']:
if xy not in ff:
myLog("ERROR! No {} files found!".format(xy),True)
l = max(len(ff['f0']),len(ff['aud']),len(ff['annot']))
#print(len(ff['f0']),len(ff['aud']),len(ff['annot']))
if l==0:
myLog("ERROR! Neither signal nor annotation files found!",True)
for x in myl.lists('afa'):
if x not in ff:
continue
if ((len(ff[x])>0) and (len(ff[x]) != l)):
myLog("ERROR! Numbers of f0/annotation/audio/pulse files must be 0 or equal!",True)
if len(ff['annot'])==0:
if ((not opt['fsys']['annot']) or (not opt['fsys']['annot']['dir']) or
(not opt['fsys']['annot']['ext']) or (not opt['fsys']['annot']['typ'])):
myLog("ERROR! Directory, type, and extension must be specified for annotation files generated from scratch!",True)
if len(ff['f0'])>0:
gg = ff['f0']
else:
gg = ff['aud']
for i in range(len(gg)):
f = os.path.join(opt['fsys']['annot']['dir'],
"{}.{}".format(myl.stm(gg[i]),opt['fsys']['annot']['ext']))
ff['annot'].append(f)
return ff
### bnd data: time stamps to adjacent intervals
### gnl_*|rhy_* data: time stamps to intervals centered on time stamp
### ! chunk constraint: interval boundaries are limited by chunk boundaries if any
### normalizing time windows
### bb becomes copa...[myFeatSet]['t']
### bb_nrm becomes copa...[myFeatSet]['tn']
### bb_trend becomes copa...[myFeatSet]['tt']
# IN:
# b - 2-dim time stamp [[x],[x],...] or interval [[x,x],[x,x]...] array
# typ - 'bnd'|'gnl_*'|'rhy_*'
# opt - copa['config']
# t_chunks - mx2 array: [start end] of interpausal chunk segments (at least file [start end])
# untrunc: <False>|True
# if True, returns original (not truncated) time values,
# if False: trunc2
# OUT:
# bb - nx2 array: [[start end]...]
# GNL_*, RHY_*: analysis window
# segment input: same as input
# time stamp input: window centered on time stamp (length, see below)
# BND: adjacent segments
# segment input: same as input
# time stamp input: segment between two time stamps (starting from chunk onset)
# bb_nrm - nx2 array
# GNL_*, RHY_*: [[start end]...] normalization windows
# segment input: centered on segment midpoint
# minimum length: input segment
# time stamp input: centered on time stamp
# BND: uniform boundary styl window independent of length of adjacent segments
# segment input: [[start segmentOFFSET (segmentONSET) end]...]
# time stamp input: [[start timeStamp end]...]
# bb_trend - nx3 array for trend window pairs in current chunk
# probably relevant for BND only
# GNL_*, RHY_*:
# segment input: [[chunkOnset segmentMIDPOINT chunkOffset]...]
# time stamp input : [[chunkOnset timeStamp chunkOffset]...]
# BND:
# segment input:
# non-chunk-final: [[chunkOnset segmentOFFSET segmentONSET chunkOffset]...]
# chunk-final: [[chunk[I]Onset segmentOFFSET segmentONSET chunk[I+1]Offset]...]
# for ['cross_chunk']=False, chunk-final is same as non-chunk-final with
# segmentOFFSET=segmentONSET
# time stamp input : [[chunkOnset timeStamp chunkOffset]...]
# REMARKS:
# - all windows (analysis, norm, trend) are limited by chunk boundaries
# - analysis window length: opt['preproc']['point_win']
# - norm window length: opt['preproc']['nrm_win'] for GNL_*, RHY_*
# opt['bnd']['win'] for BND
# - BND: - segmentOFFSET and segmentONSET refer to subsequent segments
# -- for non-chunk-final segments: always in same chunk
# -- for chunk-final segments: if ['cross_chunk'] is False than
# segmentOFFSET is set to segmentONSET.
# If ['cross_chunk'] is True, then segmentONSET refers to
# the initial segment of the next chunk and Offset refers to the next chunk
# -- for non-final segments OR for ['cross_chunk']=True pauses between the
# segments are not considered for F0 stylization and pause length is an interpretable
# boundary feature. For final segments AND ['cross_chunk']=False always zero pause
# length is measured, so that it is not interpretable (since ONSET==OFFSET)
# -> for segments always recommended to set ['cross_chunk']=TRUE
# -> for time stamps depending on the meaning of the markers (if refering to labelled
# boundaries, then TRUE is recommended; if referring to other events restricted
# to a chunk only, then FALSE)
# - analogously chunk-final time stamps are processed dep on the ['cross_chunk'] value
# - for time stamps no difference between final- and non-final position
# a: analysis window
# n: normalization window
# t: trend window
# x: event
## GNL_*, RHY_*
# segment input: [... [...]_a ...]_n
# event input: [... [.x.]_a ...]_n
## BND
# segment input: [... [...]_a ...]_n
# [ ]
# event input: [...|...]_a x [...|...]_a
# [ ]_n
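# Example (bnd, time stamp input): a boundary marker at t=2.5 s inside chunk
# [1.0 4.0] yields analysis window [previousMarkerOrChunkOnset 2.5], nrm window
# [2.5-win/2 2.5 2.5+win/2] clipped to the chunk, and trend window [1.0 2.5 4.0].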
def pp_t2i(b,typ,opt,t_chunks,untrunc=False):
bb, bb_nrm, bb_trend = myl.ea(3)
## column number
# nc=1: time stamps, =2: intervals
nc = myl.ncol(b)
## window half lengths
# wl - analysis -> bb
# wl_nrm - normalization -> bb_nrm
# for bnd: longer norm window in ['styl']['bnd']['win'] is taken
if ((typ in opt['preproc']) and ('point_win' in opt['preproc'][typ])):
wl = opt['preproc'][typ]['point_win']/2
else:
wl = opt['preproc']['point_win']/2
if typ == 'bnd':
wl_nrm = opt['styl']['bnd']['win']/2
else:
if ((typ in opt['preproc']) and ('nrm_win' in opt['preproc'][typ])):
wl_nrm = opt['preproc'][typ]['nrm_win']/2
else:
wl_nrm = opt['preproc']['nrm_win']/2
#### bnd
if typ=='bnd':
if nc==1:
### time stamp input
# first onset
o=t_chunks[0,0]
for i in range(len(b)):
# time point: current time stamp
c = b[i,0]
## analysis window
# chunk idx of onset and current time stamp
ci1 = myl.first_interval(o,t_chunks)
ci2 = myl.first_interval(c,t_chunks)
# next segments chunk to get offset in case of wanted chunk-crossing
# for trend windows
if i+1 < len(b) and opt['styl']['bnd']['cross_chunk']:
ci3 = myl.first_interval(b[i+1,0],t_chunks)
else:
ci3 = ci2
# same chunk or chunk-crossing wanted -> adjacent
if (ci1==ci2 or ci2<0 or opt['styl']['bnd']['cross_chunk']):
bb = myl.push(bb,[o,c])
# different chunks -> onset is chunk onset of current time stamp
else:
bb = myl.push(bb,[t_chunks[ci2,0],c])
## nrm window
ww = pp_limit_win(c,wl_nrm,t_chunks[ci2,:])
bb_nrm = myl.push(bb_nrm,[ww[0],c,ww[1]])
## trend window
bb_trend = myl.push(bb_trend,[t_chunks[ci2,0],c,t_chunks[ci3,1]])
# update onset
o = c
# last segment: current time stamp to chunk offset
ci = myl.first_interval(o,t_chunks)
if ci<0: ci = len(t_chunks)-1
if o<t_chunks[ci,1]:
bb = myl.push(bb,[o,t_chunks[ci,1]])
else:
### segment input -> simple copy
## analysis windows
bb = b
for i in range(len(b)):
# time point: segment offset
c = b[i,1]
# its chunk idx
ci1 = myl.first_interval(c,t_chunks)
# its chunk limitations
r = pp_chunk_limit(c,t_chunks)
# next segment
if i+1<len(b):
# next segments onset
c2 = b[i+1,0]
# next segment's chunk
ci2 = myl.first_interval(c2,t_chunks)
# range-offset and next segment's onset for trend window
r2t = r[1]
c2t = c2
# crossing chunk boundaries
# -> adjust segmentOnset c2t and chunkOffset r2t for trend window
if ci2 > ci1:
if opt['styl']['bnd']['cross_chunk']:
r2t = t_chunks[ci2,1]
else:
c2t = c
# for norm window
c2 = c
else:
c2 = c
c2t = c
r2t = r[1]
## nrm window: limit to chunk boundaries
ww = pp_limit_win(c,wl_nrm,r)
if c2 != c:
vv = pp_limit_win(c2,wl_nrm,r)
bb_nrm = myl.push(bb_nrm,[ww[0],c,c2,vv[1]])
else:
bb_nrm = myl.push(bb_nrm,[ww[0],c,c2,ww[1]])
## trend window
bb_trend = myl.push(bb_trend,[r[0],c,c2t,r2t])
# gnl, rhy
else:
if nc>1:
### segment input (simple copy)
## analysis windows
bb = b
# if needed: event -> segment, nrm window
for i in range(len(b)):
# center (same for time stamps and segments)
c = np.mean(b[i,:])
# chunk bnd limits
r = pp_chunk_limit(c,t_chunks)
### event input
if nc==1:
## analysis window
bb = myl.push(bb,pp_limit_win(c,wl,r))
# nrm interval
oo = pp_limit_win(c,wl_nrm,r)
# set minimal length to analysis window
on = min([bb[i,0],oo[0]])
off = max([bb[i,1],oo[1]])
## nrm window
bb_nrm = myl.push(bb_nrm,[on,off])
## trend window
bb_trend = myl.push(bb_trend,[r[0],c,r[1]])
if untrunc==False:
bb = myl.cellwise(myl.trunc2,bb)
bb_nrm = myl.cellwise(myl.trunc2,bb_nrm)
bb_trend = myl.cellwise(myl.trunc2,bb_trend)
return bb, bb_nrm, bb_trend
# limits window of HALF length w centered on time stamp c to range r
# IN:
# c: timeStamp
# w: window half length
# r: limitating range
# OUT:
# s: [on off]
def pp_limit_win(c,w,r):
on = max([r[0],c-w])
off = min([r[1],c+w])
return np.asarray([on,off])
# returns [on off] of chunk in which time stamp is located
# IN:
# c: time stamp
# t_chunks [[on off]...] of chunks
def pp_chunk_limit(c,t_chunks):
ci = myl.first_interval(c,t_chunks)
if ci<0:
# fallback: file boundaries
r = [t_chunks[0,0],t_chunks[-1,1]]
else:
# current chunk boundaries
r = t_chunks[ci,:]
return r
### add file-system info to dict at file-level
# IN:
# config['fsys']
# ff['f0'|'aud'|'annot'] -> list of files
# ii fileIdx
# i channelIdx
# OUT:
# fsys spec
# [i] - fileIdx
# ['f0'|'aud'|'glob'|'loc'|...]
# [stm|dir|typ|tier*|lab*] stem|path|mime|tierNames|pauseEtcLabel
# REMARK:
# tierNames only for respective channel i
def pp_fsys(fsys,ff,ii,i):
fs = {'i':ii}
# 'f0'|'aud'|'augment'|'pulse'|'glob'|'loc'...
for x in myl.lists('facafa'):
# skip 'pulse' etc if not available
if x not in fsys:
continue
# 'f0'|'aud'|'annot'|'pulse' or featSet keys
if x in ff:
fs[x]={'stm':myl.stm(ff[x][ii])}
else:
fs[x]={'stm':myl.stm(ff['annot'][ii])}
for y in fsys[x]:
if y == 'dir':
if x in ff:
fs[x][y] = os.path.dirname(ff[x][ii])
else:
fs[x][y] = os.path.dirname(ff['annot'][ii])
else:
fs[x][y] = fsys[x][y]
return fs
### strict layer principle; limit loc bounds to globseg
# IN:
# loc 1x3 row from locseg array [on off center]
# glb 1x2 row spanning loc row from globseg array
# OUT:
# loc 1x3 row limited to bounds of glob seg
def pp_slayp(loc,glb):
loc[0] = np.max([loc[0],glb[0]])
if loc[1] > loc[0]:
loc[1] = np.min([loc[1],glb[1]])
else:
loc[1] = glb[1]
loc[2] = np.min([loc[1],loc[2]])
loc[2] = np.max([loc[0],loc[2]])
return loc
### row linking from loc to globseg ##################
# IN:
# x row in loc|glob etc (identified by its length;
# loc: len 3, other len 1 or 2)
# y glb matrix
# OUT:
# i rowIdx in glb
# (-1 if not connected)
# REMARK: not yet strict layer constrained fulfilled, thus
# robust assignment
def pp_link(x,y):
if len(y)==0:
return -1
if len(x)>2:
i = myl.intersect(myl.find(y[:,0],'<=',x[2]),
myl.find(y[:,1],'>=',x[2]))
else:
m = np.mean(x)
i = myl.intersect(myl.find(y[:,0],'<=',m),
myl.find(y[:,1],'>=',m))
if len(i)==0:
i = -1
else:
i = i[0]
return int(i)
# checks whether tier or tier_myChannelIdx is contained in annotation
# IN:
# tn - tiername
# an - annotation dict
# typ - 'xml'|'TextGrid'
# ci - <0> channel idx
# OUT:
# True|False
def pp_tier_in_annot(tn,an,typ,ci=0):
##!!ci tc = "{}_{}".format(tn,ci)
tc = "{}_{}".format(tn,int(ci+1))
if (typ == 'xml' and
(tn in an or tc in an)):
return True
if ((typ == 'TextGrid') and ('item_name' in an) and
((tn in an['item_name']) or (tc in an['item_name']))):
return True
return False
# checks whether tier is contained in annotation (not checking for
# tier_myChannelIdx as opposed to pp_tier_in_annot()
# IN:
# tn - tiername
# an - annotation dict
# typ - 'xml'|'TextGrid'
# OUT:
# True|False
def pp_tier_in_annot_strict(tn,an,typ):
if (typ == 'xml' and (tn in an)):
return True
if ((typ == 'TextGrid') and ('item_name' in an) and
(tn in an['item_name'])):
return True
return False
# returns class of tier 'segment'|'event'|''
# IN:
# tn - tierName
# annot - annot dict
# typ - annot type
# OUT: 'segment'|'event'|'' (TextGrid types 'intervals','points' matched segment/event)
def pp_tier_class(tn,annot,typ):
if typ=='xml':
if tn in annot:
return annot[tn]['type']
elif typ=='TextGrid':
if tn in annot['item_name']:
if 'intervals' in annot['item'][annot['item_name'][tn]]:
return 'segment'
return 'event'
return ''
# reads data from table or annotation file
# IN:
# dat - table or xml or TextGridContent
# opt - opt['fsys'][myDomain], relevant sub-keys: 'lab_pau', 'typ' in {'tab', 'xml', 'TextGrid'}
# opt['fsys']['augment'][myDomain] (relevant subdicts copied there in copasul_analysis:opt_init())
# tn - tierName to select content (only relevant for xml and TextGrid)
# fn - fileName for error messages
# call - 'glob'|'loc' etc for customized settings (e.g. pauses are not skipped for glob point tier input)
# OUT:
# d - 2-d array [[on off]...] or [[timeStamp] ...] values truncated as %.2f
# d_ut - same as d with original untruncated time values
# lab - list of labels (empty for 'tab' input)
# REMARK:
# for TextGrid interval tier input, pause labelled segments are skipped
def pp_read(an,opt,tn='',fn='',call=''):
lab = []
## tab input
if opt['typ']=='tab':
d = an
## xml input
elif opt['typ']=='xml':
if not pp_tier_in_annot(tn,an,opt['typ']):
myLog("ERROR! {}: does not contain tier {}".format(fn,tn))
d = myl.ea()
# selected tier
t = an[tn]
# 'segment' or 'event'
tt = t['type']
for i in myl.numkeys(t['items']):
lab.append(t['items'][i]['label'])
if tt=='segment':
d = myl.push(d,[float(t['items'][i]['t_start']),float(t['items'][i]['t_end'])])
else:
d = myl.push(d,float(t['items'][i]['t']))
## TextGrid input
elif opt['typ']=='TextGrid':
if not pp_tier_in_annot(tn,an,opt['typ']):
myLog("ERROR! {}: does not contain tier {}".format(fn,tn))
d = myl.ea()
# selected tier
#print(an['item_name']) #!v
#!e
t = an['item'][an['item_name'][tn]]
# 'intervals'/'text' or 'points'/'mark'
if 'intervals' in t:
kk='intervals'
kt='text'
else:
kk='points'
kt='mark'
# skip empty tier
if kk not in t:
return d,d,lab
for i in myl.numkeys(t[kk]):
if pp_is_pau(t[kk][i][kt],opt['lab_pau']):
# keep pauses for glob point tier input since used
# for later transformation into segment tiers
if not (kk=='points' and call=='glob'):
continue
lab.append(t[kk][i][kt])
if kk=='intervals':
d = myl.push(d,[float(t[kk][i]['xmin']),float(t[kk][i]['xmax'])])
else:
d = myl.push(d,[float(t[kk][i]['time'])])
# Warnings
if len(d)==0:
if opt['typ']=='tab':
myLog("WARNING! {}: empty table\n".format(fn))
else:
myLog("WARNING! {}: no labelled segments contained in tier {}. Replacing by default domain\n".format(fn,tn))
return myl.cellwise(myl.trunc2,d), d, lab
# wrapper around pp_read for f0 input
# - extract channel i
# - resample to 100 Hz
# IN:
# f0_dat: f0 table (1st col: time, 2-end column: channels)
# opt: opt['fsys']['f0']
# i: channelIdx
# OUT:
# f0: [[time f0InChannelI]...]
# f0_ut: same without %.2f trunc values
def pp_read_f0(f0_dat,opt,i):
f0, f0_ut, dummy_lab_f0 = pp_read(f0_dat,opt)
# extract channel from f0 [t f0FromChannelI]
# i+1 since first column is time
f0 = f0[:,[0,i+1]]
f0_ut = f0_ut[:,[0,i+1]]
# kind of resampling to 100 Hz
# correct for praat rounding errors
f0 = pp_t_uniq(f0)
return f0, f0_ut
# checks whether the opt dict contains the key fld
# IN:
# opt dict
# fld keyName
# OUT: boolean
def pp_opt_contains(opt,fld):
if (fld in opt):
return True
return False
# returns TRUE if label indicates pause (length 0, or pattern match
# with string lab_pau). Else FALSE.
# IN:
# lab label
# lab_pau pause-pattern
# OUT:
# True|False
def pp_is_pau(lab,lab_pau):
if lab_pau=='' and len(lab)==0:
return True
p = re.compile(lab_pau)
if len(lab)==0 or p.search(lab):
return True
return False
# time stamps might be non-unique and there might be gaps
# due to praat rounding errors
# -> unique and continuous
def pp_t_uniq(x):
sts = 0.01
# 1. unique
t, i = np.unique(x[:,0],return_index=True)
x = np.concatenate((myl.lol(t).T,myl.lol(x[i,1]).T),axis=1)
# 2. missing time values?
tc = np.arange(t[0],t[-1]+sts,sts)
if len(t)==len(tc):
return x
else:
# add [missingTime 0] rows
d = set(tc)-set(t)
if len(d)==0:
return x
d = np.asarray(list(d))
z = np.zeros(len(d))
add = np.concatenate((myl.lol(d).T,myl.lol(z).T),axis=1)
x = np.concatenate((x,add),axis=0)
# include new rows at correct position
t, i = np.unique(x[:,0],return_index=True)
x = np.concatenate((myl.lol(t).T,myl.lol(x[i,1]).T),axis=1)
#for ii in range(len(x)):
#print(x[ii,])
#myl.stopgo()
return x
### local segments ###################################
# transform
# [[center]...] | [[on off]...]
# to
# [[on off center]...]
# [center] -> symmetric window
# [on off] -> midpoint
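# Example (e.g. with point_win=0.3): [0.5] -> [0.35 0.65 0.5]; [0.4 0.8] -> [0.4 0.8 0.6]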
def pp_loc(x,opt):
# special point win for loc?
if (('loc' in opt['preproc']) and 'point_win' in opt['preproc']['loc']):
wl = opt['preproc']['loc']['point_win']/2
else:
wl = opt['preproc']['point_win']/2
if len(x) == 1:
x = [max(0,x[0]-wl), x[0]+wl, x[0]]
elif len(x) == 2:
x = [x[0], x[1], np.mean(x)]
return myl.cellwise(myl.trunc2,x)
### semitone transformation ##########################
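# With opt['preproc']['st']==1, voiced samples are converted to semitones
# relative to the reference b: y_st = 12*log2(y/b), where b=max(bv,1) and bv is
# the median of all f0 values at or below the base_prct percentile (see pp_bv()).
# Otherwise only the base value bv is subtracted (linear Hz offset).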
def pp_semton(y,opt,bv=-1):
yi = myl.find(y,'>',0)
if opt['preproc']['base_prct']>0 and bv < 0:
bv, b = pp_bv(y[yi],opt)
elif bv > 0:
b = max(bv,1)
else:
bv, b = 0, 1
if opt['preproc']['st']==1:
y[yi] = 12*np.log2(y[yi]/b)
else:
y = y - bv
return y, bv
# calculate base and semitone conversion reference value
# IN:
# yp: f0 values (>0 only!, see pp_semton())
# OUT:
# bv: base value in Hz
# b: semitone conversion reference value (corrected bv)
def pp_bv(yp,opt):
px = np.percentile(yp,opt['preproc']['base_prct'])
yy = yp[myl.find(yp,'<=',px)]
bv = np.median(yp[myl.find(yp,'<=',px)])
b = max(bv,1)
return bv, b
### zero padding ##############################
# IN:
# f0: [[t f0]...]
# rng_max: max time value for which f0 contour is needed
# opt: copa['config']
# extrap: <False> if set then horizontal extrapolation instead of zero pad
# OUT:
# f0: [[t f0]...] with zero (or horizontal) padding left to first sampled value (at sts),
# right to t_max (in sec)
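# Example: with fs=100 Hz (sts=0.01 s) and a first sample at t=0.13 s, rows
# [0.00 0], [0.01 0], ..., [0.12 0] are prepended (the first f0 value is used
# instead of 0 if extrap=True); the contour is extended analogously up to t_max.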
def pp_zp(f0,t_max,opt,extrap=False):
# stepsize
sts = 1/opt['fs']
if extrap:
zpl, zpr = f0[0,1], f0[-1,1]
else:
zpl, zpr = 0, 0
#if sts < f0[0,0]:
# prf = np.arange(sts,f0[0,0],sts)
if 0 < f0[0,0]:
prf = np.arange(0,f0[0,0],sts)
else:
prf = myl.ea()
if f0[-1,0] < t_max:
sfx = np.arange(f0[-1,0]+sts,t_max+sts,sts)
else:
sfx = myl.ea()
if len(prf)>0:
zz = zpl*np.ones(len(prf))
prf = np.concatenate(([prf],[zz]),axis=0).T
f0 = np.append(prf,f0,axis=0)
if len(sfx)>0:
zz = zpr*np.ones(len(sfx))
sfx = np.concatenate(([sfx],[zz]),axis=0).T
f0 = np.append(f0,sfx,axis=0)
return f0
### copa init/preproc diagnosis #######################################
# warnings to logfile:
# not-linked globseg/locseg
def diagnosis(copa,h_log):
h_log.write('# Diagnosis\n')
# error code
ec = 0
c = copa['data']
for ii in myl.numkeys(c):
for i in myl.numkeys(c[ii]):
ec = diagnosis_seg(c,ii,i,'glob',ec,h_log)
ec = diagnosis_seg(c,ii,i,'loc',ec,h_log)
#ec = diagnosis_config(copa['config'],ec,h_log)
if ec==2:
myLog('Too many errors! Exit.',True)
if ec==0:
myLog("Everything seems to be ok!\n")
return ec
# log file output (if filehandle), else terminal output
# IN:
# msg message string
# e <False>|True do exit
def myLog(msg,e=False):
global f_log
try: f_log
except: f_log = ''
if type(f_log) is not str:
f_log.write("{}\n".format(msg))
if e:
f_log.close()
sys.exit(msg)
else:
if e:
sys.exit(msg)
else:
print(msg)
# checks config syntax
def diagnosis_config(opt,ec,h_log):
for x in ['f0','glob','loc','bnd','gnl_f0','gnl_en','rhy_f0','rhy_en']:
if x not in opt['fsys']:
if x=='f0':
h_log.write("ERROR! config.fsys does not contain f0 file field.\n")
ec = 2
continue
for y in ['dir','ext','typ']:
if y not in opt['fsys'][x]:
h_log.write("ERROR! config.fsys.{} does not contain {} field.\n".format(x,y))
ec = 2
continue
# bnd, gnl_* lol
for x in myl.lists('bgd'):
ti = []
tp = []
ps = []
if 'tier' in opt['fsys'][x]:
ti = opt['fsys'][x]['tier']
if 'lab_pau' in opt['fsys'][x]:
ps = opt['fsys'][x]['lab_pau']
return ec
# checks initialized copa subdicts glob, loc
# outputs warnings/errors to log file
# returns error code (0=ok, 1=erroneous, 2=fatal)
def diagnosis_seg(c,ii,i,dom,ec,h_log):
# min seg length
min_l = 0.03
f = "{}/{}.{}".format(c[ii][i]['fsys'][dom]['dir'],c[ii][i]['fsys'][dom]['stm'],c[ii][i]['fsys'][dom]['ext'])
for j in myl.numkeys(c[ii][i][dom]):
# segment not linked
if (('ri' not in c[ii][i][dom][j]) or
((type(c[ii][i][dom][j]['ri']) is list) and
len(c[ii][i][dom][j]['ri'])==0) or
c[ii][i][dom][j]['ri']==''):
ec = 1
if dom=='glob':
h_log.write("WARNING! {}:interval {} {}:global segment does not dominate any local segment\n".format(f,c[ii][i][dom][j]['to'][0],c[ii][i][dom][j]['to'][1]))
else:
h_log.write("WARNING! {}:interval {} {}:local segment is not dominated by any gobal segment\n",f,c[ii][i][dom][j]['to'][0],c[ii][i][dom][j]['to'][1])
# segment too short
if c[ii][i][dom][j]['t'][1]-c[ii][i][dom][j]['t'][0] < min_l:
h_log.write("ERROR! {}:interval {} {}:{} segment too short!\n".format(f,c[ii][i][dom][j]['to'][0],c[ii][i][dom][j]['to'][1],dom))
ec = 2
# locseg center (3rd col) not within [on off] (1st, 2nd col)
if (dom=='loc' and len(c[ii][i][dom][0]['t'])==3):
for j in myl.numkeys(c[ii][i][dom]):
if ((c[ii][i][dom][j]['t'][2] <= c[ii][i][dom][j]['t'][0]) or
(c[ii][i][dom][j]['t'][2] >= c[ii][i][dom][j]['t'][1])):
h_log.write("WARNING! {}:interval {} {}:local segments center not within its intervals. Set to midpoint\n",f,c[ii][i][dom][j]['to'][0],c[ii][i][dom][j]['to'][1])
c[ii][i][dom][j]['t'][2] = myl.trunc2((c[ii][i][dom][j]['t'][0]+c[ii][i][dom][j]['t'][1])/2)
return ec
# returns empty time arrays and label lists (analogously to pp_read())
def pp_read_empty():
return myl.ea(), myl.ea(), []
| mit |
YinongLong/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 73 | 6451 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. Journal of American
Statistical Ass., 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
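# The inliers are drawn from a standard Gaussian, so the true location is the
# zero vector and the true covariance is the identity matrix; all errors below
# are measured against these reference parameters.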
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", lw=lw, color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", lw=lw, color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", lw=lw, color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
GeoStoner/Final-Project-Magnetite-Diffusion | Simple_He_Diffusion_colorball_plt_RS.py | 1 | 4778 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 20 11:59:58 2016
@author: ryanstoner
Model to calculate diffusion of He out of Magnetite at high diffusivities
relative to the production of He. Magnetite model from Blackburn et al. (2007)
Changing color in plot.
"""
"""
Initialize
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection on older matplotlib
# Setting up initial parameters for simple Arrhenius equation.
a = 0.0001 # Grain size [m], 100 microns radius
D0a2 = 10**6.8 # Diffusivity [s^-1] @ "infinite" temp. norm.
# to grain size.
Ea = 220*10**3 # Arrhenius activation energy [J/mol]
T = 673 # Temperature [K]
R = 8.3145 # Ideal gas constant [J/(mol*K)]
Ndpart = 10**(-8) # Partial pressure of He [Pa]
# Grain dimensions
rmin = 0 # Minimum radius [m]
rmax = np.copy(a) # Radius [m]
dr = 10**-6 # Distance step [m]
r = np.arange(rmin, rmax,dr) # Radius array [m]
Nd = np.zeros(len(r)) # Array of concentration [m]
Nd.fill(Ndpart)
# Calculate diffusivity to evalue stability to find time step.
D = D0a2*np.exp(-Ea/(R*T)) # Diffusivity [m^2/s]
dttheory = (dr**2)/(2*D*a**2) # Max. time step for stability [s]
dt = 10**6 # Actual time step [s], approx. 11.6 days
if dttheory<=dt: # Check if dt is too large and warn if so
print('Unstable code. Time step too large')
print('Your time step (dt) is:' + str(dt) + '. It should be less than:' + \
str(dttheory))
"""
Loop
"""
total_time = 2*10**10 # Time for diffusion to take place (s)
pltint = 800 # Number of loops between plots
time = np.arange(0,total_time,dt) # Time array, t
dNdr = np.zeros(len(Nd)-1) # Empty flux array for 2/r*dN/dr term
d2Ndr2 = np.zeros(len(Nd)-1) # Empty flux array for d^2N/dr^2 term
q = np.zeros(len(Nd)) # Empty diff array for d^2N/dr^2 term
Ndlen = len(Nd) # Length of Nd just to save time later
counter = 0 # Count which loop we're on
for i in np.array(time):
# First find flux for 2/r dN/dr term. Also find gradient for first term.
# Then find gradient for d^2N/dr^2. Gradient also accounts for first term.
dNdr[1:] = (Nd[2:Ndlen]-\
Nd[0:Ndlen-2])/(2*dr)
dNdr[0]=(dNdr[1]-dNdr[0])/2*dr
# d2Ndr2 = np.gradient(Nd)[0:Ndlen-1]/(dr**2)
q[1:] = np.diff(Nd)/dr
d2Ndr2 = np.diff(q)/dr
# Calculate change in concentration over time.
dNdt = D*a**2*(d2Ndr2+(2/r[1:Ndlen])*dNdr)
Nd[:(Ndlen-1)] += dNdt*dt
Nd[Ndlen-1] = 0
counter += 1
if counter % pltint==0:
# Create figure
fig = plt.figure(1)
plt.clf()
ax = fig.add_subplot(111, projection='3d')
# Converting to spherical coordinates
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
# Coords. for sphere marking surface of the grain
x = (rmax) * np.outer(np.cos(u), np.sin(v))
y = (rmax) * np.outer(np.sin(u), np.sin(v))
z = (rmax) * np.outer(np.ones(np.size(u)), np.cos(v))
# Coords. for inner sphere marking "contour" of concentrations
x_contour = 0.5 * rmax * np.outer(np.cos(u), np.sin(v))
y_contour = 0.5 * rmax * np.outer(np.sin(u), np.sin(v))
z_contour = 0.5 * rmax * np.outer(np.ones(np.size(u)), np.cos(v))
# Using to convert from m to mm in plotting. Alpha changes w. conc.
scaling_factor = 1000
ax.plot_surface(x*scaling_factor, y*scaling_factor, z*scaling_factor\
,rstride=4, cstride=4, color='b', alpha=Nd[Ndlen-(Ndlen//2)]/Ndpart)
ax.plot_surface(x_contour*scaling_factor, y_contour*scaling_factor,\
z_contour*scaling_factor ,rstride=4, cstride=4, color='b',\
alpha=Nd[Ndlen-(Ndlen//2)]/Ndpart)
# Setting plotting details
titlefont = {'fontname':'Verdana'}
font = {'family': 'serif',
'color': 'darkred',
'weight': 'normal',
'size': 16,
}
ax.set_xlabel('x dimension (mm)',**titlefont)
ax.set_ylabel('y dimension (mm)',**titlefont)
ax.set_zlabel('z dimension (mm)',**titlefont)
ax.set_zlim(-a*scaling_factor, a*scaling_factor)
time_string = str(round(i/(31.536*10**6),1))
title = plt.title(' Magnetite Diffusion Example \n'+
'Time elapsed: ' + time_string + ' yrs \n \n',**titlefont)
plt.pause(0.001)
plt.draw()
| mit |
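# Editorial aside (separate from the dataset row above): a short sketch of the
# stability bookkeeping used in that script -- the Arrhenius diffusivity and the
# explicit finite-difference limit dt <= dr**2 / (2*D). It assumes the corrected
# gas constant 8.3145 J/(K*mol); the temperatures are illustrative choices, not
# results from the original model.
import numpy as np

D0a2 = 10 ** 6.8        # frequency factor normalised by a**2 [1/s]
Ea = 220e3              # activation energy [J/mol]
R = 8.3145              # ideal gas constant [J/(K*mol)]
a = 1e-4                # grain radius [m]
dr = 1e-6               # radial step [m]

for T in (573.0, 673.0, 773.0):
    D = D0a2 * np.exp(-Ea / (R * T)) * a ** 2   # diffusivity [m^2/s]
    dt_max = dr ** 2 / (2.0 * D)                # largest stable explicit step [s]
    print("T = %4.0f K -> D = %.3e m^2/s, max stable dt = %.3e s" % (T, D, dt_max))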
Jimmy-Morzaria/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positive)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
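# Editorial aside: the Johnson-Lindenstrauss bound quoted in the docstring above,
# n_components >= 4 log(n_samples) / (eps**2/2 - eps**3/3), evaluated directly
# for a few illustrative (n_samples, eps) pairs. Rounding up with ceil is a
# choice made here; scikit-learn's johnson_lindenstrauss_min_dim implements the
# same bound but its integer rounding may differ slightly.
import numpy as np

def jl_min_dim(n_samples, eps):
    # Minimum embedding dimension guaranteeing an eps-embedding of n_samples points.
    denominator = eps ** 2 / 2.0 - eps ** 3 / 3.0
    return int(np.ceil(4.0 * np.log(n_samples) / denominator))

for n_samples in (1e3, 1e6):
    for eps in (0.5, 0.1):
        print("n_samples=%g  eps=%.2f  ->  n_components >= %d"
              % (n_samples, eps, jl_min_dim(n_samples, eps)))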
rs2/pandas | pandas/tests/indexes/timedeltas/test_shift.py | 2 | 2772 | import pytest
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import TimedeltaIndex
import pandas._testing as tm
class TestTimedeltaIndexShift:
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name="xxx")
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
tm.assert_index_equal(idx.shift(3, freq="H"), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
exp = pd.TimedeltaIndex(["8 hours", "9 hours", "12 hours"], name="xxx")
tm.assert_index_equal(idx.shift(3, freq="H"), exp)
exp = pd.TimedeltaIndex(["2 hours", "3 hours", "6 hours"], name="xxx")
tm.assert_index_equal(idx.shift(-3, freq="H"), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
tm.assert_index_equal(idx.shift(0, freq="T"), idx)
exp = pd.TimedeltaIndex(["05:03:00", "06:03:00", "9:03:00"], name="xxx")
tm.assert_index_equal(idx.shift(3, freq="T"), exp)
exp = pd.TimedeltaIndex(["04:57:00", "05:57:00", "8:57:00"], name="xxx")
tm.assert_index_equal(idx.shift(-3, freq="T"), exp)
def test_tdi_shift_int(self):
# GH#8083
tdi = pd.to_timedelta(range(5), unit="d")
trange = tdi._with_freq("infer") + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(
[
"1 days 01:00:00",
"2 days 01:00:00",
"3 days 01:00:00",
"4 days 01:00:00",
"5 days 01:00:00",
],
freq="D",
)
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
tdi = pd.to_timedelta(range(5), unit="d")
trange = tdi._with_freq("infer") + pd.offsets.Hour(1)
result = trange.shift(3, freq="2D 1s")
expected = TimedeltaIndex(
[
"6 days 01:00:03",
"7 days 01:00:03",
"8 days 01:00:03",
"9 days 01:00:03",
"10 days 01:00:03",
],
freq="D",
)
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(["1 days 01:00:00", "2 days 01:00:00"], freq=None)
with pytest.raises(NullFrequencyError, match="Cannot shift with no freq"):
tdi.shift(2)
| bsd-3-clause |
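# Editorial aside: a minimal usage sketch of the behaviour exercised by the
# tests above -- shifting a TimedeltaIndex by a number of periods of a given
# frequency. The index values below are illustrative.
import pandas as pd

tdi = pd.TimedeltaIndex(["1 days", "2 days", "3 days"])
print(tdi.shift(2, freq="H"))    # each element moved forward by two hours
print(tdi.shift(-1, freq="D"))   # each element moved back by one day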
joshloyal/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 70 | 7808 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raises
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
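# Editorial aside: a minimal end-to-end sketch of the estimator exercised in the
# tests above -- FastICA recovering independent sources from a linear mixture.
# The signals and the mixing matrix are illustrative assumptions.
import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.RandomState(0)
t = np.linspace(0, 8, 2000)
sources = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))]   # two independent signals
mixing = np.array([[1.0, 0.5], [0.5, 2.0]])
X = np.dot(sources, mixing.T)                            # observed mixtures

ica = FastICA(n_components=2, random_state=0)
recovered = ica.fit_transform(X)           # estimated sources, shape (2000, 2)
print(recovered.shape, ica.mixing_.shape)  # (2000, 2) (2, 2)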
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| gpl-2.0 |
sibis-platform/ncanda-datacore | scripts/reporting/test/test_outlier_detection.py | 2 | 3837 | import pytest
import pandas as pd
import numpy as np
from check_univariate_outliers import pick_univariate_outliers
s = np.random.seed(54920)
def make_df_with_outliers(mean, std, size, colname, values_to_insert=None, **kwargs):
data = np.random.normal(loc=mean, scale=std, size=size)
if values_to_insert:
data = np.append(np.array(values_to_insert), data)
df_args = kwargs
df_args[colname] = data
return pd.DataFrame(df_args)
# make_df_with_outliers(2000, 100, 10, colname="brain_volume",
# values_to_insert=[1600, 2400], arm="standard", visit="baseline")
@pytest.fixture
def df_within_limits(mean=1000, sd=100, size=1000):
df = make_df_with_outliers(mean, sd, size, colname="brain_volume",
arm="standard", visit="baseline")
df.index.names = ['subject']
df.set_index(['visit', 'arm'], append=True, inplace=True)
return df
@pytest.fixture
def df_baseline(mean=2000, sd=100, size=1000):
df = make_df_with_outliers(mean, sd, size, colname="brain_volume",
values_to_insert=[mean - 4*sd, mean + 4*sd],
arm="standard", visit="baseline")
df.index.names = ['subject']
df.set_index(['visit', 'arm'], append=True, inplace=True)
return df
@pytest.fixture
def df_year1(mean=2000, sd=50, size=1000):
df = make_df_with_outliers(mean, sd, size, colname="brain_volume",
values_to_insert=[mean - 4*sd, mean + 4*sd],
arm="standard", visit="followup_1y")
df.index.names = ['subject']
df.set_index(['visit', 'arm'], append=True, inplace=True)
return df
@pytest.fixture
def df_nice():
data = [0] * 5 + [10] * 90 + [1000] * 4 + [10000] # mean: 149, sd = 1008.9
df = make_df_with_outliers(0, 1, 0, colname="brain_volume",
values_to_insert=data,
arm="standard", visit="baseline")
df.index.names = ['subject']
df.set_index(['visit', 'arm'], append=True, inplace=True)
return df
def test_catches_outlier(df_nice):
result = df_nice.mask(~df_nice.apply(pick_univariate_outliers,
sd_count=3)).stack()
assert result.shape[0] == 1
assert result.values == [10000]
# 0. Sanity checks:
# a. Should return a series
# b. Should return no outliers if everything is within limits
def test_returns_series(df_within_limits):
result = df_within_limits.mask(
~df_within_limits.apply(pick_univariate_outliers,
sd_count=3.5)).stack()
assert type(result) == pd.core.series.Series
assert result.shape[0] == 0
# Others:
# - It should throw an error if the data frame is not indexed / indexes are not
# named correctly
# - Should error out if there is no baseline visit
# Tests:
# 1. Testing df_baseline should find the two outliers
def test_baseline_finds(df_baseline):
result = df_baseline.mask(~df_baseline.apply(pick_univariate_outliers)).stack()
assert result.shape[0] >= 2
assert result.shape[0] <= 2 + int(np.round(0.002 * df_baseline.shape[0]))
# 2. Testing df_baseline + df_year1 should find two outliers if baseline-only
# is enabled, four if not
def test_year1_ok_if_baseline_only(df_baseline, df_year1):
df = pd.concat([df_baseline, df_year1], axis=0)
result = df.mask(~df.apply(pick_univariate_outliers, baseline_only=True)).stack()
assert (result.reset_index(['visit'])['visit'] != "followup_1y").all()
def test_year1_outliers_if_per_year(df_baseline, df_year1):
df = pd.concat([df_baseline, df_year1], axis=0)
result = df.mask(~df.apply(pick_univariate_outliers, baseline_only=False)).stack()
assert (result.reset_index(['visit'])['visit'] == "followup_1y").any()
| bsd-3-clause |
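# Editorial aside: check_univariate_outliers.pick_univariate_outliers is not
# included in this row, so the sketch below is only a guess at the rule the
# tests imply -- flag a value when it lies more than sd_count standard
# deviations from the column mean. It is an illustration, not the project's
# actual implementation.
import pandas as pd

def flag_univariate_outliers(col, sd_count=3.5):
    # Boolean Series: True where the value is a univariate outlier.
    return (col - col.mean()).abs() > sd_count * col.std()

values = pd.Series([10.0] * 95 + [9.5] * 4 + [10000.0])
print(values[flag_univariate_outliers(values)])   # only the extreme value remains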
zhenv5/scikit-learn | sklearn/svm/classes.py | 126 | 40114 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
Specifies the loss function. 'epsilon_insensitive' is the standard
SVR loss while 'squared_epsilon_insensitive' is the squared loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a read-only property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
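# Illustrative usage sketch (not part of scikit-learn itself): how OneClassSVM is
# typically applied to novelty detection. The training data and the `nu`/`gamma`
# values below are arbitrary assumptions chosen only for the demo; the helper
# name `_one_class_svm_usage_sketch` is hypothetical.
def _one_class_svm_usage_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    # Train on "normal" samples only, then score new observations.
    X_train = 0.3 * rng.randn(100, 2)
    X_new = np.r_[0.3 * rng.randn(20, 2),
                  rng.uniform(low=-4, high=4, size=(5, 2))]
    clf = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1)
    clf.fit(X_train)
    # predict() returns +1 for inliers and -1 for outliers;
    # decision_function() gives the signed distance to the learned boundary.
    labels = clf.predict(X_new)
    scores = clf.decision_function(X_new)
    return labels, scores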
| bsd-3-clause |
wiheto/teneto | teneto/plot/graphlet_stack_plot.py | 1 | 11452 | """Plots graphlet stack plot"""
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage
from ..utils import contact2graphlet, check_input
plt.rcParams['axes.facecolor'] = 'white'
def graphlet_stack_plot(netin, ax, q=10, cmap='Reds', gridcolor='k',
borderwidth=2, bordercolor=None, Fs=1, timeunit='', t0=1,
sharpen='yes', vminmax='minmax'):
r"""
Returns matplotlib axis handle for graphlet_stack_plot. This is a row of transformed connectivity matrices to look like a 3D stack.
Parameters
----------
netin : array, dict
network input (graphlet or contact)
ax : matplotlib ax handles.
q : int
Quality. Increasing this will lead to smoother axes but takes up more memory.
cmap : str
Colormap (matplotlib) of graphlets
Fs : int
Sampling rate. Same as contact-representation (if netin is contact,
and input is unset, contact dictionary is used)
timeunit : str
Unit of time for xlabel. Same as contact-representation (if netin is contact,
and input is unset, contact dictionary is used)
t0 : int
What should the first time point be called. Should be integer. Default 1.
gridcolor : str
The color of the grid section of the graphlets. Set to 'none' if not wanted.
borderwidth : int
Scales the size of border.
bordercolor :
Color of the border (at the moment it must be given as RGB values between 0 and 1;
this will be changed sometime in the future). Default: black.
vminmax : str
'maxabs', 'minmax' (default), or list/array with length of 2.
Specifies the min and max colormap value of graphlets.
Maxabs entails [-max(abs(G)),max(abs(G))], minmax entails [min(G), max(G)].
Returns
--------
ax : matplotlib ax handle
Note
------
This function can require a lot of RAM with larger networks.
Note
------
At the moment bordercolor cannot be set to zero. To remove the border, set borderwidth=1 and bordercolor=[1,1,1] as a temporary workaround.
Examples
-------
Create a network with some metadata
>>> import numpy as np
>>> import teneto
>>> import matplotlib.pyplot as plt
>>> np.random.seed(2017) # For reproducibility
>>> N = 5 # Number of nodes
>>> T = 10 # Number of timepoints
>>> # Probability of edge activation
>>> birth_rate = 0.2
>>> death_rate = .9
>>> # Add network metadata: time units are years, 1 year per graphlet, start year 2007
>>> cfg={}
>>> cfg['Fs'] = 1
>>> cfg['timeunit'] = 'Years'
>>> cfg['t0'] = 2007 #First year in network
>>> #Generate network
>>> C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
Now this network can be plotted
>>> fig,ax = plt.subplots(figsize=(10,3))
>>> ax = teneto.plot.graphlet_stack_plot(C,ax,q=10,cmap='Greys')
>>> fig.show()
.. plot::
import numpy as np
import teneto
import matplotlib.pyplot as plt
np.random.seed(2017) # For reproducibility
N = 5 # Number of nodes
T = 10 # Number of timepoints
# Probability of edge activation
birth_rate = 0.2
death_rate = .9
# Add network metadata: time units are years, 1 year per graphlet, start year 2007
cfg={}
cfg['Fs'] = 1
cfg['timeunit'] = 'Years'
cfg['t0'] = 2007 #First year in network
#Generate network
C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
fig,ax = plt.subplots(figsize=(10,3))
cmap = 'Greys'
ax = teneto.plot.graphlet_stack_plot(C,ax,q=10,cmap=cmap)
fig.show()
"""
# Get input type (C, G, TO)
inputType = check_input(netin)
# Convert TO to C representation
if inputType == 'TO':
netin = netin.contact
inputType = 'C'
# Convert C representation to G
if inputType == 'C':
if timeunit == '':
timeunit = netin['timeunit']
if t0 == 1:
t0 = netin['t0']
if Fs == 1:
Fs = netin['Fs']
netin = contact2graphlet(netin)
if timeunit != '':
timeunit = ' (' + timeunit + ')'
if bordercolor is None:
bordercolor = [0, 0, 0]
if not isinstance(borderwidth, int):
borderwidth = int(borderwidth)
print('Warning: borderwidth should be an integer. Converting to integer.')
# x and y ranges for each of the graphlet plots
v = np.arange(0, netin.shape[0] + 1)
vr = np.arange(netin.shape[0], -1, -1)
# Preallocate the output matrix
if vminmax == '' or vminmax == 'absmax' or vminmax == 'maxabs':
vminmax = [-np.nanmax(np.abs(netin)), np.nanmax(np.abs(netin))]
elif vminmax == 'minmax':
vminmax = [np.nanmin(netin), np.nanmax(netin)]
if borderwidth == 0:
addon = 1
lw = 0
else:
addon = 0
lw = q * 2
qb = q * borderwidth + addon
figmat = np.zeros([80 * q + (qb * 2), int(((netin.shape[-1]) *
(80 * q) + (qb * 2)) - ((netin.shape[-1] - 1) * q * 80) / 2), 4])
for n in range(0, netin.shape[-1]):
# Create graphlet
figtmp, axtmp = plt.subplots(
1, facecolor='white', figsize=(q, q), dpi=80)
axtmp.pcolormesh(v, vr, netin[:, :, n], cmap=cmap, edgecolor=gridcolor,
linewidth=lw, vmin=vminmax[0], vmax=vminmax[1])
axtmp.set_xticklabels('')
axtmp.set_yticklabels('')
axtmp.set_xticks([])
axtmp.set_yticks([])
x0, x1 = axtmp.get_xlim()
y0, y1 = axtmp.get_ylim()
axtmp.set_aspect((x1 - x0) / (y1 - y0))
axtmp.spines['left'].set_visible(False)
axtmp.spines['right'].set_visible(False)
axtmp.spines['top'].set_visible(False)
axtmp.spines['bottom'].set_visible(False)
plt.subplots_adjust(left=0, bottom=0, right=1,
top=1, wspace=0, hspace=0)
# Convert graphlet to RGB values
figtmp.canvas.draw()
figmattmp = np.fromstring(
figtmp.canvas.tostring_rgb(), dtype=np.uint8, sep='')
figmattmp = figmattmp.reshape(
figtmp.canvas.get_width_height()[::-1] + (3,))
# Close figure for memory
plt.close(figtmp)
# Manually add a border
figmattmp_withborder = np.zeros(
[figmattmp.shape[0] + (qb * 2), figmattmp.shape[1] + (qb * 2), 3]) + (np.array(bordercolor) * 255)
figmattmp_withborder[qb:-qb, qb:-qb, :] = figmattmp
# Make corners rounded. First make a circle and then take the relevant quarter for each corner.
y, x = np.ogrid[-qb: qb + 1, -qb: qb + 1]
mask = x * x + y * y <= qb * qb
# A little clumsy. Should improve
Mq1 = np.vstack([[mask[:qb, :qb] == 0], [mask[:qb, :qb] == 0], [
mask[:qb, :qb] == 0]]).transpose([1, 2, 0])
figmattmp_withborder[:qb, :qb, :][Mq1] = 255
Mq1 = np.vstack([[mask[:qb, -qb:] == 0], [mask[:qb, -qb:]
== 0], [mask[:qb, -qb:] == 0]]).transpose([1, 2, 0])
figmattmp_withborder[:qb, -qb:, :][Mq1] = 255
Mq1 = np.vstack([[mask[-qb:, :qb] == 0], [mask[-qb:, :qb]
== 0], [mask[-qb:, :qb] == 0]]).transpose([1, 2, 0])
figmattmp_withborder[-qb:, :qb, :][Mq1] = 255
Mq1 = np.vstack([[mask[-qb:, -qb:] == 0], [mask[-qb:, -qb:]
== 0], [mask[-qb:, -qb:] == 0]]).transpose([1, 2, 0])
figmattmp_withborder[-qb:, -qb:, :][Mq1] = 255
# scale and shear transformation matrices
scale = np.matrix([[1.5, 0, 0], [0, 3, 0], [0, 0, 1]])
sheer = np.matrix([[1, np.tan(np.pi / 12), 0], [0, 1, 0], [0, 0, 1]])
# apply affine transformation
figmattmp = ndimage.affine_transform(
figmattmp_withborder, sheer * (scale), offset=[-35 * q, 0, 0], cval=255)
# At the moment the alpha part does not work if the background colour is anything but white.
# Also used for detecting where the graphlets are in the image.
trans = np.where(np.sum(figmattmp, axis=2) == 255 * 3)
alphamat = np.ones([figmattmp.shape[0], figmattmp.shape[0]])
alphamat[trans[0], trans[1]] = 0
figmattmp = np.dstack([figmattmp, alphamat])
# Add graphlet to matrix
if n == 0:
figmat[:, n * (80 * q):((n + 1) * (80 * q) + (qb * 2))] = figmattmp
else:
figmat[:, n * (80 * q) - int((n * q * 80) / 2):int(((n + 1)
* (80 * q) + (qb * 2)) - (n * q * 80) / 2)] = figmattmp
# Fix colours - due to imshows weirdness when taking nxnx3
figmat[:, :, 0:3] = figmat[:, :, 0:3] / 255
# Cut end of matrix off that isn't need
figmat = figmat[:, :-int((q / 2) * 80), :]
fid = np.where(figmat[:, :, -1] > 0)
fargmin = np.argmin(fid[0])
ymax = np.max(fid[0])
yright = np.max(np.where(figmat[:, fid[1][fargmin], -1] > 0))
xtickloc = np.where(figmat[ymax, :, -1] > 0)[0]
# In case there are multiple cases of xtickloc in same graphlet (i.e. they all have the same lowest value)
xtickloc = np.delete(xtickloc, np.where(np.diff(xtickloc) == 1)[0] + 1)
fid = np.where(figmat[:, :, -1] > 0)
ymin = np.min(fid[0])
topfig = np.where(figmat[ymin, :, -1] > 0)[0]
topfig = topfig[0:len(topfig):int(len(topfig) / netin.shape[-1])]
# Make squares of non transparency around each figure (this fixes transparency issues when white is in the colormap)
# for n in range(0,len(topfig)):
# fid=np.where(figmat[ymin:ymax,xtickloc[n]:topfig[n],-1]==0)
# figmat[ymin:ymax,xtickloc[n]:topfig[n],:3][fid[0],fid[1]]=1
# figmat[ymin+q:ymax-q,xtickloc[n]+q:topfig[n]-q,-1]=1
# Create figure
# Sharped edges of figure with median filter
if sharpen == 'yes':
figmat[:, :, :-1] = ndimage.median_filter(figmat[:, :, :-1], 3)
ax.imshow(figmat[:, :, :-1], zorder=1)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.set_xticks([])
ax.set_yticks([])
L = int((((netin.shape[-1] - 3) + 1) * (80 * q) +
(qb * 2)) - ((netin.shape[-1] - 3) * q * 80) / 2 - q)
_ = [ax.plot(range(topfig[i], xt), np.zeros(len(range(topfig[i], xt))) + yright,
color='k', linestyle=':', zorder=2) for i, xt in enumerate(xtickloc[1:])]
ax.plot(range(0, L), np.zeros(L) + ymax,
color='k', linestyle=':', zorder=2)
_ = [ax.plot(np.zeros(q * 10) + xt, np.arange(ymax, ymax + q * 10),
color='k', linestyle=':', zorder=2) for xt in xtickloc]
_ = [ax.text(xt, ymax + q * 20, str(round((i + t0) * Fs, 5)),
horizontalalignment='center',) for i, xt in enumerate(xtickloc)]
ylim = ax.axes.get_ylim()
xlim = ax.axes.get_xlim()
ax.set_ylim(ylim[0] + q * 15, 0)
ax.set_xlim(xlim[0] - q * 20, xlim[1])
ax.set_xlabel('Time' + timeunit)
return ax
| gpl-3.0 |
goodalljl/hydroinformatics_class | Class13_InClassDemo_Clean.py | 1 | 1844 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple demo of using PyMySQL and matplotlib to create a series data plot
from data stored in an ODM database.
"""
import pymysql
import matplotlib.pyplot as plt
from matplotlib import dates
from matplotlib import rc
import datetime
#inputs
SiteID = '2'
VariableID = '36'
StartLocalDateTime = "'2008-01-01'"
EndLocalDateTime = "'2008-12-31'"
#connect to database
conn = pymysql.connect(host='localhost', port=3306, user='root', \
passwd='', db='LBRODM_small')
#extract time series from database
sql_statement = 'SELECT LocalDateTime, DataValue FROM DataValues \
WHERE SiteID = ' + SiteID + ' AND VariableID = ' + VariableID + ' AND \
QualityControlLevelID = 1 AND LocalDateTime >= ' + StartLocalDateTime \
+ ' AND LocalDateTime <= ' + EndLocalDateTime + \
' ORDER BY LocalDateTime'
cursor = conn.cursor()
cursor.execute(sql_statement)
rows = cursor.fetchall()
localDateTimes, dataValues = zip(*rows)
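# Illustrative alternative (a sketch, not used below): the same query written with
# parameter placeholders, so the driver handles quoting instead of building the SQL
# string by hand. The helper name `query_values_parameterized` is hypothetical and
# assumes the same pymysql connection and inputs as above.
def query_values_parameterized(connection, site_id, variable_id, start, end):
    sql = ("SELECT LocalDateTime, DataValue FROM DataValues "
           "WHERE SiteID = %s AND VariableID = %s "
           "AND QualityControlLevelID = 1 "
           "AND LocalDateTime >= %s AND LocalDateTime <= %s "
           "ORDER BY LocalDateTime")
    cur = connection.cursor()
    cur.execute(sql, (site_id, variable_id, start, end))
    return cur.fetchall()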
#create plot
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(localDateTimes, dataValues, color='grey', linestyle='solid', \
markersize=0)
#set plot properties
ax.set_ylabel("Temperature ($^\circ$C)")
ax.set_xlabel("Date/Time")
ax.xaxis.set_minor_locator(dates.MonthLocator())
ax.xaxis.set_minor_formatter(dates.DateFormatter('%b'))
ax.xaxis.set_major_locator(dates.YearLocator())
ax.xaxis.set_major_formatter(dates.DateFormatter('\n%Y'))
ax.grid(True)
ax.set_title('Water temperature at Little Bear River \n at McMurdy Hollow \
near Paradise, Utah') #hard coded for now. Should update when SiteID is updated.
fig.tight_layout()
#set font type and size for plot
font = {'family' : 'sans-serif', #changed from 'normal' to remove warning
'weight' : 'normal',
'size' : 12}
rc('font', **font)
fig.savefig('Class13_InClassDemo_Clean.png')
| mit |
GehenHe/Recognize-Face-on-Android | tensorflow/contrib/factorization/python/ops/gmm.py | 11 | 12252 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering.
This goes on top of skflow API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import TransformerMixin
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.platform import tf_logging as logging
def _streaming_sum(scalar_tensor):
"""Create a sum metric and update op."""
sum_metric = framework.local_variable(constant_op.constant(0.0))
sum_update = sum_metric.assign_add(scalar_tensor)
return sum_metric, sum_update
class GMM(estimator_lib.Estimator, TransformerMixin):
"""GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
batch_size=128,
steps=10,
continue_training=False,
config=None,
verbose=1):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
batch_size: See Estimator
steps: See Estimator
continue_training: See Estimator
config: See Estimator
verbose: See Estimator
"""
super(GMM, self).__init__(model_dir=model_dir, config=config)
self.batch_size = batch_size
self.steps = steps
self.continue_training = continue_training
self.verbose = verbose
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
def fit(self, x, y=None, monitors=None, logdir=None, steps=None):
"""Trains a GMM clustering on x.
Note: See Estimator for logic for continuous training and graph
construction across multiple calls to fit.
Args:
x: training input matrix of shape [n_samples, n_features].
y: labels. Should be None.
monitors: List of `Monitor` objects to print training progress and
invoke early stopping.
logdir: the directory to save the log file that can be used for optional
visualization.
steps: number of training steps. If not None, overrides the value passed
in constructor.
Returns:
Returns self.
"""
if logdir is not None:
self._model_dir = logdir
self._data_feeder = data_feeder.setup_train_data_feeder(x, None,
self._num_clusters,
self.batch_size)
_legacy_train_model( # pylint: disable=protected-access
self,
input_fn=self._data_feeder.input_builder,
feed_fn=self._data_feeder.get_feed_dict_fn(),
steps=steps or self.steps,
monitors=monitors,
init_feed_fn=self._data_feeder.get_feed_dict_fn())
return self
def predict(self, x, batch_size=None):
"""Predict cluster id for each element in x.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, containing cluster ids.
"""
return np.array([
prediction[GMM.ASSIGNMENTS]
for prediction in super(GMM, self).predict(
x=x, batch_size=batch_size, as_iterable=True)
])
def score(self, x, batch_size=None):
"""Predict total sum of distances to nearest clusters.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Total score.
"""
return np.sum(self.evaluate(x=x, batch_size=batch_size)[GMM.SCORES])
def transform(self, x, batch_size=None):
"""Transforms each element in x to distances to cluster centers.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
return np.array([
prediction[GMM.ALL_SCORES]
for prediction in super(GMM, self).predict(
x=x, batch_size=batch_size, as_iterable=True)
])
def clusters(self):
"""Returns cluster centers."""
clusters = checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat([features[k] for k in sorted(features.keys())], 1)
return features
def _get_train_ops(self, features, _):
(_, _, losses, training_op) = gmm_ops.gmm(
self._parse_tensor_or_dict(features), self._training_initial_clusters,
self._num_clusters, self._random_seed, self._covariance_type,
self._params)
incr_step = state_ops.assign_add(variables.get_global_step(), 1)
loss = math_ops.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
return training_op, loss
def _get_predict_ops(self, features):
(all_scores, model_predictions, _, _) = gmm_ops.gmm(
self._parse_tensor_or_dict(features), self._training_initial_clusters,
self._num_clusters, self._random_seed, self._covariance_type,
self._params)
return {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
def _get_eval_ops(self, features, _, unused_metrics):
(_,
_,
losses,
_) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
return {GMM.SCORES: _streaming_sum(math_ops.reduce_sum(losses))}
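# Illustrative usage sketch (a hypothetical helper, not part of the original module):
# fitting the GMM estimator on a small random matrix and reading back the cluster
# parameters. The data shape and hyperparameters are arbitrary assumptions for the demo.
def _gmm_usage_sketch():
  import numpy as np
  x = np.random.rand(200, 3).astype(np.float32)
  gmm = GMM(num_clusters=4, covariance_type='full', steps=20, batch_size=50)
  gmm.fit(x)
  assignments = gmm.predict(x)   # cluster id per row of x
  centers = gmm.clusters()       # array of shape [num_clusters, n_features]
  covs = gmm.covariances()       # per-cluster covariance matrices
  return assignments, centers, covs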
# TODO(xavigonzalvo): delete this after implementing model-fn based Estimator.
def _legacy_train_model(estimator,
input_fn,
steps,
feed_fn=None,
init_op=None,
init_feed_fn=None,
init_fn=None,
device_fn=None,
monitors=None,
log_every_steps=100,
fail_on_nan_loss=True,
max_steps=None):
"""Legacy train function of Estimator."""
if hasattr(estimator.config, 'execution_mode'):
if estimator.config.execution_mode not in ('all', 'train'):
return
# Stagger startup of worker sessions based on task id.
sleep_secs = min(
estimator.config.training_worker_max_startup_secs,
estimator.config.task_id *
estimator.config.training_worker_session_startup_stagger_secs)
if sleep_secs:
logging.info('Waiting %d secs before starting task %d.', sleep_secs,
estimator.config.task_id)
time.sleep(sleep_secs)
# Device allocation
device_fn = device_fn or estimator._device_fn # pylint: disable=protected-access
with ops.Graph().as_default() as g, g.device(device_fn):
random_seed_lib.set_random_seed(estimator.config.tf_random_seed)
global_step = framework.create_global_step(g)
features, labels = input_fn()
estimator._check_inputs(features, labels) # pylint: disable=protected-access
# The default return type of _get_train_ops is ModelFnOps. But there are
# some subclasses of tf.contrib.learn.Estimator which override this
# method and use the legacy signature, namely _get_train_ops returns a
# (train_op, loss) tuple. The following else-statement code covers these
# cases, but will soon be deleted after the subclasses are updated.
# TODO(b/32664904): Update subclasses and delete the else-statement.
train_ops = estimator._get_train_ops(features, labels) # pylint: disable=protected-access
if isinstance(train_ops, model_fn_lib.ModelFnOps): # Default signature
train_op = train_ops.train_op
loss_op = train_ops.loss
if estimator.config.is_chief:
hooks = train_ops.training_chief_hooks + train_ops.training_hooks
else:
hooks = train_ops.training_hooks
else: # Legacy signature
if len(train_ops) != 2:
raise ValueError('Expected a tuple of train_op and loss, got {}'.format(
train_ops))
train_op = train_ops[0]
loss_op = train_ops[1]
hooks = []
hooks += monitor_lib.replace_monitors_with_hooks(monitors, estimator)
ops.add_to_collection(ops.GraphKeys.LOSSES, loss_op)
return graph_actions._monitored_train( # pylint: disable=protected-access
graph=g,
output_dir=estimator.model_dir,
train_op=train_op,
loss_op=loss_op,
global_step_tensor=global_step,
init_op=init_op,
init_feed_dict=init_feed_fn() if init_feed_fn is not None else None,
init_fn=init_fn,
log_every_steps=log_every_steps,
supervisor_is_chief=estimator.config.is_chief,
supervisor_master=estimator.config.master,
supervisor_save_model_secs=estimator.config.save_checkpoints_secs,
supervisor_save_model_steps=estimator.config.save_checkpoints_steps,
supervisor_save_summaries_steps=estimator.config.save_summary_steps,
keep_checkpoint_max=estimator.config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=(
estimator.config.keep_checkpoint_every_n_hours),
feed_fn=feed_fn,
steps=steps,
fail_on_nan_loss=fail_on_nan_loss,
hooks=hooks,
max_steps=max_steps)
| apache-2.0 |
SEMAFORInformatik/femagtools | examples/calculation/pm_sym_fast_shortcircuit.py | 1 | 2625 | import femagtools
import femagtools.plot
import femagtools.machine
import logging
import matplotlib.pyplot as plt
import os
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
machine = dict(
name="PM 270 L8",
lfe=0.08356,
poles=8,
outer_diam=0.26924,
bore_diam=0.16192,
inner_diam=0.092,
airgap=0.00075,
stator=dict(
num_slots=48,
nodedist=2.5,
mcvkey_yoke='M330-50A',
statorRotor3=dict(
slot_height=0.0335,
slot_h1=0.001,
slot_h2=0.0,
slot_r1=0.0001,
slot_r2=0.00282,
wedge_width1=0.00295,
wedge_width2=0,
middle_line=0,
tooth_width=0.0,
slot_top_sh=0.0,
slot_width=0.00193)
),
magnet=dict(
mcvkey_yoke='M330-50A',
magnetIronV=dict(
magn_width=18e-3,
magn_height=6.48e-3,
magn_angle=145,
magn_num=1,
gap_ma_iron=0.2e-3,
air_triangle=1e-3,
iron_height=2.61e-3,
iron_hs=0.1e-3,
shaft_rad=55.32e-3,
iron_shape=80.2e-3,
air_space_h=5.5e-3,
iron_bfe=3e-3,
magn_di_ra=6e-3,
corner_r=0,
air_sp_ori=1,
magn_ori=1,
condshaft_r=55.32e-3)
),
windings=dict(
num_phases=3,
num_wires=9,
coil_span=6.0,
num_layers=1)
)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
workdir = os.path.join(
os.path.expanduser('~'), 'femag')
try:
os.makedirs(workdir)
except OSError:
pass
femag = femagtools.Femag(workdir, magnetizingCurves='../magnetcurves')
pmRelSim = dict(
angl_i_up=-39.3,
calculationMode="pm_sym_fast",
wind_temp=60.0,
magn_temp=60.0,
current=76.43,
period_frac=6,
speed=50.0,
shortCircuit=True,
l_end_winding=0,
l_external=0,
sc_type=3,
initial=2,
allow_demagn=0,
sim_demagn=1)
r = femag(machine,
pmRelSim)
print('Torque [Nm] = {}'.format(r.machine['torque']))
print('''
Short Circuit Current Torque
Peak iks {2:8.1f} A tks {3:8.1f} Nm
Stationary ikd {0:8.1f} A tkd {1:8.1f} Nm
peak winding currents {4}
'''.format(r.scData['ikd'],
r.scData['tkd'],
r.scData['iks'],
r.scData['tks'],
r.scData['peakWindingCurrents']))
print('Demag {}'.format(r.demag[-1]))
fig, ax = plt.subplots()
femagtools.plot.transientsc(r)
plt.show()
| bsd-2-clause |
iamharshit/ML_works | Face Recognisation/tain.py | 1 | 1905 | from sklearn.datasets import fetch_lfw_people
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import RandomizedPCA
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
import pylab as pl
# Downloading the data
lfw_people = fetch_lfw_people(min_faces_per_person=70)
X = lfw_people.data
n_samples, height, width = lfw_people.images.shape
n_features = X.shape[1]
Y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
# Reducing the No. of features
pca = RandomizedPCA(n_components=150, whiten=True).fit(X_train)
eigenfaces = pca.components_.reshape((150, height, width))
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
# Training SVM classifier
para = {
'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1],
}
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), para).fit(X_train_pca, Y_train)
# Testing clf on test set
Y_pred = clf.predict(X_test_pca)
cm = confusion_matrix(Y_test, Y_pred, labels=range(n_classes))
# Plotting the predictions on a portion of test set
def title(i):
pred_name = target_names[Y_pred[i]].rsplit(' ')[-1]
true_name = target_names[Y_test[i]].rsplit(' ')[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
def plot_gallery(images, prediction_titles, n_row=3, n_col=4):
pl.figure(figsize=(1.8*n_col, 2.4*n_row) )
pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row*n_col):
pl.subplot(n_row, n_col, i+1)
pl.imshow(images[i], cmap=pl.cm.gray)
pl.title(prediction_titles[i], size=12)
pl.xticks(())
pl.yticks(())
pl.show()
prediction_titles = [title(i) for i in range(Y_pred.shape[0])]
plot_gallery(X_test.reshape((-1, height, width)), prediction_titles)
| mit |
woutdenolf/spectrocrunch | scraps/id16normspec.py | 1 | 2047 | # -*- coding: utf-8 -*-
filename = "/data/id16b/inhouse1/comm_17jan/restart/sofc/26jan/6100h_fluoXAS_0/results/6100h_fluoXAS_0/test.h5"
name = "/detectorsum/Ni-K"
new = "/detectorsum/Ni-K_norm"
specfile = "/data/id16b/inhouse1/comm_17jan/ma3257/align.spec"
specscannumber = 33
energyname = "energy"
fluxname = "flux_It"
###### Import libraries ######
from spectrocrunch.io.spec import spec
from spectrocrunch.h5stacks.get_hdf5_imagestacks import get_hdf5_imagestacks
from spectrocrunch.math.interpolate import extrap1d
from scipy.interpolate import interp1d
import spectrocrunch.io.nexus as nexus
import h5py
import numpy as np
import matplotlib.pyplot as plt
###### Get flux from spec file ######
fspec = spec(specfile)
data, info = fspec.getdata(specscannumber, [energyname, fluxname])
energy = data[:, 0]
Inorm = data[:, 1]
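# NOTE: the flux values read from the spec scan are overridden with a constant below.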
Inorm[:] = 2.0
fluxfunc = extrap1d(interp1d(energy, Inorm))
###### Get stack to normalize ######
stacks, axes = get_hdf5_imagestacks(filename, ["detectorsum"])
###### Add normalized dataset ######
fh5 = h5py.File(filename)
stackdim = 2
stackenergy = fh5[axes[stackdim]["fullname"]][:]
stacknorm = fluxfunc(stackenergy)
plt.plot(energy, Inorm, "-", label="Spec")
plt.plot(stackenergy, stacknorm, "or", label="fluoXAS")
plt.xlabel("Energy (keV)")
plt.ylabel("Flux")
plt.title("Flux used for normalization")
plt.legend()
plt.show()
tmp = [s for s in new.split("/") if len(s) > 0]
nxdatagrp = nexus.newNXdata(fh5[tmp[0]], "/".join(tmp[1:]), "")
dset = nexus.createNXdataSignal(
nxdatagrp,
shape=fh5[name]["data"][:].shape,
chunks=True,
dtype=fh5[name]["data"][:].dtype,
)
nexus.linkaxes(fh5, axes, [nxdatagrp])
data = fh5[name]["data"][:]
s = np.array(data.shape)
# stackdim = np.where(s==stackenergy.size)[0][0]
s = np.delete(s, stackdim)
if stackdim == 0:
snew = (s[1], s[0], stacknorm.size)
elif stackdim == 1:
snew = (s[1], stacknorm.size, s[0])
else:
snew = (stacknorm.size, s[1], s[0])
data /= np.tile(stacknorm, (s[0] * s[1], 1)).T.reshape(snew).T
dset[:] = data
fh5.close()
| mit |
pizzathief/scipy | scipy/optimize/minpack.py | 2 | 34796 | import warnings
from . import _minpack
import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
transpose, zeros, prod, greater,
asarray, inf,
finfo, inexact, issubdtype, dtype)
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError, inv
from scipy._lib._util import _asarray_validated, _lazywhere
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
# from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds
error = _minpack.error
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs,
output_shape=None):
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
if (output_shape is not None) and (shape(res) != output_shape):
if (output_shape[0] != 1):
if len(output_shape) > 1:
if output_shape[1] == 1:
return shape(res)
msg = "%s: there is a mismatch between the input and output " \
"shape of the '%s' argument" % (checker, argname)
func_name = getattr(thefunc, '__name__', None)
if func_name:
msg += " '%s'." % func_name
else:
msg += "."
msg += 'Shape should be %s but it is %s.' % (output_shape, shape(res))
raise TypeError(msg)
if issubdtype(res.dtype, inexact):
dt = res.dtype
else:
dt = dtype(float)
return shape(res), dt
def fsolve(func, x0, args=(), fprime=None, full_output=0,
col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
epsfcn=None, factor=100, diag=None):
"""
Find the roots of a function.
Return the roots of the (non-linear) equations defined by
``func(x) = 0`` given a starting estimate.
Parameters
----------
func : callable ``f(x, *args)``
A function that takes at least one (possibly vector) argument,
and returns a value of the same length.
x0 : ndarray
The starting estimate for the roots of ``func(x) = 0``.
args : tuple, optional
Any extra arguments to `func`.
fprime : callable ``f(x, *args)``, optional
A function to compute the Jacobian of `func` with derivatives
across the rows. By default, the Jacobian will be estimated.
full_output : bool, optional
If True, return optional outputs.
col_deriv : bool, optional
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float, optional
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int, optional
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple, optional
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
epsfcn : float, optional
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`epsfcn` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence, optional
N positive entries that serve as scale factors for the
variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for
an unsuccessful call).
infodict : dict
A dictionary of optional outputs with the keys:
``nfev``
number of function calls
``njev``
number of Jacobian calls
``fvec``
function evaluated at the output
``fjac``
the orthogonal matrix, q, produced by the QR
factorization of the final approximate Jacobian
matrix, stored column wise
``r``
upper triangular matrix produced by QR factorization
of the same matrix
``qtf``
the vector ``(transpose(q) * fvec)``
ier : int
An integer flag. Set to 1 if a solution was found, otherwise refer
to `mesg` for more information.
mesg : str
If no solution is found, `mesg` details the cause of failure.
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See the ``method=='hybr'`` in particular.
Notes
-----
``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
Examples
--------
Find a solution to the system of equations:
``x0*cos(x1) = 4, x1*x0 - x1 = 5``.
>>> from scipy.optimize import fsolve
>>> def func(x):
... return [x[0] * np.cos(x[1]) - 4,
... x[1] * x[0] - x[1] - 5]
>>> root = fsolve(func, [1, 1])
>>> root
array([6.50409711, 0.90841421])
>>> np.isclose(func(root), [0.0, 0.0]) # func(root) should be almost 0.0.
array([ True, True])
"""
options = {'col_deriv': col_deriv,
'xtol': xtol,
'maxfev': maxfev,
'band': band,
'eps': epsfcn,
'factor': factor,
'diag': diag}
res = _root_hybr(func, x0, args, jac=fprime, **options)
if full_output:
x = res['x']
info = dict((k, res.get(k))
for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
info['fvec'] = res['fun']
return x, info, res['status'], res['message']
else:
status = res['status']
msg = res['message']
if status == 0:
raise TypeError(msg)
elif status == 1:
pass
elif status in [2, 3, 4, 5]:
warnings.warn(msg, RuntimeWarning)
else:
raise TypeError(msg)
return res['x']
def _root_hybr(func, x0, args=(), jac=None,
col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
factor=100, diag=None, **unknown_options):
"""
Find the roots of a multivariate function using MINPACK's hybrd and
hybrj routines (modified Powell method).
Options
-------
col_deriv : bool
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
eps : float
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`eps` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence
N positive entries that serve as scale factors for the
variables.
"""
_check_unknown_options(unknown_options)
epsfcn = eps
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
if epsfcn is None:
epsfcn = finfo(dtype).eps
Dfun = jac
if Dfun is None:
if band is None:
ml, mu = -10, -10
else:
ml, mu = band[:2]
if maxfev == 0:
maxfev = 200 * (n + 1)
retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
ml, mu, epsfcn, factor, diag)
else:
_check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
if (maxfev == 0):
maxfev = 100 * (n + 1)
retval = _minpack._hybrj(func, Dfun, x0, args, 1,
col_deriv, xtol, maxfev, factor, diag)
x, status = retval[0], retval[-1]
errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d." % maxfev,
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible." % xtol,
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
info = retval[1]
info['fun'] = info.pop('fvec')
sol = OptimizeResult(x=x, success=(status == 1), status=status)
sol.update(info)
try:
sol['message'] = errors[status]
except KeyError:
sol['message'] = errors['unknown']
return sol
LEASTSQ_SUCCESS = [1, 2, 3, 4]
LEASTSQ_FAILURE = [5, 6, 7, 8]
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
"""
Minimize the sum of squares of a set of equations.
::
x = arg min(sum(func(y)**2,axis=0))
y
Parameters
----------
func : callable
Should take at least one (possibly length N vector) argument and
returns M floating point numbers. It must not return NaNs or
fitting might fail.
x0 : ndarray
The starting estimate for the minimization.
args : tuple, optional
Any extra arguments to func are placed in this tuple.
Dfun : callable, optional
A function or method to compute the Jacobian of func with derivatives
across the rows. If this is None, the Jacobian will be estimated.
full_output : bool, optional
non-zero to return all optional outputs.
col_deriv : bool, optional
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float, optional
Relative error desired in the sum of squares.
xtol : float, optional
Relative error desired in the approximate solution.
gtol : float, optional
Orthogonality desired between the function vector and the columns of
the Jacobian.
maxfev : int, optional
The maximum number of calls to the function. If `Dfun` is provided,
then the default `maxfev` is 100*(N+1) where N is the number of elements
in x0, otherwise the default `maxfev` is 200*(N+1).
epsfcn : float, optional
A variable used in determining a suitable step length for the forward-
difference approximation of the Jacobian (for Dfun=None).
Normally the actual step length will be sqrt(epsfcn)*x
If epsfcn is less than the machine precision, it is assumed that the
relative errors are of the order of the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence, optional
N positive entries that serve as scale factors for the variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for an unsuccessful
call).
cov_x : ndarray
The inverse of the Hessian. `fjac` and `ipvt` are used to construct an
estimate of the Hessian. A value of None indicates a singular matrix,
which means the curvature in parameters `x` is numerically flat. To
obtain the covariance matrix of the parameters `x`, `cov_x` must be
multiplied by the variance of the residuals -- see curve_fit.
infodict : dict
a dictionary of optional outputs with the keys:
``nfev``
The number of function calls
``fvec``
The function evaluated at the output
``fjac``
A permutation of the R matrix of a QR
factorization of the final approximate
Jacobian matrix, stored column wise.
Together with ipvt, the covariance of the
estimate can be approximated.
``ipvt``
An integer array of length N which defines
a permutation matrix, p, such that
fjac*p = q*r, where r is upper triangular
with diagonal elements of nonincreasing
magnitude. Column j of p is column ipvt(j)
of the identity matrix.
``qtf``
The vector (transpose(q) * fvec).
mesg : str
A string message giving information about the cause of failure.
ier : int
An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
found. Otherwise, the solution was not found. In either case, the
optional output variable 'mesg' gives more information.
See Also
--------
least_squares : Newer interface to solve nonlinear least-squares problems
with bounds on the variables. See ``method=='lm'`` in particular.
Notes
-----
"leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
cov_x is a Jacobian approximation to the Hessian of the least squares
objective function.
This approximation assumes that the objective function is based on the
difference between some observed target data (ydata) and a (non-linear)
function of the parameters `f(xdata, params)` ::
func(params) = ydata - f(xdata, params)
so that the objective function is ::
min sum((ydata - f(xdata, params))**2, axis=0)
params
The solution, `x`, is always a 1-D array, regardless of the shape of `x0`,
or whether `x0` is a scalar.
Examples
--------
>>> from scipy.optimize import leastsq
>>> def func(x):
... return 2*(x-3)**2+1
>>> leastsq(func, 0)
(array([2.99999999]), 1)
"""
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
m = shape[0]
if n > m:
raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
if epsfcn is None:
epsfcn = finfo(dtype).eps
if Dfun is None:
if maxfev == 0:
maxfev = 200*(n + 1)
retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
gtol, maxfev, epsfcn, factor, diag)
else:
if col_deriv:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
else:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
if maxfev == 0:
maxfev = 100 * (n + 1)
retval = _minpack._lmder(func, Dfun, x0, args, full_output,
col_deriv, ftol, xtol, gtol, maxfev,
factor, diag)
errors = {0: ["Improper input parameters.", TypeError],
1: ["Both actual and predicted relative reductions "
"in the sum of squares\n are at most %f" % ftol, None],
2: ["The relative error between two consecutive "
"iterates is at most %f" % xtol, None],
3: ["Both actual and predicted relative reductions in "
"the sum of squares\n are at most %f and the "
"relative error between two consecutive "
"iterates is at \n most %f" % (ftol, xtol), None],
4: ["The cosine of the angle between func(x) and any "
"column of the\n Jacobian is at most %f in "
"absolute value" % gtol, None],
5: ["Number of calls to function has reached "
"maxfev = %d." % maxfev, ValueError],
6: ["ftol=%f is too small, no further reduction "
"in the sum of squares\n is possible." % ftol,
ValueError],
7: ["xtol=%f is too small, no further improvement in "
"the approximate\n solution is possible." % xtol,
ValueError],
8: ["gtol=%f is too small, func(x) is orthogonal to the "
"columns of\n the Jacobian to machine "
"precision." % gtol, ValueError]}
# The FORTRAN return value (possible return values are >= 0 and <= 8)
info = retval[-1]
if full_output:
cov_x = None
if info in LEASTSQ_SUCCESS:
perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
r = triu(transpose(retval[1]['fjac'])[:n, :])
R = dot(r, perm)
try:
cov_x = inv(dot(transpose(R), R))
except (LinAlgError, ValueError):
pass
return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
else:
if info in LEASTSQ_FAILURE:
warnings.warn(errors[info][0], RuntimeWarning)
elif info == 0:
raise errors[info][1](errors[info][0])
return retval[0], info
def _wrap_func(func, xdata, ydata, transform):
if transform is None:
def func_wrapped(params):
return func(xdata, *params) - ydata
elif transform.ndim == 1:
def func_wrapped(params):
return transform * (func(xdata, *params) - ydata)
else:
# Chisq = (y - yd)^T C^{-1} (y-yd)
# transform = L such that C = L L^T
# C^{-1} = L^{-T} L^{-1}
# Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
# Define (y-yd)' = L^{-1} (y-yd)
# by solving
# L (y-yd)' = (y-yd)
# and minimize (y-yd)'^T (y-yd)'
def func_wrapped(params):
return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
return func_wrapped
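# Illustrative sketch (a hypothetical helper, not part of the public API): a small
# numerical check of the whitening trick used above -- solving L r' = r with
# C = L L^T gives r'^T r' == r^T C^{-1} r. The covariance matrix below is an
# arbitrary positive-definite example.
def _whitening_sketch():
    rng = np.random.RandomState(0)
    A = rng.rand(4, 4)
    C = A @ A.T + 4 * np.eye(4)       # a positive-definite covariance matrix
    r = rng.rand(4)                   # a residual vector
    L = cholesky(C, lower=True)
    r_white = solve_triangular(L, r, lower=True)
    chisq_direct = r @ inv(C) @ r
    chisq_white = r_white @ r_white
    return np.allclose(chisq_direct, chisq_white)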
def _wrap_jac(jac, xdata, transform):
if transform is None:
def jac_wrapped(params):
return jac(xdata, *params)
elif transform.ndim == 1:
def jac_wrapped(params):
return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
else:
def jac_wrapped(params):
return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True)
return jac_wrapped
def _initialize_feasible(lb, ub):
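    # Start inside the feasible region: midpoint when both bounds are finite,
    # one unit inside a single finite bound, and 1.0 when unbounded.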
p0 = np.ones_like(lb)
lb_finite = np.isfinite(lb)
ub_finite = np.isfinite(ub)
mask = lb_finite & ub_finite
p0[mask] = 0.5 * (lb[mask] + ub[mask])
mask = lb_finite & ~ub_finite
p0[mask] = lb[mask] + 1
mask = ~lb_finite & ub_finite
p0[mask] = ub[mask] - 1
return p0
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
check_finite=True, bounds=(-np.inf, np.inf), method=None,
jac=None, **kwargs):
"""
Use non-linear least squares to fit a function, f, to data.
Assumes ``ydata = f(xdata, *params) + eps``.
Parameters
----------
f : callable
The model function, f(x, ...). It must take the independent
variable as the first argument and the parameters to fit as
separate remaining arguments.
xdata : array_like or object
The independent variable where the data is measured.
Should usually be an M-length sequence or an (k,M)-shaped array for
functions with k predictors, but can actually be any object.
ydata : array_like
The dependent data, a length M array - nominally ``f(xdata, ...)``.
p0 : array_like, optional
Initial guess for the parameters (length N). If None, then the
initial values will all be 1 (if the number of parameters for the
function can be determined using introspection, otherwise a
ValueError is raised).
sigma : None or M-length sequence or MxM array, optional
Determines the uncertainty in `ydata`. If we define residuals as
``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
depends on its number of dimensions:
- A 1-D `sigma` should contain values of standard deviations of
errors in `ydata`. In this case, the optimized function is
``chisq = sum((r / sigma) ** 2)``.
- A 2-D `sigma` should contain the covariance matrix of
errors in `ydata`. In this case, the optimized function is
``chisq = r.T @ inv(sigma) @ r``.
.. versionadded:: 0.19
None (default) is equivalent of 1-D `sigma` filled with ones.
absolute_sigma : bool, optional
If True, `sigma` is used in an absolute sense and the estimated parameter
covariance `pcov` reflects these absolute values.
If False (default), only the relative magnitudes of the `sigma` values matter.
The returned parameter covariance matrix `pcov` is based on scaling
`sigma` by a constant factor. This constant is set by demanding that the
reduced `chisq` for the optimal parameters `popt` when using the
*scaled* `sigma` equals unity. In other words, `sigma` is scaled to
match the sample variance of the residuals after the fit. Default is False.
Mathematically,
``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
check_finite : bool, optional
If True, check that the input arrays do not contain nans or infs,
and raise a ValueError if they do. Setting this parameter to
False may silently produce nonsensical results if the input arrays
do contain nans. Default is True.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on parameters. Defaults to no bounds.
Each element of the tuple must be either an array with the length equal
to the number of parameters, or a scalar (in which case the bound is
taken to be the same for all parameters). Use ``np.inf`` with an
appropriate sign to disable bounds on all or some parameters.
.. versionadded:: 0.17
method : {'lm', 'trf', 'dogbox'}, optional
Method to use for optimization. See `least_squares` for more details.
Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
provided. The method 'lm' won't work when the number of observations
is less than the number of variables, use 'trf' or 'dogbox' in this
case.
.. versionadded:: 0.17
jac : callable, string or None, optional
Function with signature ``jac(x, ...)`` which computes the Jacobian
matrix of the model function with respect to parameters as a dense
array_like structure. It will be scaled according to provided `sigma`.
If None (default), the Jacobian will be estimated numerically.
String keywords for 'trf' and 'dogbox' methods can be used to select
a finite difference scheme, see `least_squares`.
.. versionadded:: 0.18
kwargs
Keyword arguments passed to `leastsq` for ``method='lm'`` or
`least_squares` otherwise.
Returns
-------
popt : array
Optimal values for the parameters so that the sum of the squared
residuals of ``f(xdata, *popt) - ydata`` is minimized.
pcov : 2-D array
The estimated covariance of popt. The diagonals provide the variance
of the parameter estimate. To compute one standard deviation errors
on the parameters use ``perr = np.sqrt(np.diag(pcov))``.
How the `sigma` parameter affects the estimated covariance
depends on `absolute_sigma` argument, as described above.
If the Jacobian matrix at the solution doesn't have a full rank, then
'lm' method returns a matrix filled with ``np.inf``, on the other hand
'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute
the covariance matrix.
Raises
------
ValueError
if either `ydata` or `xdata` contain NaNs, or if incompatible options
are used.
RuntimeError
if the least-squares minimization fails.
OptimizeWarning
if covariance of the parameters can not be estimated.
See Also
--------
least_squares : Minimize the sum of squares of nonlinear functions.
scipy.stats.linregress : Calculate a linear least squares regression for
two sets of measurements.
Notes
-----
With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
through `leastsq`. Note that this algorithm can only deal with
unconstrained problems.
Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
the docstring of `least_squares` for more information.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.optimize import curve_fit
>>> def func(x, a, b, c):
... return a * np.exp(-b * x) + c
Define the data to be fit with some noise:
>>> xdata = np.linspace(0, 4, 50)
>>> y = func(xdata, 2.5, 1.3, 0.5)
>>> np.random.seed(1729)
>>> y_noise = 0.2 * np.random.normal(size=xdata.size)
>>> ydata = y + y_noise
>>> plt.plot(xdata, ydata, 'b-', label='data')
Fit for the parameters a, b, c of the function `func`:
>>> popt, pcov = curve_fit(func, xdata, ydata)
>>> popt
array([ 2.55423706, 1.35190947, 0.47450618])
>>> plt.plot(xdata, func(xdata, *popt), 'r-',
... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
Constrain the optimization to the region of ``0 <= a <= 3``,
``0 <= b <= 1`` and ``0 <= c <= 0.5``:
>>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
>>> popt
array([ 2.43708906, 1. , 0.35015434])
>>> plt.plot(xdata, func(xdata, *popt), 'g--',
... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
>>> plt.xlabel('x')
>>> plt.ylabel('y')
>>> plt.legend()
>>> plt.show()
"""
if p0 is None:
# determine number of parameters by inspecting the function
sig = _getfullargspec(f)
args = sig.args
if len(args) < 2:
raise ValueError("Unable to determine number of fit parameters.")
n = len(args) - 1
else:
p0 = np.atleast_1d(p0)
n = p0.size
lb, ub = prepare_bounds(bounds, n)
if p0 is None:
p0 = _initialize_feasible(lb, ub)
bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
if method is None:
if bounded_problem:
method = 'trf'
else:
method = 'lm'
if method == 'lm' and bounded_problem:
raise ValueError("Method 'lm' only works for unconstrained problems. "
"Use 'trf' or 'dogbox' instead.")
# optimization may produce garbage for float32 inputs, cast them to float64
# NaNs cannot be handled
if check_finite:
ydata = np.asarray_chkfinite(ydata, float)
else:
ydata = np.asarray(ydata, float)
if isinstance(xdata, (list, tuple, np.ndarray)):
# `xdata` is passed straight to the user-defined `f`, so allow
# non-array_like `xdata`.
if check_finite:
xdata = np.asarray_chkfinite(xdata, float)
else:
xdata = np.asarray(xdata, float)
if ydata.size == 0:
raise ValueError("`ydata` must not be empty!")
# Determine type of sigma
if sigma is not None:
sigma = np.asarray(sigma)
# if 1-D, sigma are errors, define transform = 1/sigma
if sigma.shape == (ydata.size, ):
transform = 1.0 / sigma
# if 2-D, sigma is the covariance matrix,
# define transform = L such that L L^T = C
elif sigma.shape == (ydata.size, ydata.size):
try:
# scipy.linalg.cholesky requires lower=True to return L L^T = A
transform = cholesky(sigma, lower=True)
except LinAlgError:
raise ValueError("`sigma` must be positive definite.")
else:
raise ValueError("`sigma` has incorrect shape.")
else:
transform = None
func = _wrap_func(f, xdata, ydata, transform)
if callable(jac):
jac = _wrap_jac(jac, xdata, transform)
elif jac is None and method != 'lm':
jac = '2-point'
if 'args' in kwargs:
# The specification for the model function `f` does not support
# additional arguments. Refer to the `curve_fit` docstring for
# acceptable call signatures of `f`.
raise ValueError("'args' is not a supported keyword argument.")
if method == 'lm':
# Remove full_output from kwargs, otherwise we're passing it in twice.
return_full = kwargs.pop('full_output', False)
res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
popt, pcov, infodict, errmsg, ier = res
ysize = len(infodict['fvec'])
cost = np.sum(infodict['fvec'] ** 2)
if ier not in [1, 2, 3, 4]:
raise RuntimeError("Optimal parameters not found: " + errmsg)
else:
# Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
if 'max_nfev' not in kwargs:
kwargs['max_nfev'] = kwargs.pop('maxfev', None)
res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
**kwargs)
if not res.success:
raise RuntimeError("Optimal parameters not found: " + res.message)
ysize = len(res.fun)
cost = 2 * res.cost # res.cost is half sum of squares!
popt = res.x
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(res.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
pcov = np.dot(VT.T / s**2, VT)
return_full = False
warn_cov = False
if pcov is None:
# indeterminate covariance
pcov = zeros((len(popt), len(popt)), dtype=float)
pcov.fill(inf)
warn_cov = True
elif not absolute_sigma:
if ysize > p0.size:
s_sq = cost / (ysize - p0.size)
pcov = pcov * s_sq
else:
pcov.fill(inf)
warn_cov = True
if warn_cov:
warnings.warn('Covariance of the parameters could not be estimated',
category=OptimizeWarning)
if return_full:
return popt, pcov, infodict, errmsg, ier
else:
return popt, pcov
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
"""Perform a simple check on the gradient for correctness.
"""
x = atleast_1d(x0)
n = len(x)
x = x.reshape((n,))
fvec = atleast_1d(fcn(x, *args))
m = len(fvec)
fvec = fvec.reshape((m,))
ldfjac = m
fjac = atleast_1d(Dfcn(x, *args))
fjac = fjac.reshape((m, n))
if col_deriv == 0:
fjac = transpose(fjac)
xp = zeros((n,), float)
err = zeros((m,), float)
fvecp = None
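    # MINPACK's chkder is called twice: mode 1 only constructs the perturbed
    # point xp, mode 2 compares fvecp - fvec against fjac and fills err with
    # values in [0, 1]; values near 1 indicate a consistent gradient, and
    # err > 0.5 is treated as "good" below.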
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
fvecp = atleast_1d(fcn(xp, *args))
fvecp = fvecp.reshape((m,))
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
good = (prod(greater(err, 0.5), axis=0))
return (good, err)
def _del2(p0, p1, d):
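    # Aitken's Delta^2 extrapolation: with d = p2 - 2*p1 + p0, the accelerated
    # iterate is p0 - (p1 - p0)**2 / d, which typically converges faster than
    # the plain fixed-point iteration.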
return p0 - np.square(p1 - p0) / d
def _relerr(actual, desired):
return (actual - desired) / desired
def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
p0 = x0
for i in range(maxiter):
p1 = func(p0, *args)
if use_accel:
p2 = func(p1, *args)
d = p2 - 2.0 * p1 + p0
p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
else:
p = p1
relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
if np.all(np.abs(relerr) < xtol):
return p
p0 = p
msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
raise RuntimeError(msg)
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
"""
Find a fixed point of the function.
Given a function of one or more variables and a starting point, find a
fixed point of the function: i.e., where ``func(x0) == x0``.
Parameters
----------
func : function
Function to evaluate.
x0 : array_like
Fixed point of function.
args : tuple, optional
Extra arguments to `func`.
xtol : float, optional
Convergence tolerance, defaults to 1e-08.
maxiter : int, optional
Maximum number of iterations, defaults to 500.
method : {"del2", "iteration"}, optional
Method of finding the fixed-point, defaults to "del2",
which uses Steffensen's Method with Aitken's ``Del^2``
convergence acceleration [1]_. The "iteration" method simply iterates
the function until convergence is detected, without attempting to
accelerate the convergence.
References
----------
.. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
Examples
--------
>>> from scipy import optimize
>>> def func(x, c1, c2):
... return np.sqrt(c1/(x+c2))
>>> c1 = np.array([10,12.])
>>> c2 = np.array([3, 5.])
>>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
array([ 1.4920333 , 1.37228132])
"""
use_accel = {'del2': True, 'iteration': False}[method]
x0 = _asarray_validated(x0, as_inexact=True)
return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
| bsd-3-clause |
pompiduskus/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is bounded by the fact
that `p` defines an eps-embedding with good probability, as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
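For instance, plugging ``eps = 0.5`` and ``n_samples = 1e6`` into this bound
gives 4 * log(1e6) / (0.5^2 / 2 - 0.5^3 / 3), i.e. roughly 663 components,
independently of the number of features in the original data.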
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows the minimal number of dimensions ``n_components``
to be reduced drastically for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray-level pixel data for 500
  handwritten digit pictures are randomly projected to spaces of various
  larger numbers of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
  features in total are projected using a sparse random matrix to smaller
  Euclidean spaces with various values for the target number of dimensions
  ``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide,
with many distorted pairs and a skewed shape (due to the hard
limit of zero ratio on the left, as distances are always positive),
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
requires at least several thousand dimensions, irrespective of the
number of features of the original dataset.
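For ``eps = 0.1``, for example, the bound above evaluates to roughly 5300
components for 500 samples.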
Hence using random projections on the digits dataset, which only has 64
features in the input space, does not make sense: it does not allow for
dimensionality reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be
decreased from 56436 down to 10000 while reasonably preserving pairwise
distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
tlackemann/hubert | script/index.py | 1 | 12830 | #!/usr/bin/python
##
# Hubert
# This script fetches light event data from Cassandra, normalizes it, and runs
# it against a ridge regression model to gain predictive intelligence on
# future light events.
#
# The algorithm searches for the best polynomial fit and ridge alpha based on
# the mean_squared_error. The degree and alpha with the lowest
# mean_squared_error are then used to make a prediction for a light based on the
# current hour.
#
# Disclaimer: I am an absolute beginner at machine learning. For all I know
# this algorithm is fundamentally flawed. I'm open to feedback and suggestions
# via pull requests or by opening an issue on GitHub.
##
import operator
import json
import time
import calendar
import numpy as np
import cassandra
import pika
from datetime import datetime, timedelta
from time import sleep
from sklearn.metrics import mean_squared_error
from sklearn import datasets
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from cassandra.cluster import Cluster
start_time = time.time()
print '%.2f - Initializing ...' % (time.time())
RABBITMQ_QUEUE = 'hubert_events'
CQL_LIMIT = 1000000
print '%.2f - Connecting to RabbitMQ ...' % (time.time())
rmq_credentials = pika.PlainCredentials('hubert', 'hubert')
rmq = pika.BlockingConnection(pika.ConnectionParameters('hubert.rabbitmq', credentials=rmq_credentials))
rmq_channel = rmq.channel()
rmq_channel.queue_declare(queue=RABBITMQ_QUEUE)
print '%.2f - Connecting to Cassandra ...' % (time.time())
# Connect to Cassandra
cluster = Cluster(['cassandra'])
session = cluster.connect('hubert')
print '%.2f - Fetching lights ...' % (time.time())
lights = session.execute('SELECT * FROM lights')
print '%.2f - Fetched %s lights from database ...' % (time.time(), len(lights.current_rows))
for light in lights:
print '%.2f - "%s">> Processing light ...' % (time.time(), light.name)
# Declare our X input (week hour)
X = []
# Declare our Y output (reachable && state_on, bri, hue, sat, x, y)
Y = []
# Now load all the events for this light
sql = 'SELECT light_id, state_on, reachable, bri, hue, sat, x, y, ts FROM light_events WHERE light_id = %s ORDER BY ts DESC LIMIT %s'
light_events = session.execute(sql, [light.light_id, CQL_LIMIT])
    # We need to know how many rows there are; this is stupid but it works.
    # The reason is how the Cassandra driver works - it will not fetch
# all rows but rather the first 5000 and then rely on fetch_next_page()
# or require a for loop. Since that is just stupid, there's this ...
sql_count = 'SELECT count(*) as c FROM light_events WHERE light_id = %s ORDER BY ts DESC LIMIT %s'
light_event_count = session.execute(sql_count, [light.light_id, CQL_LIMIT])
total_rows = light_event_count.current_rows[0].c
print '%.2f - "%s">> Fetched %s total rows' % (time.time(), light.name, total_rows)
# Loop over each light and store the data in X and Y
minutes_in_day = 1440
recordings_per_minute = 6 # @todo - Expects default setting to record every 10 seconds
recordings_per_day = minutes_in_day * recordings_per_minute
# Store the conditionals for our phases
phase_1_condition = total_rows < recordings_per_day * 14
phase_2_condition = total_rows >= recordings_per_day * 14 and total_rows < recordings_per_day * 60
# We need at least a week's worth of data before we start predicting
if total_rows >= recordings_per_day * 7:
print '%.2f - "%s">> Preparing to format data ...' % (time.time(), light.name)
for event in light_events:
# Get the datetime of the event
event_time = cassandra.util.datetime_from_uuid1(event.ts)
# Monday is 0 and Sunday is 6
day_of_week = event_time.weekday()
current_day = event_time.day
current_month = event_time.month
current_year = event_time.year
current_hour = event_time.hour
current_minute = event_time.minute
# Light is ON if it's both reachable and declared on
event_state = 1 if (event.reachable and event.state_on) else 0
# Depending on the amount of data we have, we're going to build the
# linear regression model slightly different to get the best performance
# out of the model.
# Phase I: Train by minutes in day (2 days+)
# X = Total amount of minutes passed on recorded day (0-1439)
if phase_1_condition:
X.append([(current_hour * 60) + current_minute])
# Features: state
Y.append([event_state])
# Phase II: Train by minutes in week (14 days+)
# X = Total amount of minutes passed during recorded week (0-10079)
elif phase_2_condition:
if day_of_week > 0:
X.append([((current_hour * 60) + current_minute) * day_of_week])
else:
X.append([(current_hour * 60) + current_minute])
# Features: state, hue, bri, sat
Y.append([event_state, event.hue, event.bri, event.sat])
# Phase III: Train by minutes in month (2 months+)
# X = Total amount of minutes passed during recorded month (0-n)
else:
eq = (current_hour * 60) + current_minute
if current_day > 0:
eq = eq * current_day
X.append([eq])
# Features: state, hue, bri, sat, x, y
Y.append([event_state, event.hue, event.bri, event.sat, event.x, event.y])
# Split the data into training/testing sets
# We'll do an 80/20 split
split_80 = int(round(total_rows * 0.8))
split_20 = total_rows - split_80
print '%.2f - "%s">> Total rows: %s, Split 80: %s, Split 20: %s ...' % (time.time(), light.name, total_rows, split_80, split_20)
X_train = X[:-1 * split_20]
X_test = X[-1 * split_20:]
# Split the targets into training/testing sets
Y_train = Y[:-1 * split_20]
Y_test = Y[-1 * split_20:]
# Find the optimal polynomial degree
final_est = False
final_degree = 0
final_alpha = 0.
final_train_error = False
final_test_error = False
print '%.2f - "%s">> Finding best fit for ridge regression model ...' % (time.time(), light.name)
for degree in range(10):
# Find the optimal l2_penalty/alpha
for alpha in [0.0, 1e-8, 1e-5, 1e-1]:
est = make_pipeline(PolynomialFeatures(degree), Ridge(alpha=alpha))
est.fit(X_train, Y_train)
# Training error
tmp_alpha_train_error = mean_squared_error(Y_train, est.predict(X_train))
# Test error
tmp_alpha_test_error = mean_squared_error(Y_test, est.predict(X_test))
# Is it the lowest one?
if final_est == False or abs(tmp_alpha_test_error) < final_test_error:
final_est = est
final_alpha = alpha
final_degree = degree
final_test_error = tmp_alpha_test_error
final_train_error = tmp_alpha_train_error
rss = final_test_error
print '%.2f - "%s">> Using degree=%s and alpha=%s for ridge regression algorithm' % (time.time(), light.name, final_degree, final_alpha)
print '%.2f - "%s">> Best training error: %.6f' % (time.time(), light.name, final_train_error)
print '%.2f - "%s">> Best test error: %.6f' % (time.time(), light.name, final_test_error)
print '%.2f - "%s">> Residual sum of squares: %.2f' % (time.time(), light.name, rss)
# Now that we've run our model, let's determine if we should alter the state
# of the lights
right_now = datetime.now()
# We want to be pretty sure we're going to do something right
if rss < 0.1:
# Make sure we have enough observations
if phase_1_condition:
# Predict based on the current minute
prediction_minutes = (right_now.hour * 60) + right_now.minute
prediction = final_est.predict(prediction_minutes)
predicted_state = {
'id': light.light_id,
'on': True if int(round(prediction[0][0])) == 1 else False
}
elif phase_2_condition:
# Predict based on the current minute in the week
prediction_minutes = (right_now.hour * 60) + right_now.minute
right_now_weekday = right_now.weekday()
if right_now_weekday > 0:
prediction_minutes = prediction_minutes * right_now_weekday
prediction = final_est.predict(prediction_minutes)
predicted_state = {
'id': light.light_id,
'on': True if int(round(prediction[0][0])) == 1 else False,
'hue': int(prediction[0][1]),
'bri': int(prediction[0][2]),
'sat': int(prediction[0][3])
}
else:
# Predict based on the current minute in the month
prediction_minutes = (right_now.hour * 60) + right_now.minute
right_now_weekday = right_now.weekday()
if right_now_weekday > 0:
prediction_minutes = prediction_minutes * right_now_weekday
if right_now.day > 0:
prediction_minutes = prediction_minutes * right_now.day
prediction = final_est.predict(prediction_minutes)
predicted_state = {
'id': light.light_id,
'on': True if int(round(prediction[0][0])) == 1 else False,
'hue': int(prediction[0][1]),
'bri': int(prediction[0][2]),
'sat': int(prediction[0][3]),
'xy': [ round(prediction[0][4], 4), round(prediction[0][5], 4) ]
}
# Features: state, hue, bri, sat, x, y
confidence = final_est.score(X_test, Y_test) # 1 is perfect prediction
state_message = json.dumps(predicted_state)
print '%.2f - "%s">> Modifying state of light ...' % (time.time(), light.name)
print '%.2f - "%s">> The time is %s' % (time.time(), light.name, right_now)
print '%.2f - "%s">> Predicting for current minute %s/%s' % (time.time(), light.name, prediction_minutes, minutes_in_day - 1)
print '%.2f - "%s">> Predicting state: %s (Confidence: %.2f)' % (time.time(), light.name, 'ON' if predicted_state['on'] else 'OFF', confidence)
if 'hue' in predicted_state:
print '%.2f - "%s">> Predicting hue: %s (Confidence: %.2f)' % (time.time(), light.name, predicted_state['hue'], confidence)
if 'bri' in predicted_state:
print '%.2f - "%s">> Predicting bri: %s (Confidence: %.2f)' % (time.time(), light.name, predicted_state['bri'], confidence)
if 'sat' in predicted_state:
print '%.2f - "%s">> Predicting sat: %s (Confidence: %.2f)' % (time.time(), light.name, predicted_state['sat'], confidence)
if 'xy' in predicted_state:
print '%.2f - "%s">> Predicting xy: %s (Confidence: %.2f)' % (time.time(), light.name, predicted_state['xy'], confidence)
# Update the state of our light
# But only if we're pretty confident
if confidence > 0.8:
print '%.2f - "%s">> Sending message for processing ...' % (time.time(), light.name)
rmq_channel.basic_publish(exchange='', routing_key=RABBITMQ_QUEUE, body=state_message)
else:
print '%.2f - "%s">> Confidence too low to process' % (time.time(), light.name)
# RSS too high
else:
print '%.2f - "%s">> RSS too high, nothing to do' % (time.time(), light.name)
print '%.2f - "%s">> Done processing light' % (time.time(), light.name)
else:
print '%.2f - "%s">> Skipping light, not enough data to significantly train/test' % (time.time(), light.name)
# @todo - Save the weights of the algorithm to feed in later, this is going to
# get *super* expensive to run every single minute for anything over 5+ lights
#
# Testing with 20k+ rows + 5 lights takes ~14 seconds to complete
# Done!
end_time = time.time()
total_time = end_time - start_time
rmq.close()
cluster.shutdown()
print '%.2f - Done! (Ran in %.6f seconds)' % (time.time(), total_time)
| apache-2.0 |
xuewei4d/scikit-learn | sklearn/covariance/tests/test_graphical_lasso.py | 9 | 8406 | """ Test the graphical_lasso module.
"""
import sys
import pytest
import numpy as np
from scipy import linalg
from numpy.testing import assert_allclose
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_less
from sklearn.covariance import (graphical_lasso, GraphicalLasso,
GraphicalLassoCV, empirical_covariance)
from sklearn.datasets import make_sparse_spd_matrix
from io import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graphical_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graphical_lasso(emp_cov, return_costs=True,
alpha=alpha, mode=method)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphicalLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphicalLasso(
assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graphical_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# (need to set penalize.diagonal to FALSE)
cov_R = np.array([
[0.68112222, 0.0000000, 0.265820, 0.02464314],
[0.00000000, 0.1887129, 0.000000, 0.00000000],
[0.26582000, 0.0000000, 3.095503, 0.28697200],
[0.02464314, 0.0000000, 0.286972, 0.57713289]
])
icov_R = np.array([
[1.5190747, 0.000000, -0.1304475, 0.0000000],
[0.0000000, 5.299055, 0.0000000, 0.0000000],
[-0.1304475, 0.000000, 0.3498624, -0.1683946],
[0.0000000, 0.000000, -0.1683946, 1.8164353]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graphical_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_2D():
# Hard-coded solution from Python skggm package
# obtained by calling `quic(emp_cov, lam=.1, tol=1e-8)`
cov_skggm = np.array([[3.09550269, 1.186972],
[1.186972, 0.57713289]])
icov_skggm = np.array([[1.52836773, -3.14334831],
[-3.14334831, 8.19753385]])
X = datasets.load_iris().data[:, 2:]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graphical_lasso(emp_cov, alpha=.1, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_skggm)
assert_array_almost_equal(icov, icov_skggm)
def test_graphical_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graphical_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graphical_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphicalLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphicalLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
# TODO: Remove in 1.1 when grid_scores_ is deprecated
def test_graphical_lasso_cv_grid_scores_and_cv_alphas_deprecated():
splits = 4
n_alphas = 5
n_refinements = 3
true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
[0.0, 0.4, 0.0, 0.0],
[0.2, 0.0, 0.3, 0.1],
[0.0, 0.0, 0.1, 0.7]])
rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
cov = GraphicalLassoCV(cv=splits, alphas=n_alphas,
n_refinements=n_refinements).fit(X)
total_alphas = n_refinements * n_alphas + 1
msg = (r"The grid_scores_ attribute is deprecated in version 0\.24 in "
r"favor of cv_results_ and will be removed in version 1\.1 "
r"\(renaming of 0\.26\).")
with pytest.warns(FutureWarning, match=msg):
assert cov.grid_scores_.shape == (total_alphas, splits)
msg = (r"The cv_alphas_ attribute is deprecated in version 0\.24 in "
r"favor of cv_results_\['alpha'\] and will be removed in version "
r"1\.1 \(renaming of 0\.26\)")
with pytest.warns(FutureWarning, match=msg):
assert len(cov.cv_alphas_) == total_alphas
def test_graphical_lasso_cv_scores():
splits = 4
n_alphas = 5
n_refinements = 3
true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
[0.0, 0.4, 0.0, 0.0],
[0.2, 0.0, 0.3, 0.1],
[0.0, 0.0, 0.1, 0.7]])
rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
cov = GraphicalLassoCV(cv=splits, alphas=n_alphas,
n_refinements=n_refinements).fit(X)
cv_results = cov.cv_results_
# alpha and one for each split
total_alphas = n_refinements * n_alphas + 1
keys = ['alphas']
split_keys = ['split{}_score'.format(i) for i in range(splits)]
for key in keys + split_keys:
assert key in cv_results
assert len(cv_results[key]) == total_alphas
cv_scores = np.asarray([cov.cv_results_[key] for key in split_keys])
expected_mean = cv_scores.mean(axis=0)
expected_std = cv_scores.std(axis=0)
assert_allclose(cov.cv_results_["mean_score"], expected_mean)
assert_allclose(cov.cv_results_["std_score"], expected_std)
| bsd-3-clause |
semonte/intellij-community | python/helpers/pydev/pydevconsole.py | 6 | 17664 | '''
Entry point module to start the interactive console.
'''
from _pydev_imps._pydev_saved_modules import thread
start_new_thread = thread.start_new_thread
try:
from code import InteractiveConsole
except ImportError:
from _pydevd_bundle.pydevconsole_code_for_ironpython import InteractiveConsole
from code import compile_command
from code import InteractiveInterpreter
import os
import sys
from _pydev_imps._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_constants import INTERACTIVE_MODE_AVAILABLE
import traceback
from _pydev_bundle import fix_getpass
fix_getpass.fix_getpass()
from _pydevd_bundle import pydevd_vars, pydevd_save_locals
from _pydev_bundle.pydev_imports import Exec, _queue
try:
import __builtin__
except:
import builtins as __builtin__ # @UnresolvedImport
try:
False
True
except NameError: # version < 2.3 -- didn't have the True/False builtins
import __builtin__
setattr(__builtin__, 'True', 1) #Python 3.0 does not accept __builtin__.True = 1 in its syntax
setattr(__builtin__, 'False', 0)
from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface, BaseStdIn
from _pydev_bundle.pydev_console_utils import CodeFragment
IS_PYTHON_3K = False
IS_PY24 = False
try:
if sys.version_info[0] == 3:
IS_PYTHON_3K = True
elif sys.version_info[0] == 2 and sys.version_info[1] == 4:
IS_PY24 = True
except:
#That's OK, not all versions of python have sys.version_info
pass
class Command:
def __init__(self, interpreter, code_fragment):
"""
:type code_fragment: CodeFragment
:type interpreter: InteractiveConsole
"""
self.interpreter = interpreter
self.code_fragment = code_fragment
self.more = None
def symbol_for_fragment(code_fragment):
if code_fragment.is_single_line:
symbol = 'single'
else:
symbol = 'exec' # Jython doesn't support this
return symbol
symbol_for_fragment = staticmethod(symbol_for_fragment)
def run(self):
text = self.code_fragment.text
symbol = self.symbol_for_fragment(self.code_fragment)
self.more = self.interpreter.runsource(text, '<input>', symbol)
try:
try:
execfile #Not in Py3k
except NameError:
from _pydev_bundle.pydev_imports import execfile
__builtin__.execfile = execfile
except:
pass
# Pull in runfile, the interface to UMD that wraps execfile
from _pydev_bundle.pydev_umd import runfile, _set_globals_function
try:
import builtins # @UnresolvedImport
builtins.runfile = runfile
except:
import __builtin__
__builtin__.runfile = runfile
#=======================================================================================================================
# InterpreterInterface
#=======================================================================================================================
class InterpreterInterface(BaseInterpreterInterface):
'''
The methods in this class should be registered in the xml-rpc server.
'''
def __init__(self, host, client_port, mainThread, show_banner=True):
BaseInterpreterInterface.__init__(self, mainThread)
self.client_port = client_port
self.host = host
self.namespace = {}
self.interpreter = InteractiveConsole(self.namespace)
self._input_error_printed = False
def do_add_exec(self, codeFragment):
command = Command(self.interpreter, codeFragment)
command.run()
return command.more
def get_namespace(self):
return self.namespace
def getCompletions(self, text, act_tok):
try:
from _pydev_bundle._pydev_completer import Completer
completer = Completer(self.namespace, None)
return completer.complete(act_tok)
except:
import traceback
traceback.print_exc()
return []
def close(self):
sys.exit(0)
def get_greeting_msg(self):
return 'PyDev console: starting.\n'
class _ProcessExecQueueHelper:
_debug_hook = None
_return_control_osc = False
def set_debug_hook(debug_hook):
_ProcessExecQueueHelper._debug_hook = debug_hook
def init_mpl_in_console(interpreter):
from pydev_ipython.inputhook import set_return_control_callback
def return_control():
''' A function that the inputhooks can call (via inputhook.stdin_ready()) to find
out if they should cede control and return '''
if _ProcessExecQueueHelper._debug_hook:
# Some of the input hooks check return control without doing
# a single operation, so we don't return True on every
# call when the debug hook is in place to allow the GUI to run
# XXX: Eventually the inputhook code will have diverged enough
# from the IPython source that it will be worthwhile rewriting
# it rather than pretending to maintain the old API
_ProcessExecQueueHelper._return_control_osc = not _ProcessExecQueueHelper._return_control_osc
if _ProcessExecQueueHelper._return_control_osc:
return True
if not interpreter.exec_queue.empty():
return True
return False
set_return_control_callback(return_control)
if not INTERACTIVE_MODE_AVAILABLE:
return
from _pydev_bundle.pydev_import_hook import import_hook_manager
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot
import_hook_manager.add_module_name("matplotlib", lambda: activate_matplotlib(interpreter.enableGui))
# enable_gui_function in activate_matplotlib should be called in main thread. That's why we call
# interpreter.enableGui which put it into the interpreter's exec_queue and executes it in the main thread.
import_hook_manager.add_module_name("pylab", activate_pylab)
import_hook_manager.add_module_name("pyplot", activate_pyplot)
def process_exec_queue(interpreter):
init_mpl_in_console(interpreter)
from pydev_ipython.inputhook import get_inputhook
while 1:
# Running the request may have changed the inputhook in use
inputhook = get_inputhook()
if _ProcessExecQueueHelper._debug_hook:
_ProcessExecQueueHelper._debug_hook()
if inputhook:
try:
# Note: it'll block here until return_control returns True.
inputhook()
except:
import traceback;traceback.print_exc()
try:
try:
code_fragment = interpreter.exec_queue.get(block=True, timeout=1/20.) # 20 calls/second
except _queue.Empty:
continue
if hasattr(code_fragment, '__call__'):
# It can be a callable (i.e.: something that must run in the main
# thread can be put in the queue for later execution).
code_fragment()
else:
more = interpreter.add_exec(code_fragment)
except KeyboardInterrupt:
interpreter.buffer = None
continue
except SystemExit:
raise
except:
type, value, tb = sys.exc_info()
traceback.print_exception(type, value, tb, file=sys.__stderr__)
exit()
if 'IPYTHONENABLE' in os.environ:
IPYTHON = os.environ['IPYTHONENABLE'] == 'True'
else:
IPYTHON = True
try:
try:
exitfunc = sys.exitfunc
except AttributeError:
exitfunc = None
if IPYTHON:
from _pydev_bundle.pydev_ipython_console import InterpreterInterface
if exitfunc is not None:
sys.exitfunc = exitfunc
else:
try:
delattr(sys, 'exitfunc')
except:
pass
except:
IPYTHON = False
pass
#=======================================================================================================================
# _DoExit
#=======================================================================================================================
def do_exit(*args):
'''
We have to override the exit because calling sys.exit will only actually exit the main thread,
    and as we're in an XML-RPC server, that won't work.
'''
try:
import java.lang.System
java.lang.System.exit(1)
except ImportError:
if len(args) == 1:
os._exit(args[0])
else:
os._exit(0)
def handshake():
return "PyCharm"
#=======================================================================================================================
# start_console_server
#=======================================================================================================================
def start_console_server(host, port, interpreter):
if port == 0:
host = ''
#I.e.: supporting the internal Jython version in PyDev to create a Jython interactive console inside Eclipse.
from _pydev_bundle.pydev_imports import SimpleXMLRPCServer as XMLRPCServer #@Reimport
try:
if IS_PY24:
server = XMLRPCServer((host, port), logRequests=False)
else:
server = XMLRPCServer((host, port), logRequests=False, allow_none=True)
except:
sys.stderr.write('Error starting server with host: "%s", port: "%s", client_port: "%s"\n' % (host, port, interpreter.client_port))
sys.stderr.flush()
raise
# Tell UMD the proper default namespace
_set_globals_function(interpreter.get_namespace)
server.register_function(interpreter.execLine)
server.register_function(interpreter.execMultipleLines)
server.register_function(interpreter.getCompletions)
server.register_function(interpreter.getFrame)
server.register_function(interpreter.getVariable)
server.register_function(interpreter.changeVariable)
server.register_function(interpreter.getDescription)
server.register_function(interpreter.close)
server.register_function(interpreter.interrupt)
server.register_function(handshake)
server.register_function(interpreter.connectToDebugger)
server.register_function(interpreter.hello)
server.register_function(interpreter.getArray)
server.register_function(interpreter.evaluate)
server.register_function(interpreter.ShowConsole)
# Functions for GUI main loop integration
server.register_function(interpreter.enableGui)
if port == 0:
(h, port) = server.socket.getsockname()
print(port)
print(interpreter.client_port)
sys.stderr.write(interpreter.get_greeting_msg())
sys.stderr.flush()
while True:
try:
server.serve_forever()
except:
# Ugly code to be py2/3 compatible
# https://sw-brainwy.rhcloud.com/tracker/PyDev/534:
# Unhandled "interrupted system call" error in the pydevconsol.py
e = sys.exc_info()[1]
retry = False
try:
retry = e.args[0] == 4 #errno.EINTR
except:
pass
if not retry:
raise
# Otherwise, keep on going
return server
def start_server(host, port, client_port):
#replace exit (see comments on method)
#note that this does not work in jython!!! (sys method can't be replaced).
sys.exit = do_exit
interpreter = InterpreterInterface(host, client_port, threading.currentThread())
start_new_thread(start_console_server,(host, port, interpreter))
process_exec_queue(interpreter)
def get_ipython_hidden_vars():
if IPYTHON and hasattr(__builtin__, 'interpreter'):
interpreter = get_interpreter()
return interpreter.get_ipython_hidden_vars_dict()
def get_interpreter():
try:
interpreterInterface = getattr(__builtin__, 'interpreter')
except AttributeError:
interpreterInterface = InterpreterInterface(None, None, threading.currentThread())
setattr(__builtin__, 'interpreter', interpreterInterface)
sys.stderr.write(interpreterInterface.get_greeting_msg())
sys.stderr.flush()
return interpreterInterface
def get_completions(text, token, globals, locals):
interpreterInterface = get_interpreter()
interpreterInterface.interpreter.update(globals, locals)
return interpreterInterface.getCompletions(text, token)
#===============================================================================
# Debugger integration
#===============================================================================
def exec_code(code, globals, locals, debugger):
interpreterInterface = get_interpreter()
interpreterInterface.interpreter.update(globals, locals)
res = interpreterInterface.need_more(code)
if res:
return True
interpreterInterface.add_exec(code, debugger)
return False
class ConsoleWriter(InteractiveInterpreter):
skip = 0
def __init__(self, locals=None):
InteractiveInterpreter.__init__(self, locals)
def write(self, data):
#if (data.find("global_vars") == -1 and data.find("pydevd") == -1):
if self.skip > 0:
self.skip -= 1
else:
if data == "Traceback (most recent call last):\n":
self.skip = 1
sys.stderr.write(data)
def showsyntaxerror(self, filename=None):
"""Display the syntax error that just occurred."""
#Override for avoid using sys.excepthook PY-12600
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value.args
except ValueError:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename
value = SyntaxError(msg, (filename, lineno, offset, line))
sys.last_value = value
list = traceback.format_exception_only(type, value)
sys.stderr.write(''.join(list))
def showtraceback(self):
"""Display the exception that just occurred."""
#Override for avoid using sys.excepthook PY-12600
try:
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
lines = traceback.format_list(tblist)
if lines:
lines.insert(0, "Traceback (most recent call last):\n")
lines.extend(traceback.format_exception_only(type, value))
finally:
tblist = tb = None
sys.stderr.write(''.join(lines))
def console_exec(thread_id, frame_id, expression, dbg):
"""returns 'False' in case expression is partially correct
"""
frame = pydevd_vars.find_frame(thread_id, frame_id)
is_multiline = expression.count('@LINE@') > 1
expression = str(expression.replace('@LINE@', '\n'))
#Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
#(Names not resolved in generator expression in method)
#See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) #locals later because it has precedence over the actual globals
if IPYTHON:
need_more = exec_code(CodeFragment(expression), updated_globals, frame.f_locals, dbg)
if not need_more:
pydevd_save_locals.save_locals(frame)
return need_more
interpreter = ConsoleWriter()
if not is_multiline:
try:
code = compile_command(expression)
except (OverflowError, SyntaxError, ValueError):
            # Case 1: the expression is not valid Python - report the syntax error
interpreter.showsyntaxerror()
return False
if code is None:
            # Case 2: compile_command returned None - the input is incomplete, ask for more
return True
else:
code = expression
    # Case 3: complete code (single- or multi-line) - execute it in the frame's scope
try:
Exec(code, updated_globals, frame.f_locals)
except SystemExit:
raise
except:
interpreter.showtraceback()
else:
pydevd_save_locals.save_locals(frame)
return False
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
#Important: don't use this module directly as the __main__ module, rather, import itself as pydevconsole
#so that we don't get multiple pydevconsole modules if it's executed directly (otherwise we'd have multiple
#representations of its classes).
#See: https://sw-brainwy.rhcloud.com/tracker/PyDev/446:
#'Variables' and 'Expressions' views stopped working when debugging interactive console
import pydevconsole
sys.stdin = pydevconsole.BaseStdIn(sys.stdin)
port, client_port = sys.argv[1:3]
from _pydev_bundle import pydev_localhost
if int(port) == 0 and int(client_port) == 0:
(h, p) = pydev_localhost.get_socket_name()
client_port = p
pydevconsole.start_server(pydev_localhost.get_localhost(), int(port), int(client_port))
| apache-2.0 |