repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes) |
---|---|---|---|---|---|
ilyes14/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
xavierwu/scikit-learn | sklearn/manifold/setup.py | 99 | 1243 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.c"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
ZHYfeng/malicious-code-conceal | 3-2-multi-programmes-big/Multiverso-master/binding/python/examples/theano/cnn.py | 6 | 5128 | #!/usr/bin/env python
# coding:utf8
"""
This code is adapted from
https://github.com/benanne/theano-tutorial/blob/master/6_convnet.py
The MIT License (MIT)
Copyright (c) 2015 Sander Dieleman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
import load_data
from theano.tensor.nnet import conv
from theano.tensor.signal import downsample
# MULTIVERSO: import multiverso
import multiverso as mv
# MULTIVERSO: the sharedvar in theano_ext acts the same as Theano's
# shared variables, but it uses Multiverso as the backend
from multiverso.theano_ext import sharedvar
x_train, t_train, x_test, t_test = load_data.load_cifar10()
labels_test = np.argmax(t_test, axis=1)
# reshape data
x_train = x_train.reshape((x_train.shape[0], 3, 32, 32))
x_test = x_test.reshape((x_test.shape[0], 3, 32, 32))
# define symbolic Theano variables
x = T.tensor4()
t = T.matrix()
# define model: neural network
def floatX(x):
return np.asarray(x, dtype=theano.config.floatX)
def init_weights(shape, name):
# MULTIVERSO: replace the shared variable with mv_shared
return sharedvar.mv_shared(floatX(np.random.randn(*shape) * 0.1), name=name)
def momentum(cost, params, learning_rate, momentum):
grads = theano.grad(cost, params)
updates = []
for p, g in zip(params, grads):
# MULTIVERSO: replace the shared variable with mv_shared
mparam_i = sharedvar.mv_shared(np.zeros(p.get_value().shape, dtype=theano.config.floatX))
v = momentum * mparam_i - learning_rate * g
updates.append((mparam_i, v))
updates.append((p, p + v))
return updates
def model(x, w_c1, b_c1, w_c2, b_c2, w_h3, b_h3, w_o, b_o):
c1 = T.maximum(0, conv.conv2d(x, w_c1) + b_c1.dimshuffle('x', 0, 'x', 'x'))
p1 = downsample.max_pool_2d(c1, (3, 3))
c2 = T.maximum(0, conv.conv2d(p1, w_c2) + b_c2.dimshuffle('x', 0, 'x', 'x'))
p2 = downsample.max_pool_2d(c2, (2, 2))
p2_flat = p2.flatten(2)
h3 = T.maximum(0, T.dot(p2_flat, w_h3) + b_h3)
p_y_given_x = T.nnet.softmax(T.dot(h3, w_o) + b_o)
return p_y_given_x
# MULTIVERSO: you should call mv.init before call multiverso apis
mv.init()
worker_id = mv.worker_id()
# MULTIVERSO: every process has distinct worker id
workers_num = mv.workers_num()
w_c1 = init_weights((4, 3, 3, 3), name="w_c1")
b_c1 = init_weights((4,), name="b_c1")
w_c2 = init_weights((8, 4, 3, 3), name="w_c2")
b_c2 = init_weights((8,), name="b_c2")
w_h3 = init_weights((8 * 4 * 4, 100), name="w_h3")
b_h3 = init_weights((100,), name="b_h3")
w_o = init_weights((100, 10), name="w_o")
b_o = init_weights((10,), name="b_o")
params = [w_c1, b_c1, w_c2, b_c2, w_h3, b_h3, w_o, b_o]
p_y_given_x = model(x, *params)
y = T.argmax(p_y_given_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(p_y_given_x, t))
updates = momentum(cost, params, learning_rate=0.01, momentum=0.9)
# compile theano functions
train = theano.function([x, t], cost, updates=updates, allow_input_downcast=True)
predict = theano.function([x], y, allow_input_downcast=True)
# MULTIVERSO: all the workers will synchronize at the place you call barrier
mv.barrier()
# train model
batch_size = 50
for i in range(50):
for start in range(0, len(x_train), batch_size):
# every process only trains on the batches assigned to it
if start / batch_size % workers_num != worker_id:
continue
x_batch = x_train[start:start + batch_size]
t_batch = t_train[start:start + batch_size]
cost = train(x_batch, t_batch)
# MULTIVERSO: sync value with multiverso after every batch
sharedvar.sync_all_mv_shared_vars()
# MULTIVERSO: all the workers will synchronize at the place you call barrier
mv.barrier() # barrier every epoch
# the master worker will calculate the accuracy
if mv.is_master_worker():
predictions_test = predict(x_test)
accuracy = np.mean(predictions_test == labels_test)
print "epoch %d - accuracy: %.4f" % (i + 1, accuracy)
# MULTIVERSO: You must call shutdown at the end of the file
mv.shutdown()
| apache-2.0 |
hsiaoyi0504/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
themrmax/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 105 | 4300 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each possible
value in ``n_clusters_range``.
When ``fixed_n_classes`` is not None, the first labeling is considered a
ground truth class assignment with a fixed number of classes.
"""
random_labels = np.random.RandomState(seed).randint
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes, size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k, size=n_samples)
labels_b = random_labels(low=0, high=k, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
RobertABT/heightmap | build/scipy/scipy/stats/_discrete_distns.py | 4 | 20332 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import gammaln as gamln
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
import numpy.random as mtrand
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf)
__all__ = [
'binom', 'bernoulli', 'nbinom', 'geom', 'hypergeom',
'logser', 'poisson', 'planck', 'boltzmann', 'randint',
'zipf', 'dlaplace', 'skellam'
]
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(example)s
"""
def _rvs(self, n, p):
return mtrand.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
q = 1.0-p
mu = n * p
var = n * p * q
g1 = (q-p) / sqrt(n*p*q)
g2 = (1.0-6*p*q)/(n*p*q)
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
h = -np.sum(special.xlogy(vals, vals), axis=0)
return h
binom = binom_gen(name='binom')
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
h = -special.xlogy(p, p) - special.xlogy(1 - p, 1 - p)
return h
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(example)s
"""
def _rvs(self, n, p):
return mtrand.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n >= 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + x*log(1-p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, p):
return mtrand.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return (k-1) * log(1-p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is the total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return mtrand.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = rv_discrete._argcheck(self, M, n, N)
cond &= (n <= M) & (N <= M)
self.a = max(N-(M-n), 0)
self.b = min(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
h = -np.sum(special.xlogy(vals, vals), axis=0)
return h
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return mtrand.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(example)s
"""
def _rvs(self, mu):
return mtrand.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
g1 = sqrt(1.0 / tmp)
g2 = 1.0 / tmp
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = np.inf
return 1
elif (lambda_ < 0):
self.a = -np.inf
self.b = 0
return 1
else:
return 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
boltzmann.pmf(k) = (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
Note the difference to the numpy ``random_integers`` which
returns integers on a *closed* interval ``[low, high]``.
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high=None):
"""An array of *size* random integers >= ``low`` and < ``high``.
If ``high`` is ``None``, then the range is >= 0 and < ``low``.
"""
return mtrand.randint(low, high, self._size)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(example)s
"""
def _rvs(self, a):
return mtrand.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return mtrand.poisson(mu1, n) - mtrand.poisson(mu2, n)
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
| mit |
Obus/scikit-learn | examples/decomposition/plot_image_denoising.py | 84 | 5820 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image using online :ref:`DictionaryLearning` together with
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the results of :ref:`omp` with two
non-zero coefficients are a bit less biased than when keeping only one
(the edges look less prominent). They are, in addition, closer to the
ground truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
bartosh/zipline | tests/test_benchmark.py | 5 | 8023 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from pandas.util.testing import assert_series_equal
from zipline.data.data_portal import DataPortal
from zipline.errors import (
BenchmarkAssetNotAvailableTooEarly,
BenchmarkAssetNotAvailableTooLate,
InvalidBenchmarkAsset)
from zipline.sources.benchmark_source import BenchmarkSource
from zipline.testing import (
MockDailyBarReader,
create_minute_bar_data,
tmp_bcolz_equity_minute_bar_reader,
)
from zipline.testing.fixtures import (
WithDataPortal,
WithSimParams,
WithTradingCalendars,
ZiplineTestCase,
)
class TestBenchmark(WithDataPortal, WithSimParams, WithTradingCalendars,
ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-03', tz='utc')
END_DATE = pd.Timestamp('2006-12-29', tz='utc')
@classmethod
def make_equity_info(cls):
return pd.DataFrame.from_dict(
{
1: {
'symbol': 'A',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE + pd.Timedelta(days=1),
"exchange": "TEST",
},
2: {
'symbol': 'B',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE + pd.Timedelta(days=1),
"exchange": "TEST",
},
3: {
'symbol': 'C',
'start_date': pd.Timestamp('2006-05-26', tz='utc'),
'end_date': pd.Timestamp('2006-08-09', tz='utc'),
"exchange": "TEST",
},
4: {
'symbol': 'D',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE + pd.Timedelta(days=1),
"exchange": "TEST",
},
},
orient='index',
)
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return MockDailyBarReader()
@classmethod
def make_stock_dividends_data(cls):
declared_date = cls.sim_params.sessions[45]
ex_date = cls.sim_params.sessions[50]
record_date = pay_date = cls.sim_params.sessions[55]
return pd.DataFrame({
'sid': np.array([4], dtype=np.uint32),
'payment_sid': np.array([5], dtype=np.uint32),
'ratio': np.array([2], dtype=np.float64),
'declared_date': np.array([declared_date], dtype='datetime64[ns]'),
'ex_date': np.array([ex_date], dtype='datetime64[ns]'),
'record_date': np.array([record_date], dtype='datetime64[ns]'),
'pay_date': np.array([pay_date], dtype='datetime64[ns]'),
})
def test_normal(self):
days_to_use = self.sim_params.sessions[1:]
source = BenchmarkSource(
self.env.asset_finder.retrieve_asset(1),
self.trading_calendar,
days_to_use,
self.data_portal
)
# should be the equivalent of getting the price history, then doing
# a pct_change on it
manually_calculated = self.data_portal.get_history_window(
[1],
days_to_use[-1],
len(days_to_use),
"1d",
"close",
"daily",
)[1].pct_change()
# compare all the fields except the first one, for which we don't have
# data in manually_calculated
for idx, day in enumerate(days_to_use[1:]):
self.assertEqual(
source.get_value(day),
manually_calculated[idx + 1]
)
# compare a slice of the data
assert_series_equal(
source.get_range(days_to_use[1], days_to_use[10]),
manually_calculated[1:11]
)
def test_asset_not_trading(self):
benchmark = self.env.asset_finder.retrieve_asset(3)
benchmark_start = benchmark.start_date
benchmark_end = benchmark.end_date
with self.assertRaises(BenchmarkAssetNotAvailableTooEarly) as exc:
BenchmarkSource(
benchmark,
self.trading_calendar,
self.sim_params.sessions[1:],
self.data_portal
)
self.assertEqual(
'Equity(3 [C]) does not exist on %s. It started trading on %s.' %
(self.sim_params.sessions[1], benchmark_start),
exc.exception.message
)
with self.assertRaises(BenchmarkAssetNotAvailableTooLate) as exc2:
BenchmarkSource(
benchmark,
self.trading_calendar,
self.sim_params.sessions[120:],
self.data_portal
)
self.assertEqual(
'Equity(3 [C]) does not exist on %s. It stopped trading on %s.' %
(self.sim_params.sessions[-1], benchmark_end),
exc2.exception.message
)
def test_asset_IPOed_same_day(self):
# gotta get some minute data up in here.
# add sid 4 for a couple of days
minutes = self.trading_calendar.minutes_for_sessions_in_range(
self.sim_params.sessions[0],
self.sim_params.sessions[5]
)
tmp_reader = tmp_bcolz_equity_minute_bar_reader(
self.trading_calendar,
self.trading_calendar.all_sessions,
create_minute_bar_data(minutes, [2]),
)
with tmp_reader as reader:
data_portal = DataPortal(
self.env.asset_finder, self.trading_calendar,
first_trading_day=reader.first_trading_day,
equity_minute_reader=reader,
equity_daily_reader=self.bcolz_equity_daily_bar_reader,
adjustment_reader=self.adjustment_reader,
)
source = BenchmarkSource(
self.env.asset_finder.retrieve_asset(2),
self.trading_calendar,
self.sim_params.sessions,
data_portal
)
days_to_use = self.sim_params.sessions
# first value should be 0.0, coming from daily data
self.assertAlmostEquals(0.0, source.get_value(days_to_use[0]))
manually_calculated = data_portal.get_history_window(
[2], days_to_use[-1],
len(days_to_use),
"1d",
"close",
"daily",
)[2].pct_change()
for idx, day in enumerate(days_to_use[1:]):
self.assertEqual(
source.get_value(day),
manually_calculated[idx + 1]
)
def test_no_stock_dividends_allowed(self):
# try to use sid(4) as benchmark, should blow up due to the presence
# of a stock dividend
with self.assertRaises(InvalidBenchmarkAsset) as exc:
BenchmarkSource(
self.env.asset_finder.retrieve_asset(4),
self.trading_calendar,
self.sim_params.sessions,
self.data_portal
)
self.assertEqual("Equity(4 [D]) cannot be used as the benchmark "
"because it has a stock dividend on 2006-03-16 "
"00:00:00. Choose another asset to use as the "
"benchmark.",
exc.exception.message)
| apache-2.0 |
StuntsPT/Structure_threader | setup.py | 1 | 3703 | #!/usr/bin/python3
# Copyright 2016-2021 Francisco Pina Martins <[email protected]>
# This file is part of structure_threader.
# structure_threader is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# structure_threader is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with structure_threader. If not, see <http://www.gnu.org/licenses/>.
import sys
try:
from setuptools import setup
except ImportError:
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup
class NotSupportedException(BaseException):
pass
if sys.version_info.major < 3:
raise NotSupportedException("Only Python 3.x Supported")
def platform_detection(install_binaries=True):
"""
Detect the platform and adapt the binaries location.
"""
if install_binaries is True:
if sys.platform == "linux":
bin_dir = "structure_threader/bins/linux"
elif sys.platform == "darwin":
bin_dir = "structure_threader/bins/osx"
else:
return None
else:
return None
structure_bin = bin_dir + "/structure"
faststructure_bin = bin_dir + "/fastStructure"
maverick_bin = bin_dir + "/MavericK"
return [('bin', [faststructure_bin, structure_bin, maverick_bin])]
# Set some variables (PKGBUILD inspired)
DATA_FILES = platform_detection()
try:
DATA_FILES[0][1].append("structure_threader/wrappers/alstructure_wrapper.R")
except TypeError:
DATA_FILES = [('bin',
["structure_threader/wrappers/alstructure_wrapper.R"])]
VERSION = "1.3.10"
URL = "https://gitlab.com/StuntsPT/Structure_threader"
setup(
name="structure_threader",
version=VERSION,
packages=["structure_threader",
"structure_threader.evanno",
"structure_threader.plotter",
"structure_threader.sanity_checks",
"structure_threader.colorer",
"structure_threader.wrappers",
"structure_threader.skeletons"],
install_requires=["plotly>=4.1.1",
"colorlover",
"numpy>=1.12.1",
"matplotlib"],
description=("A program to parallelize runs of 'Structure', "
"'fastStructure' and 'MavericK'."),
url=URL,
download_url="{0}/-/archive/{1}/Structure_threader-{1}.tar.gz".format(URL, VERSION),
author="Francisco Pina-Martins",
author_email="[email protected]",
license="GPL3",
classifiers=["Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License v3 ("
"GPLv3)",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7"],
data_files=DATA_FILES,
entry_points={
"console_scripts": [
"structure_threader = structure_threader.structure_threader:main",
]
},
)
| gpl-3.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/tests/test_compat.py | 9 | 2357 | # -*- coding: utf-8 -*-
"""
Testing that functions from compat work as expected
"""
from pandas.compat import (
range, zip, map, filter,
lrange, lzip, lmap, lfilter,
builtins
)
import unittest
import nose
import pandas.util.testing as tm
class TestBuiltinIterators(tm.TestCase):
def check_result(self, actual, expected, lengths):
for (iter_res, list_res), exp, length in zip(actual, expected, lengths):
self.assertNotIsInstance(iter_res, list)
tm.assertIsInstance(list_res, list)
iter_res = list(iter_res)
self.assertEqual(len(list_res), length)
self.assertEqual(len(iter_res), length)
self.assertEqual(iter_res, exp)
self.assertEqual(list_res, exp)
def test_range(self):
actual1 = range(10)
actual2 = lrange(10)
actual = [actual1, actual2],
expected = list(builtins.range(10)),
lengths = 10,
actual1 = range(1, 10, 2)
actual2 = lrange(1, 10, 2)
actual += [actual1, actual2],
lengths += 5,
expected += list(builtins.range(1, 10, 2)),
self.check_result(actual, expected, lengths)
def test_map(self):
func = lambda x, y, z: x + y + z
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
actual1 = map(func, *lst)
actual2 = lmap(func, *lst)
actual = [actual1, actual2],
expected = list(builtins.map(func, *lst)),
lengths = 10,
self.check_result(actual, expected, lengths)
def test_filter(self):
func = lambda x: x
lst = list(builtins.range(10))
actual1 = filter(func, lst)
actual2 = lfilter(func, lst)
actual = [actual1, actual2],
lengths = 9,
expected = list(builtins.filter(func, lst)),
self.check_result(actual, expected, lengths)
def test_zip(self):
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
actual = [zip(*lst), lzip(*lst)],
expected = list(builtins.zip(*lst)),
lengths = 10,
self.check_result(actual, expected, lengths)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core'],
exit=False)
| apache-2.0 |
icdishb/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular, note that
a datapoint (represented as a dot) is not necessarily classified
into the region it is lying in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier to this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second y axis for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
ericdill/scikit-beam-examples | demos/1_time_correlation/one-time-correlation.py | 3 | 2993 |
import numpy as np
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
import matplotlib.patches as mp
import skxray.core.correlation as corr
import skxray.core.roi as roi
# it would be great to have a link to what this multi-tau scheme is!
num_levels = 7
num_bufs = 8
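# (added note) In the multi-tau scheme the base level uses `num_bufs` evenly
# spaced lag channels and every subsequent level doubles the lag spacing
# (adding num_bufs/2 coarser channels), so `num_levels` levels cover long
# delay times with far fewer channels than a linear correlator would need.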
# load the data
img_stack = np.load("100_500_NIPA_GEL.npy")
# plot the first image to make sure the data loaded correctly
fig, ax = plt.subplots()
ax.imshow(img_stack[0])
ax.set_title("NIPA_GEL_250K")
# define the ROIs
roi_start = 65 # in pixels
roi_width = 9 # in pixels
roi_spacing = (5.0, 4.0)
x_center = 7. # in pixels
y_center = (129.) # in pixels
num_rings = 3
# get the edges of the rings
edges = roi.ring_edges(roi_start, width=roi_width,
spacing=roi_spacing, num_rings=num_rings)
# get the label array from the ring shaped 3 region of interests(ROI's)
labeled_roi_array = roi.rings(
edges, (y_center, x_center), img_stack.shape[1:])
# extract the ROI labels and pixel indices corresponding to those labels
roi_indices, pixel_list = corr.extract_label_indices(labeled_roi_array)
def overlay_rois(ax, inds, pix_list, img_dim, image):
"""
    This will plot the required ROIs on the image
"""
tt = np.zeros(img_dim).ravel() * np.nan
tt[pix_list] = inds
im = ax.imshow(image, interpolation='none', norm=LogNorm())
im = ax.imshow(tt.reshape(*img_dim), cmap='Paired',
interpolation='nearest')
roi_names = ['gray', 'orange', 'brown']
tt = np.zeros(img_stack.shape[1:]).ravel()
tt[pixel_list] = roi_indices
fig, ax = plt.subplots()
ax.set_title("NIPA_GEL_250K")
overlay_rois(ax, roi_indices, pixel_list,
img_stack.shape[1:], img_stack[0])
# g2 one time correlation results for 3 ROI's
g2, lag_steps = corr.multi_tau_auto_corr(
num_levels, num_bufs, labeled_roi_array, img_stack)
# lag_steps are the delays for the multi-tau analysis
lag_time = 0.001
lag_step = lag_steps[:g2.shape[0]]
lags = lag_step*lag_time
fig, axes = plt.subplots(num_rings, sharex=True, figsize=(5,10))
axes[num_rings-1].set_xlabel("lags")
for i, roi_color in zip(range(num_rings), roi_names):
axes[i].set_ylabel("g2")
axes[i].semilogx(lags, g2[:, i], 'o', markerfacecolor=roi_color, markersize=10)
axes[i].set_ylim(bottom=1, top=np.max(g2[1:, i]))
plt.show()
| bsd-3-clause |
FireCARES/data | sources/nfirs/scripts/jupyter_notebook_config.py | 1 | 25267 | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = u''
## Specify a config file to load.
#c.JupyterApp.config_file_name = u''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = u''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = u''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = u''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = ''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = u''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Deprecated: Use minified JS file or not, mainly use during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which stream output can be sent on iopub before
# they are limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
c.NotebookApp.ip = '192.168.33.15'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = u''
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions.Entry values can
# be used to enable and disable the loading ofthe extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = u''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#c.NotebookApp.password = u''
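## Illustrative sketch (kept commented out; not part of the generated defaults):
# a hashed password produced by `from notebook.auth import passwd; passwd()`
# could be pasted here. The value below is a made-up placeholder of the
# documented type:salt:hashed-password form and must be replaced with your own
# generated hash before use.
#c.NotebookApp.password = u'sha1:0123456789ab:0123456789abcdef0123456789abcdef01234567'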
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
c.NotebookApp.password_required = False
## The port the notebook server will listen on.
c.NotebookApp.port = 8007
## The number of additional ports to try if the specified port is not available.
c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## Specify Where to open the notebook on startup. This is the
# `new` argument passed to the standard library method `webbrowser.open`.
# The behaviour is not guaranteed, but depends on browser support. Valid
# values are:
# 2 opens a new tab,
# 1 opens a new window,
# 0 opens in an existing window.
# See the `webbrowser.open` documentation for details.
#c.NotebookApp.webbrowser_open_new = 2
## Set the tornado compression options for websocket connections.
#
# This value will be returned from
# :meth:`WebSocketHandler.get_compression_options`. None (default) will disable
# compression. A dict (even an empty one) will enable compression.
#
# See the tornado docs for WebSocketHandler.get_compression_options for details.
#c.NotebookApp.websocket_compression_options = None
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = u''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = ''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = u''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = u'vagrant'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python2'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
## Whether messages from kernels whose frontends have disconnected should be
# buffered in-memory.
#
# When True (default), messages are buffered and replayed on reconnect, avoiding
# lost messages due to interrupted connectivity.
#
# Disable if long-running kernels will produce too much output while no
# frontends are connected.
#c.MappingKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy. Only effective if
# cull_idle_timeout is not 0.
#c.MappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections. Only
# effective if cull_idle_timeout is not 0.
#c.MappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled. Values of 0 or lower disable culling. The minimum timeout is 300
# seconds (5 minutes). Positive values less than the minimum value will be set
# to the minimum.
#c.MappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
#c.MappingKernelManager.cull_interval = 300
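## Illustrative sketch (commented out; the numbers are assumptions chosen only to
# demonstrate the culling options documented above): cull kernels idle for 10
# minutes, checking every 5 minutes. Note the documented 300-second minimum for
# the idle timeout.
#c.MappingKernelManager.cull_idle_timeout = 600
#c.MappingKernelManager.cull_interval = 300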
##
#c.MappingKernelManager.root_dir = u''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## handler class to use when serving raw file requests.
#
# Default is a fallback that talks to the ContentsManager API, which may be
# inefficient, especially for large files.
#
# Local files-based ContentsManagers can use a StaticFileHandler subclass, which
# will be much more efficient.
#
# Access to these files should be Authenticated.
#c.ContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler'
## Extra parameters to pass to files_handler_class.
#
# For example, StaticFileHandlers generally expect a `path` argument specifying
# the root directory from which to serve files.
#c.ContentsManager.files_handler_params = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = [u'__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
#      A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved to a temporary file on disk and then, if
# successfully written, it replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked filesystems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or quota).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written
# - model: the model representing the file
# - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = u''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = u''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = ''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = u''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set([])
| mit |
Aegeaner/spark | python/pyspark/sql/functions.py | 1 | 134163 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A collections of builtin functions
"""
import sys
import functools
import warnings
if sys.version < "3":
from itertools import imap as map
if sys.version >= '3':
basestring = str
from pyspark import since, SparkContext
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq, _create_column_from_literal
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StringType, DataType
# Keep UserDefinedFunction import for backwards compatible import; moved in SPARK-22409
from pyspark.sql.udf import UserDefinedFunction, _create_udf
def _create_name_function(name, doc=""):
""" Create a function that takes a column name argument, by name"""
def _(col):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(col._jc if isinstance(col, Column) else col)
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _create_function(name, doc=""):
""" Create a function that takes a Column object, by name"""
def _(col):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(_to_java_column(col))
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _wrap_deprecated_function(func, message):
""" Wrap the deprecated function to print out deprecation warnings"""
def _(col):
warnings.warn(message, DeprecationWarning)
return func(col)
return functools.wraps(func)(_)
def _create_binary_mathfunction(name, doc=""):
""" Create a binary mathfunction by name"""
def _(col1, col2):
sc = SparkContext._active_spark_context
# users might write ints for simplicity. This would throw an error on the JVM side.
jc = getattr(sc._jvm.functions, name)(col1._jc if isinstance(col1, Column) else float(col1),
col2._jc if isinstance(col2, Column) else float(col2))
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _create_window_function(name, doc=''):
""" Create a window function by name """
def _():
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)()
return Column(jc)
_.__name__ = name
_.__doc__ = 'Window function: ' + doc
return _
_lit_doc = """
Creates a :class:`Column` of literal value.
>>> df.select(lit(5).alias('height')).withColumn('spark_user', lit(True)).take(1)
[Row(height=5, spark_user=True)]
"""
_name_functions = {
# name functions take a column name as their argument
'lit': _lit_doc,
'col': 'Returns a :class:`Column` based on the given column name.',
'column': 'Returns a :class:`Column` based on the given column name.',
'asc': 'Returns a sort expression based on the ascending order of the given column name.',
'desc': 'Returns a sort expression based on the descending order of the given column name.',
}
_functions = {
'upper': 'Converts a string expression to upper case.',
    'lower': 'Converts a string expression to lower case.',
'sqrt': 'Computes the square root of the specified float value.',
'abs': 'Computes the absolute value.',
'max': 'Aggregate function: returns the maximum value of the expression in a group.',
'min': 'Aggregate function: returns the minimum value of the expression in a group.',
'count': 'Aggregate function: returns the number of items in a group.',
'sum': 'Aggregate function: returns the sum of all values in the expression.',
'avg': 'Aggregate function: returns the average of the values in a group.',
'mean': 'Aggregate function: returns the average of the values in a group.',
'sumDistinct': 'Aggregate function: returns the sum of distinct values in the expression.',
}
_functions_1_4 = {
# unary math functions
'acos': ':return: inverse cosine of `col`, as if computed by `java.lang.Math.acos()`',
'asin': ':return: inverse sine of `col`, as if computed by `java.lang.Math.asin()`',
'atan': ':return: inverse tangent of `col`, as if computed by `java.lang.Math.atan()`',
'cbrt': 'Computes the cube-root of the given value.',
'ceil': 'Computes the ceiling of the given value.',
'cos': """:param col: angle in radians
:return: cosine of the angle, as if computed by `java.lang.Math.cos()`.""",
'cosh': """:param col: hyperbolic angle
:return: hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh()`""",
'exp': 'Computes the exponential of the given value.',
'expm1': 'Computes the exponential of the given value minus one.',
'floor': 'Computes the floor of the given value.',
'log': 'Computes the natural logarithm of the given value.',
'log10': 'Computes the logarithm of the given value in Base 10.',
'log1p': 'Computes the natural logarithm of the given value plus one.',
'rint': 'Returns the double value that is closest in value to the argument and' +
' is equal to a mathematical integer.',
'signum': 'Computes the signum of the given value.',
'sin': """:param col: angle in radians
:return: sine of the angle, as if computed by `java.lang.Math.sin()`""",
'sinh': """:param col: hyperbolic angle
:return: hyperbolic sine of the given value,
as if computed by `java.lang.Math.sinh()`""",
'tan': """:param col: angle in radians
:return: tangent of the given value, as if computed by `java.lang.Math.tan()`""",
'tanh': """:param col: hyperbolic angle
:return: hyperbolic tangent of the given value,
as if computed by `java.lang.Math.tanh()`""",
'toDegrees': '.. note:: Deprecated in 2.1, use :func:`degrees` instead.',
'toRadians': '.. note:: Deprecated in 2.1, use :func:`radians` instead.',
'bitwiseNOT': 'Computes bitwise not.',
}
_name_functions_2_4 = {
'asc_nulls_first': 'Returns a sort expression based on the ascending order of the given' +
                       ' column name, and null values appear before non-null values.',
'asc_nulls_last': 'Returns a sort expression based on the ascending order of the given' +
' column name, and null values appear after non-null values.',
'desc_nulls_first': 'Returns a sort expression based on the descending order of the given' +
' column name, and null values appear before non-null values.',
'desc_nulls_last': 'Returns a sort expression based on the descending order of the given' +
' column name, and null values appear after non-null values',
}
_collect_list_doc = """
Aggregate function: returns a list of objects with duplicates.
.. note:: The function is non-deterministic because the order of collected results depends
on order of rows which may be non-deterministic after a shuffle.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_list('age')).collect()
[Row(collect_list(age)=[2, 5, 5])]
"""
_collect_set_doc = """
Aggregate function: returns a set of objects with duplicate elements eliminated.
.. note:: The function is non-deterministic because the order of collected results depends
on order of rows which may be non-deterministic after a shuffle.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_set('age')).collect()
[Row(collect_set(age)=[5, 2])]
"""
_functions_1_6 = {
# unary math functions
'stddev': 'Aggregate function: returns the unbiased sample standard deviation of' +
' the expression in a group.',
'stddev_samp': 'Aggregate function: returns the unbiased sample standard deviation of' +
' the expression in a group.',
'stddev_pop': 'Aggregate function: returns population standard deviation of' +
' the expression in a group.',
'variance': 'Aggregate function: returns the population variance of the values in a group.',
'var_samp': 'Aggregate function: returns the unbiased variance of the values in a group.',
'var_pop': 'Aggregate function: returns the population variance of the values in a group.',
'skewness': 'Aggregate function: returns the skewness of the values in a group.',
'kurtosis': 'Aggregate function: returns the kurtosis of the values in a group.',
'collect_list': _collect_list_doc,
'collect_set': _collect_set_doc
}
_functions_2_1 = {
# unary math functions
'degrees': """
Converts an angle measured in radians to an approximately equivalent angle
measured in degrees.
:param col: angle in radians
:return: angle in degrees, as if computed by `java.lang.Math.toDegrees()`
""",
'radians': """
Converts an angle measured in degrees to an approximately equivalent angle
measured in radians.
:param col: angle in degrees
:return: angle in radians, as if computed by `java.lang.Math.toRadians()`
""",
}
# math functions that take two arguments as input
_binary_mathfunctions = {
'atan2': """
:param col1: coordinate on y-axis
:param col2: coordinate on x-axis
:return: the `theta` component of the point
(`r`, `theta`)
in polar coordinates that corresponds to the point
(`x`, `y`) in Cartesian coordinates,
as if computed by `java.lang.Math.atan2()`
""",
'hypot': 'Computes ``sqrt(a^2 + b^2)`` without intermediate overflow or underflow.',
'pow': 'Returns the value of the first argument raised to the power of the second argument.',
}
_window_functions = {
'row_number':
"""returns a sequential number starting at 1 within a window partition.""",
'dense_rank':
"""returns the rank of rows within a window partition, without any gaps.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
        place and that the next person came in third. Rank would give sequential numbers, so that
        the person who came in third place (after the ties) would register as coming in fifth.
This is equivalent to the DENSE_RANK function in SQL.""",
'rank':
"""returns the rank of rows within a window partition.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
        place and that the next person came in third. Rank would give sequential numbers, so that
        the person who came in third place (after the ties) would register as coming in fifth.
This is equivalent to the RANK function in SQL.""",
'cume_dist':
"""returns the cumulative distribution of values within a window partition,
i.e. the fraction of rows that are below the current row.""",
'percent_rank':
"""returns the relative rank (i.e. percentile) of rows within a window partition.""",
}
# Wraps deprecated functions (keys) with the messages (values).
_functions_deprecated = {
}
for _name, _doc in _name_functions.items():
globals()[_name] = since(1.3)(_create_name_function(_name, _doc))
for _name, _doc in _functions.items():
globals()[_name] = since(1.3)(_create_function(_name, _doc))
for _name, _doc in _functions_1_4.items():
globals()[_name] = since(1.4)(_create_function(_name, _doc))
for _name, _doc in _binary_mathfunctions.items():
globals()[_name] = since(1.4)(_create_binary_mathfunction(_name, _doc))
for _name, _doc in _window_functions.items():
globals()[_name] = since(1.6)(_create_window_function(_name, _doc))
for _name, _doc in _functions_1_6.items():
globals()[_name] = since(1.6)(_create_function(_name, _doc))
for _name, _doc in _functions_2_1.items():
globals()[_name] = since(2.1)(_create_function(_name, _doc))
for _name, _message in _functions_deprecated.items():
globals()[_name] = _wrap_deprecated_function(globals()[_name], _message)
for _name, _doc in _name_functions_2_4.items():
globals()[_name] = since(2.4)(_create_name_function(_name, _doc))
del _name, _doc
@since(2.1)
def approx_count_distinct(col, rsd=None):
"""Aggregate function: returns a new :class:`Column` for approximate distinct count of
column `col`.
:param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
efficient to use :func:`countDistinct`
>>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
[Row(distinct_ages=2)]
"""
sc = SparkContext._active_spark_context
if rsd is None:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col))
else:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd)
return Column(jc)
@since(1.6)
def broadcast(df):
"""Marks a DataFrame as small enough for use in broadcast joins."""
sc = SparkContext._active_spark_context
return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx)
@since(1.4)
def coalesce(*cols):
"""Returns the first column that is not null.
>>> cDf = spark.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b"))
>>> cDf.show()
+----+----+
| a| b|
+----+----+
|null|null|
| 1|null|
|null| 2|
+----+----+
>>> cDf.select(coalesce(cDf["a"], cDf["b"])).show()
+--------------+
|coalesce(a, b)|
+--------------+
| null|
| 1|
| 2|
+--------------+
>>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show()
+----+----+----------------+
| a| b|coalesce(a, 0.0)|
+----+----+----------------+
|null|null| 0.0|
| 1|null| 1.0|
|null| 2| 0.0|
+----+----+----------------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.coalesce(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def corr(col1, col2):
"""Returns a new :class:`Column` for the Pearson Correlation Coefficient for ``col1``
and ``col2``.
>>> a = range(20)
>>> b = [2 * x for x in range(20)]
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(corr("a", "b").alias('c')).collect()
[Row(c=1.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.corr(_to_java_column(col1), _to_java_column(col2)))
@since(2.0)
def covar_pop(col1, col2):
"""Returns a new :class:`Column` for the population covariance of ``col1`` and ``col2``.
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_pop("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_pop(_to_java_column(col1), _to_java_column(col2)))
@since(2.0)
def covar_samp(col1, col2):
"""Returns a new :class:`Column` for the sample covariance of ``col1`` and ``col2``.
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_samp("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_samp(_to_java_column(col1), _to_java_column(col2)))
@since(1.3)
def countDistinct(col, *cols):
"""Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.
>>> df.agg(countDistinct(df.age, df.name).alias('c')).collect()
[Row(c=2)]
>>> df.agg(countDistinct("age", "name").alias('c')).collect()
[Row(c=2)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.countDistinct(_to_java_column(col), _to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.3)
def first(col, ignorenulls=False):
"""Aggregate function: returns the first value in a group.
The function by default returns the first values it sees. It will return the first non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
.. note:: The function is non-deterministic because its results depends on order of rows which
may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.first(_to_java_column(col), ignorenulls)
return Column(jc)
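# Illustrative usage sketch (not one of this module's doctests): assuming a small
# DataFrame ``df`` with (age, name) rows like the one shown in the rand()/randn()
# examples below, the first non-null age per name could be computed as:
#
#   df.groupBy("name").agg(first("age", ignorenulls=True).alias("first_age")).show()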
@since(2.0)
def grouping(col):
"""
Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
or not, returns 1 for aggregated or 0 for not aggregated in the result set.
>>> df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show()
+-----+--------------+--------+
| name|grouping(name)|sum(age)|
+-----+--------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+--------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping(_to_java_column(col))
return Column(jc)
@since(2.0)
def grouping_id(*cols):
"""
Aggregate function: returns the level of grouping, equals to
(grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
.. note:: The list of columns should match with grouping columns exactly, or empty (means all
the grouping columns).
>>> df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show()
+-----+-------------+--------+
| name|grouping_id()|sum(age)|
+-----+-------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+-------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping_id(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def input_file_name():
"""Creates a string column for the file name of the current Spark task.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.input_file_name())
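# Illustrative usage sketch (not one of this module's doctests; the path is a
# hypothetical placeholder): for a file-backed DataFrame, each row can be tagged
# with its source file, e.g.
#
#   spark.read.text("logs/").select("value", input_file_name().alias("source_file")).show()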
@since(1.6)
def isnan(col):
"""An expression that returns true iff the column is NaN.
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnan(_to_java_column(col)))
@since(1.6)
def isnull(col):
"""An expression that returns true iff the column is null.
>>> df = spark.createDataFrame([(1, None), (None, 2)], ("a", "b"))
>>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnull(_to_java_column(col)))
@since(1.3)
def last(col, ignorenulls=False):
"""Aggregate function: returns the last value in a group.
The function by default returns the last values it sees. It will return the last non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
.. note:: The function is non-deterministic because its results depends on order of rows
which may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.last(_to_java_column(col), ignorenulls)
return Column(jc)
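# Illustrative usage sketch (not one of this module's doctests), mirroring the
# comment above first(): with the same assumed toy DataFrame ``df``,
#
#   df.groupBy("name").agg(last("age", ignorenulls=True).alias("last_age")).show()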
@since(1.6)
def monotonically_increasing_id():
"""A column that generates monotonically increasing 64-bit integers.
The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
The current implementation puts the partition ID in the upper 31 bits, and the record number
within each partition in the lower 33 bits. The assumption is that the data frame has
less than 1 billion partitions, and each partition has less than 8 billion records.
.. note:: The function is non-deterministic because its result depends on partition IDs.
As an example, consider a :class:`DataFrame` with two partitions, each with 3 records.
This expression would return the following IDs:
0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
>>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
>>> df0.select(monotonically_increasing_id().alias('id')).collect()
[Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.monotonically_increasing_id())
@since(1.6)
def nanvl(col1, col2):
"""Returns col1 if it is not NaN, or col2 if col1 is NaN.
Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
[Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(1.4)
def rand(seed=None):
"""Generates a random column with independent and identically distributed (i.i.d.) samples
from U[0.0, 1.0].
.. note:: The function is non-deterministic in general case.
>>> df.withColumn('rand', rand(seed=42) * 3).collect()
[Row(age=2, name=u'Alice', rand=1.1568609015300986),
Row(age=5, name=u'Bob', rand=1.403379671529166)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.rand(seed)
else:
jc = sc._jvm.functions.rand()
return Column(jc)
@ignore_unicode_prefix
@since(1.4)
def randn(seed=None):
"""Generates a column with independent and identically distributed (i.i.d.) samples from
the standard normal distribution.
.. note:: The function is non-deterministic in general case.
>>> df.withColumn('randn', randn(seed=42)).collect()
[Row(age=2, name=u'Alice', randn=-0.7556247885860078),
Row(age=5, name=u'Bob', randn=-0.0861619008451133)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.randn(seed)
else:
jc = sc._jvm.functions.randn()
return Column(jc)
@since(1.5)
def round(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
>>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect()
[Row(r=3.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.round(_to_java_column(col), scale))
@since(2.0)
def bround(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_EVEN rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
>>> spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect()
[Row(r=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.bround(_to_java_column(col), scale))
@since(1.5)
def shiftLeft(col, numBits):
"""Shift the given value numBits left.
>>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
[Row(r=42)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shiftLeft(_to_java_column(col), numBits))
@since(1.5)
def shiftRight(col, numBits):
"""(Signed) shift the given value numBits right.
>>> spark.createDataFrame([(42,)], ['a']).select(shiftRight('a', 1).alias('r')).collect()
[Row(r=21)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRight(_to_java_column(col), numBits)
return Column(jc)
@since(1.5)
def shiftRightUnsigned(col, numBits):
"""Unsigned shift the given value numBits right.
>>> df = spark.createDataFrame([(-42,)], ['a'])
>>> df.select(shiftRightUnsigned('a', 1).alias('r')).collect()
[Row(r=9223372036854775787)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits)
return Column(jc)
@since(1.6)
def spark_partition_id():
"""A column for partition ID.
    .. note:: This is non-deterministic because it depends on data partitioning and task scheduling.
>>> df.repartition(1).select(spark_partition_id().alias("pid")).collect()
[Row(pid=0), Row(pid=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.spark_partition_id())
@since(1.5)
def expr(str):
"""Parses the expression string into the column that it represents
>>> df.select(expr("length(name)")).collect()
[Row(length(name)=5), Row(length(name)=3)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.expr(str))
@ignore_unicode_prefix
@since(1.4)
def struct(*cols):
"""Creates a new struct column.
:param cols: list of column names (string) or list of :class:`Column` expressions
>>> df.select(struct('age', 'name').alias("struct")).collect()
[Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
>>> df.select(struct([df.age, df.name]).alias("struct")).collect()
[Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.struct(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.5)
def greatest(*cols):
"""
Returns the greatest value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect()
[Row(greatest=4)]
"""
if len(cols) < 2:
raise ValueError("greatest should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.greatest(_to_seq(sc, cols, _to_java_column)))
@since(1.5)
def least(*cols):
"""
Returns the least value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(least(df.a, df.b, df.c).alias("least")).collect()
[Row(least=1)]
"""
if len(cols) < 2:
raise ValueError("least should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.least(_to_seq(sc, cols, _to_java_column)))
@since(1.4)
def when(condition, value):
"""Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
:param condition: a boolean :class:`Column` expression.
:param value: a literal value, or a :class:`Column` expression.
>>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
[Row(age=3), Row(age=4)]
>>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect()
[Row(age=3), Row(age=None)]
"""
sc = SparkContext._active_spark_context
if not isinstance(condition, Column):
raise TypeError("condition should be a Column")
v = value._jc if isinstance(value, Column) else value
jc = sc._jvm.functions.when(condition._jc, v)
return Column(jc)
@since(1.5)
def log(arg1, arg2=None):
"""Returns the first argument-based logarithm of the second argument.
If there is only one argument, then this takes the natural logarithm of the argument.
>>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect()
['0.30102', '0.69897']
>>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect()
['0.69314', '1.60943']
"""
sc = SparkContext._active_spark_context
if arg2 is None:
jc = sc._jvm.functions.log(_to_java_column(arg1))
else:
jc = sc._jvm.functions.log(arg1, _to_java_column(arg2))
return Column(jc)
@since(1.5)
def log2(col):
"""Returns the base-2 logarithm of the argument.
>>> spark.createDataFrame([(4,)], ['a']).select(log2('a').alias('log2')).collect()
[Row(log2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.log2(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def conv(col, fromBase, toBase):
"""
Convert a number in a string column from one base to another.
>>> df = spark.createDataFrame([("010101",)], ['n'])
>>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
[Row(hex=u'15')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.conv(_to_java_column(col), fromBase, toBase))
@since(1.5)
def factorial(col):
"""
Computes the factorial of the given value.
>>> df = spark.createDataFrame([(5,)], ['n'])
>>> df.select(factorial(df.n).alias('f')).collect()
[Row(f=120)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.factorial(_to_java_column(col)))
# --------------- Window functions ------------------------
@since(1.4)
def lag(col, offset=1, default=None):
"""
Window function: returns the value that is `offset` rows before the current row, and
    `defaultValue` if there are fewer than `offset` rows before the current row. For example,
an `offset` of one will return the previous row at any given point in the window partition.
This is equivalent to the LAG function in SQL.
:param col: name of column or expression
:param offset: number of row to extend
:param default: default value
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lag(_to_java_column(col), offset, default))
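# Illustrative usage sketch (not one of this module's doctests; assumes a
# hypothetical DataFrame ``df`` with "name" and "age" columns): window functions
# such as lag() are applied over a window specification, e.g.
#
#   from pyspark.sql.window import Window
#   w = Window.partitionBy("name").orderBy("age")
#   df.select("name", "age", lag("age", 1).over(w).alias("prev_age")).show()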
@since(1.4)
def lead(col, offset=1, default=None):
"""
Window function: returns the value that is `offset` rows after the current row, and
    `defaultValue` if there are fewer than `offset` rows after the current row. For example,
an `offset` of one will return the next row at any given point in the window partition.
This is equivalent to the LEAD function in SQL.
:param col: name of column or expression
:param offset: number of row to extend
:param default: default value
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lead(_to_java_column(col), offset, default))
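# Illustrative usage sketch (not one of this module's doctests), analogous to the
# lag() comment above and assuming the same hypothetical window ``w``:
#
#   df.select("name", "age", lead("age", 1).over(w).alias("next_age")).show()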
@since(1.4)
def ntile(n):
"""
Window function: returns the ntile group id (from 1 to `n` inclusive)
in an ordered window partition. For example, if `n` is 4, the first
quarter of the rows will get value 1, the second quarter will get 2,
the third quarter will get 3, and the last quarter will get 4.
This is equivalent to the NTILE function in SQL.
:param n: an integer
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.ntile(int(n)))
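# Illustrative usage sketch (not one of this module's doctests; assumes the same
# hypothetical DataFrame ``df`` as in the lag() comment above): split rows into
# two buckets by age, e.g.
#
#   from pyspark.sql.window import Window
#   df.select("name", "age", ntile(2).over(Window.orderBy("age")).alias("bucket")).show()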
# ---------------------- Date/Timestamp functions ------------------------------
@since(1.5)
def current_date():
"""
Returns the current date as a :class:`DateType` column.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_date())
def current_timestamp():
"""
Returns the current timestamp as a :class:`TimestampType` column.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_timestamp())
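# Illustrative usage sketch (not one of this module's doctests; the output depends
# on the wall clock, so no result is shown):
#
#   df.select(current_date().alias("today"), current_timestamp().alias("now")).show()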
@ignore_unicode_prefix
@since(1.5)
def date_format(date, format):
"""
Converts a date/timestamp/string to a value of string in the format specified by the date
format given by the second argument.
A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All
pattern letters of the Java class `java.time.format.DateTimeFormatter` can be used.
    .. note:: Whenever possible, use specialized functions like `year`. These benefit from a
specialized implementation.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect()
[Row(date=u'04/08/2015')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_format(_to_java_column(date), format))
@since(1.5)
def year(col):
"""
Extract the year of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(year('dt').alias('year')).collect()
[Row(year=2015)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.year(_to_java_column(col)))
@since(1.5)
def quarter(col):
"""
Extract the quarter of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(quarter('dt').alias('quarter')).collect()
[Row(quarter=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.quarter(_to_java_column(col)))
@since(1.5)
def month(col):
"""
Extract the month of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(month('dt').alias('month')).collect()
[Row(month=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.month(_to_java_column(col)))
@since(2.3)
def dayofweek(col):
"""
Extract the day of the week of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofweek('dt').alias('day')).collect()
[Row(day=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofweek(_to_java_column(col)))
@since(1.5)
def dayofmonth(col):
"""
Extract the day of the month of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofmonth('dt').alias('day')).collect()
[Row(day=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofmonth(_to_java_column(col)))
@since(1.5)
def dayofyear(col):
"""
Extract the day of the year of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofyear('dt').alias('day')).collect()
[Row(day=98)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofyear(_to_java_column(col)))
@since(1.5)
def hour(col):
"""
Extract the hours of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(hour('ts').alias('hour')).collect()
[Row(hour=13)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.hour(_to_java_column(col)))
@since(1.5)
def minute(col):
"""
Extract the minutes of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(minute('ts').alias('minute')).collect()
[Row(minute=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.minute(_to_java_column(col)))
@since(1.5)
def second(col):
"""
Extract the seconds of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(second('ts').alias('second')).collect()
[Row(second=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.second(_to_java_column(col)))
@since(1.5)
def weekofyear(col):
"""
Extract the week number of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(weekofyear(df.dt).alias('week')).collect()
[Row(week=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.weekofyear(_to_java_column(col)))
@since(1.5)
def date_add(start, days):
"""
Returns the date that is `days` days after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
[Row(next_date=datetime.date(2015, 4, 9))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_add(_to_java_column(start), days))
@since(1.5)
def date_sub(start, days):
"""
Returns the date that is `days` days before `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_sub(df.dt, 1).alias('prev_date')).collect()
[Row(prev_date=datetime.date(2015, 4, 7))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_sub(_to_java_column(start), days))
@since(1.5)
def datediff(end, start):
"""
Returns the number of days from `start` to `end`.
>>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2'])
>>> df.select(datediff(df.d2, df.d1).alias('diff')).collect()
[Row(diff=32)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start)))
@since(1.5)
def add_months(start, months):
"""
Returns the date that is `months` months after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(add_months(df.dt, 1).alias('next_month')).collect()
[Row(next_month=datetime.date(2015, 5, 8))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.add_months(_to_java_column(start), months))
@since(1.5)
def months_between(date1, date2, roundOff=True):
"""
Returns number of months between dates date1 and date2.
If date1 is later than date2, then the result is positive.
If date1 and date2 are on the same day of month, or both are the last day of month,
returns an integer (time of day will be ignored).
The result is rounded off to 8 digits unless `roundOff` is set to `False`.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', '1996-10-30')], ['date1', 'date2'])
>>> df.select(months_between(df.date1, df.date2).alias('months')).collect()
[Row(months=3.94959677)]
>>> df.select(months_between(df.date1, df.date2, False).alias('months')).collect()
[Row(months=3.9495967741935485)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.months_between(
_to_java_column(date1), _to_java_column(date2), roundOff))
@since(2.2)
def to_date(col, format=None):
"""Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
:class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType`
using the optionally specified format. Specify formats according to
`DateTimeFormatter <https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html>`_. # noqa
By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format
is omitted (equivalent to ``col.cast("date")``).
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_date(_to_java_column(col))
else:
jc = sc._jvm.functions.to_date(_to_java_column(col), format)
return Column(jc)
@since(2.2)
def to_timestamp(col, format=None):
"""Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
:class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType`
using the optionally specified format. Specify formats according to
`DateTimeFormatter <https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html>`_. # noqa
By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format
is omitted (equivalent to ``col.cast("timestamp")``).
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t).alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col))
else:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col), format)
return Column(jc)
@since(1.5)
def trunc(date, format):
"""
Returns date truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy' or 'month', 'mon', 'mm'
>>> df = spark.createDataFrame([('1997-02-28',)], ['d'])
>>> df.select(trunc(df.d, 'year').alias('year')).collect()
[Row(year=datetime.date(1997, 1, 1))]
>>> df.select(trunc(df.d, 'mon').alias('month')).collect()
[Row(month=datetime.date(1997, 2, 1))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.trunc(_to_java_column(date), format))
@since(2.3)
def date_trunc(format, timestamp):
"""
Returns timestamp truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy', 'month', 'mon', 'mm',
'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter'
>>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t'])
>>> df.select(date_trunc('year', df.t).alias('year')).collect()
[Row(year=datetime.datetime(1997, 1, 1, 0, 0))]
>>> df.select(date_trunc('mon', df.t).alias('month')).collect()
[Row(month=datetime.datetime(1997, 2, 1, 0, 0))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_trunc(format, _to_java_column(timestamp)))
@since(1.5)
def next_day(date, dayOfWeek):
"""
Returns the first date which is later than the value of the date column.
Day of the week parameter is case insensitive, and accepts:
"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".
>>> df = spark.createDataFrame([('2015-07-27',)], ['d'])
>>> df.select(next_day(df.d, 'Sun').alias('date')).collect()
[Row(date=datetime.date(2015, 8, 2))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.next_day(_to_java_column(date), dayOfWeek))
@since(1.5)
def last_day(date):
"""
Returns the last day of the month which the given date belongs to.
>>> df = spark.createDataFrame([('1997-02-10',)], ['d'])
>>> df.select(last_day(df.d).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.last_day(_to_java_column(date)))
@ignore_unicode_prefix
@since(1.5)
def from_unixtime(timestamp, format="yyyy-MM-dd HH:mm:ss"):
"""
Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
representing the timestamp of that moment in the current system time zone in the given
format.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([(1428476400,)], ['unix_time'])
>>> time_df.select(from_unixtime('unix_time').alias('ts')).collect()
[Row(ts=u'2015-04-08 00:00:00')]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.from_unixtime(_to_java_column(timestamp), format))
@since(1.5)
def unix_timestamp(timestamp=None, format='yyyy-MM-dd HH:mm:ss'):
"""
Convert a time string with the given pattern ('yyyy-MM-dd HH:mm:ss', by default)
to a Unix timestamp (in seconds), using the default timezone and the default
locale; returns null on failure.
If `timestamp` is None, then it returns the current timestamp.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> time_df.select(unix_timestamp('dt', 'yyyy-MM-dd').alias('unix_time')).collect()
[Row(unix_time=1428476400)]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
if timestamp is None:
return Column(sc._jvm.functions.unix_timestamp())
return Column(sc._jvm.functions.unix_timestamp(_to_java_column(timestamp), format))
@since(1.5)
def from_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and
renders that timestamp as a timestamp in the given time zone.
However, a timestamp in Spark represents a number of microseconds from the Unix epoch, which is
not timezone-agnostic. So in Spark this function just shifts the timestamp value from the UTC
timezone to the given timezone.
This function may return a confusing result if the input is a string with a timezone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the timezone in the string, and finally displays the result by converting the
timestamp to a string according to the session-local timezone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
>>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))]
"""
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz))
@since(1.5)
def to_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in the given
timezone, and renders that timestamp as a timestamp in UTC.
However, a timestamp in Spark represents a number of microseconds from the Unix epoch, which is
not timezone-agnostic. So in Spark this function just shifts the timestamp value from the given
timezone to the UTC timezone.
This function may return a confusing result if the input is a string with a timezone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the timezone in the string, and finally displays the result by converting the
timestamp to a string according to the session-local timezone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(to_utc_timestamp(df.ts, "PST").alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 18, 30))]
>>> df.select(to_utc_timestamp(df.ts, df.tz).alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 1, 30))]
"""
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.to_utc_timestamp(_to_java_column(timestamp), tz))
@since(2.0)
@ignore_unicode_prefix
def window(timeColumn, windowDuration, slideDuration=None, startTime=None):
"""Bucketize rows into one or more time windows given a timestamp specifying column. Window
starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
[12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
the order of months are not supported.
The time column must be of :class:`pyspark.sql.types.TimestampType`.
Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid
interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'.
If the ``slideDuration`` is not provided, the windows will be tumbling windows.
The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start
window intervals. For example, in order to have hourly tumbling windows that start 15 minutes
past the hour, e.g. 12:15-13:15, 13:15-14:15... provide `startTime` as `15 minutes`.
The output column will be a struct called 'window' by default with the nested columns 'start'
and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`.
>>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
>>> w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum"))
>>> w.select(w.window.start.cast("string").alias("start"),
... w.window.end.cast("string").alias("end"), "sum").collect()
[Row(start=u'2016-03-11 09:00:05', end=u'2016-03-11 09:00:10', sum=1)]
"""
def check_string_field(field, fieldName):
if not field or type(field) is not str:
raise TypeError("%s should be provided as a string" % fieldName)
sc = SparkContext._active_spark_context
time_col = _to_java_column(timeColumn)
check_string_field(windowDuration, "windowDuration")
if slideDuration and startTime:
check_string_field(slideDuration, "slideDuration")
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration, startTime)
elif slideDuration:
check_string_field(slideDuration, "slideDuration")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration)
elif startTime:
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, windowDuration, startTime)
else:
res = sc._jvm.functions.window(time_col, windowDuration)
return Column(res)
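# Illustrative sketch (not part of the upstream module): hourly tumbling windows that start
# 15 minutes past the hour, as described in the docstring above:
#
#     df.groupBy(window("ts", "1 hour", startTime="15 minutes")).count()
#
# "ts" is a hypothetical timestamp column name.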
# ---------------------------- misc functions ----------------------------------
@since(1.5)
@ignore_unicode_prefix
def crc32(col):
"""
Calculates the cyclic redundancy check value (CRC32) of a binary column and
returns the value as a bigint.
>>> spark.createDataFrame([('ABC',)], ['a']).select(crc32('a').alias('crc32')).collect()
[Row(crc32=2743272264)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.crc32(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def md5(col):
"""Calculates the MD5 digest and returns the value as a 32 character hex string.
>>> spark.createDataFrame([('ABC',)], ['a']).select(md5('a').alias('hash')).collect()
[Row(hash=u'902fbdd2b1df0c4f70b4a5d23525e932')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.md5(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def sha1(col):
"""Returns the hex string result of SHA-1.
>>> spark.createDataFrame([('ABC',)], ['a']).select(sha1('a').alias('hash')).collect()
[Row(hash=u'3c01bdbb26f358bab27f267924aa2c9a03fcfdb8')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha1(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def sha2(col, numBits):
"""Returns the hex string result of SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384,
and SHA-512). The numBits indicates the desired bit length of the result, which must have a
value of 224, 256, 384, 512, or 0 (which is equivalent to 256).
>>> digests = df.select(sha2(df.name, 256).alias('s')).collect()
>>> digests[0]
Row(s=u'3bc51062973c458d5a6f2d8d64a023246354ad7e064b1e4e009ec8a0699a3043')
>>> digests[1]
Row(s=u'cd9fb1e148ccd8442e5aa74904cc73bf6fb54d1d54d333bd596aa9bb4bb4e961')
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha2(_to_java_column(col), numBits)
return Column(jc)
@since(2.0)
def hash(*cols):
"""Calculates the hash code of given columns, and returns the result as an int column.
>>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect()
[Row(hash=-757602832)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hash(_to_seq(sc, cols, _to_java_column))
return Column(jc)
# ---------------------- String/Binary functions ------------------------------
_string_functions = {
'ascii': 'Computes the numeric value of the first character of the string column.',
'base64': 'Computes the BASE64 encoding of a binary column and returns it as a string column.',
'unbase64': 'Decodes a BASE64 encoded string column and returns it as a binary column.',
'ltrim': 'Trim the spaces from left end for the specified string value.',
'rtrim': 'Trim the spaces from right end for the specified string value.',
'trim': 'Trim the spaces from both ends for the specified string column.',
}
for _name, _doc in _string_functions.items():
globals()[_name] = since(1.5)(_create_function(_name, _doc))
del _name, _doc
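# Illustrative sketch (not part of the upstream module): the helpers generated above are
# used like any other column function, e.g.
#
#     df.select(trim(df.s).alias("t"), ltrim(df.s).alias("l"), rtrim(df.s).alias("r"))
#
# assuming a hypothetical string column "s".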
@since(1.5)
@ignore_unicode_prefix
def concat_ws(sep, *cols):
"""
Concatenates multiple input string columns together into a single string column,
using the given separator.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect()
[Row(s=u'abcd-123')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def decode(col, charset):
"""
Computes the first argument into a string from a binary using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.decode(_to_java_column(col), charset))
@since(1.5)
def encode(col, charset):
"""
Computes the first argument into a binary from a string using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.encode(_to_java_column(col), charset))
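# Illustrative sketch (not part of the upstream module): `encode` and `decode` are inverses
# for a given character set, e.g.
#
#     df.select(decode(encode(df.s, 'UTF-8'), 'UTF-8').alias('roundtrip'))
#
# assuming a hypothetical string column "s".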
@ignore_unicode_prefix
@since(1.5)
def format_number(col, d):
"""
Formats the number X to a format like '#,--#,--#.--', rounded to d decimal places
with HALF_EVEN round mode, and returns the result as a string.
:param col: the column name of the numeric value to be formatted
:param d: the number of decimal places to round to
>>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect()
[Row(v=u'5.0000')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_number(_to_java_column(col), d))
@ignore_unicode_prefix
@since(1.5)
def format_string(format, *cols):
"""
Formats the arguments in printf-style and returns the result as a string column.
:param format: string that can contain embedded format tags and used as result column's value
:param cols: list of column names (string) or list of :class:`Column` expressions to be used in formatting
>>> df = spark.createDataFrame([(5, "hello")], ['a', 'b'])
>>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect()
[Row(v=u'5 hello')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def instr(str, substr):
"""
Locate the position of the first occurrence of substr in the given string column.
Returns null if either of the arguments is null.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(instr(df.s, 'b').alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.instr(_to_java_column(str), substr))
@since(1.5)
@ignore_unicode_prefix
def substring(str, pos, len):
"""
Substring starts at `pos` and is of length `len` when str is String type or
returns the slice of byte array that starts at `pos` in byte and is of length `len`
when str is Binary type.
.. note:: The position is not zero based, but 1 based index.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(substring(df.s, 1, 2).alias('s')).collect()
[Row(s=u'ab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len))
@since(1.5)
@ignore_unicode_prefix
def substring_index(str, delim, count):
"""
Returns the substring from string str before count occurrences of the delimiter delim.
If count is positive, everything to the left of the final delimiter (counting from the left) is
returned. If count is negative, everything to the right of the final delimiter (counting from the
right) is returned. substring_index performs a case-sensitive match when searching for delim.
>>> df = spark.createDataFrame([('a.b.c.d',)], ['s'])
>>> df.select(substring_index(df.s, '.', 2).alias('s')).collect()
[Row(s=u'a.b')]
>>> df.select(substring_index(df.s, '.', -3).alias('s')).collect()
[Row(s=u'b.c.d')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring_index(_to_java_column(str), delim, count))
@ignore_unicode_prefix
@since(1.5)
def levenshtein(left, right):
"""Computes the Levenshtein distance of the two given strings.
>>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
>>> df0.select(levenshtein('l', 'r').alias('d')).collect()
[Row(d=3)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right))
return Column(jc)
@since(1.5)
def locate(substr, str, pos=1):
"""
Locate the position of the first occurrence of substr in a string column, after position pos.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
:param substr: a string
:param str: a Column of :class:`pyspark.sql.types.StringType`
:param pos: start position (zero based)
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(locate('b', df.s, 1).alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos))
@since(1.5)
@ignore_unicode_prefix
def lpad(col, len, pad):
"""
Left-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'##abcd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def rpad(col, len, pad):
"""
Right-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(rpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'abcd##')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.rpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def repeat(col, n):
"""
Repeats a string column n times, and returns it as a new string column.
>>> df = spark.createDataFrame([('ab',)], ['s',])
>>> df.select(repeat(df.s, 3).alias('s')).collect()
[Row(s=u'ababab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.repeat(_to_java_column(col), n))
@since(1.5)
@ignore_unicode_prefix
def split(str, pattern, limit=-1):
"""
Splits str around matches of the given pattern.
:param str: a string expression to split
:param pattern: a string representing a regular expression. The regex string should be
a Java regular expression.
:param limit: an integer which controls the number of times `pattern` is applied.
* ``limit > 0``: The resulting array's length will not be more than `limit`, and the
resulting array's last entry will contain all input beyond the last
matched pattern.
* ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting
array can be of any size.
.. versionchanged:: 3.0
`split` now takes an optional `limit` field. If not provided, default limit value is -1.
>>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',])
>>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect()
[Row(s=[u'one', u'twoBthreeC'])]
>>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect()
[Row(s=[u'one', u'two', u'three', u''])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.split(_to_java_column(str), pattern, limit))
@ignore_unicode_prefix
@since(1.5)
def regexp_extract(str, pattern, idx):
r"""Extract a specific group matched by a Java regex, from the specified string column.
If the regex did not match, or the specified group did not match, an empty string is returned.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
[Row(d=u'100')]
>>> df = spark.createDataFrame([('foo',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
[Row(d=u'')]
>>> df = spark.createDataFrame([('aaaac',)], ['str'])
>>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
[Row(d=u'')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx)
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def regexp_replace(str, pattern, replacement):
r"""Replace all substrings of the specified string value that match regexp with rep.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
[Row(d=u'-----')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement)
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def initcap(col):
"""Translate the first letter of each word to upper case in the sentence.
>>> spark.createDataFrame([('ab cd',)], ['a']).select(initcap("a").alias('v')).collect()
[Row(v=u'Ab Cd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.initcap(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def soundex(col):
"""
Returns the SoundEx encoding for a string
>>> df = spark.createDataFrame([("Peters",),("Uhrbach",)], ['name'])
>>> df.select(soundex(df.name).alias("soundex")).collect()
[Row(soundex=u'P362'), Row(soundex=u'U612')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.soundex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def bin(col):
"""Returns the string representation of the binary value of the given column.
>>> df.select(bin(df.age).alias('c')).collect()
[Row(c=u'10'), Row(c=u'101')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.bin(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def hex(col):
"""Computes hex value of the given column, which could be :class:`pyspark.sql.types.StringType`,
:class:`pyspark.sql.types.BinaryType`, :class:`pyspark.sql.types.IntegerType` or
:class:`pyspark.sql.types.LongType`.
>>> spark.createDataFrame([('ABC', 3)], ['a', 'b']).select(hex('a'), hex('b')).collect()
[Row(hex(a)=u'414243', hex(b)=u'3')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hex(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def unhex(col):
"""Inverse of hex. Interprets each pair of characters as a hexadecimal number
and converts to the byte representation of number.
>>> spark.createDataFrame([('414243',)], ['a']).select(unhex('a')).collect()
[Row(unhex(a)=bytearray(b'ABC'))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.unhex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def length(col):
"""Computes the character length of string data or number of bytes of binary data.
The length of character data includes the trailing spaces. The length of binary data
includes binary zeros.
>>> spark.createDataFrame([('ABC ',)], ['a']).select(length('a').alias('length')).collect()
[Row(length=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.length(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def translate(srcCol, matching, replace):
"""A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `matching`.
>>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
... .alias('r')).collect()
[Row(r=u'1a2s3ae')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace))
# ---------------------- Collection functions ------------------------------
@ignore_unicode_prefix
@since(2.0)
def create_map(*cols):
"""Creates a new map column.
:param cols: list of column names (string) or list of :class:`Column` expressions that are
grouped as key-value pairs, e.g. (key1, value1, key2, value2, ...).
>>> df.select(create_map('name', 'age').alias("map")).collect()
[Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
>>> df.select(create_map([df.name, df.age]).alias("map")).collect()
[Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(2.4)
def map_from_arrays(col1, col2):
"""Creates a new map from two arrays.
:param col1: name of column containing a set of keys. All elements should not be null
:param col2: name of column containing a set of values
>>> df = spark.createDataFrame([([2, 5], ['a', 'b'])], ['k', 'v'])
>>> df.select(map_from_arrays(df.k, df.v).alias("map")).show()
+----------------+
| map|
+----------------+
|[2 -> a, 5 -> b]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_arrays(_to_java_column(col1), _to_java_column(col2)))
@since(1.4)
def array(*cols):
"""Creates a new array column.
:param cols: list of column names (string) or list of :class:`Column` expressions that have
the same data type.
>>> df.select(array('age', 'age').alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
>>> df.select(array([df.age, df.age]).alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.array(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.5)
def array_contains(col, value):
"""
Collection function: returns null if the array is null, true if the array contains the
given value, and false otherwise.
:param col: name of column containing array
:param value: value to check for in array
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(array_contains(df.data, "a")).collect()
[Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_contains(_to_java_column(col), value))
@since(2.4)
def arrays_overlap(a1, a2):
"""
Collection function: returns true if the arrays contain any common non-null element; if not,
returns null if both the arrays are non-empty and any of them contains a null element; returns
false otherwise.
>>> df = spark.createDataFrame([(["a", "b"], ["b", "c"]), (["a"], ["b", "c"])], ['x', 'y'])
>>> df.select(arrays_overlap(df.x, df.y).alias("overlap")).collect()
[Row(overlap=True), Row(overlap=False)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_overlap(_to_java_column(a1), _to_java_column(a2)))
@since(2.4)
def slice(x, start, length):
"""
Collection function: returns an array containing all the elements in `x` from index `start`
(or starting from the end if `start` is negative) with the specified `length`.
>>> df = spark.createDataFrame([([1, 2, 3],), ([4, 5],)], ['x'])
>>> df.select(slice(df.x, 2, 2).alias("sliced")).collect()
[Row(sliced=[2, 3]), Row(sliced=[5])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.slice(_to_java_column(x), start, length))
@ignore_unicode_prefix
@since(2.4)
def array_join(col, delimiter, null_replacement=None):
"""
Concatenates the elements of `col` using the `delimiter`. Null values are replaced with
`null_replacement` if set, otherwise they are ignored.
>>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
>>> df.select(array_join(df.data, ",").alias("joined")).collect()
[Row(joined=u'a,b,c'), Row(joined=u'a')]
>>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
[Row(joined=u'a,b,c'), Row(joined=u'a,NULL')]
"""
sc = SparkContext._active_spark_context
if null_replacement is None:
return Column(sc._jvm.functions.array_join(_to_java_column(col), delimiter))
else:
return Column(sc._jvm.functions.array_join(
_to_java_column(col), delimiter, null_replacement))
@since(1.5)
@ignore_unicode_prefix
def concat(*cols):
"""
Concatenates multiple input columns together into a single column.
The function works with strings, binary and compatible array columns.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat(df.s, df.d).alias('s')).collect()
[Row(s=u'abcd123')]
>>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
>>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
[Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column)))
@since(2.4)
def array_position(col, value):
"""
Collection function: Locates the position of the first occurrence of the given value
in the given array. Returns null if either of the arguments are null.
.. note:: The position is not zero based, but 1 based index. Returns 0 if the given
value could not be found in the array.
>>> df = spark.createDataFrame([(["c", "b", "a"],), ([],)], ['data'])
>>> df.select(array_position(df.data, "a")).collect()
[Row(array_position(data, a)=3), Row(array_position(data, a)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_position(_to_java_column(col), value))
@ignore_unicode_prefix
@since(2.4)
def element_at(col, extraction):
"""
Collection function: Returns element of array at given index in extraction if col is array.
Returns value for the given key in extraction if col is map.
:param col: name of column containing array or map
:param extraction: index to check for in array or key to check for in map
.. note:: The position is not zero based, but 1 based index.
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(element_at(df.data, 1)).collect()
[Row(element_at(data, 1)=u'a'), Row(element_at(data, 1)=None)]
>>> df = spark.createDataFrame([({"a": 1.0, "b": 2.0},), ({},)], ['data'])
>>> df.select(element_at(df.data, "a")).collect()
[Row(element_at(data, a)=1.0), Row(element_at(data, a)=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.element_at(_to_java_column(col), extraction))
@since(2.4)
def array_remove(col, element):
"""
Collection function: Remove all elements that are equal to `element` from the given array.
:param col: name of column containing array
:param element: element to be removed from the array
>>> df = spark.createDataFrame([([1, 2, 3, 1, 1],), ([],)], ['data'])
>>> df.select(array_remove(df.data, 1)).collect()
[Row(array_remove(data, 1)=[2, 3]), Row(array_remove(data, 1)=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_remove(_to_java_column(col), element))
@since(2.4)
def array_distinct(col):
"""
Collection function: removes duplicate values from the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3, 2],), ([4, 5, 5, 4],)], ['data'])
>>> df.select(array_distinct(df.data)).collect()
[Row(array_distinct(data)=[1, 2, 3]), Row(array_distinct(data)=[4, 5])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_distinct(_to_java_column(col)))
@ignore_unicode_prefix
@since(2.4)
def array_intersect(col1, col2):
"""
Collection function: returns an array of the elements in the intersection of col1 and col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_intersect(df.c1, df.c2)).collect()
[Row(array_intersect(c1, c2)=[u'a', u'c'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_intersect(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(2.4)
def array_union(col1, col2):
"""
Collection function: returns an array of the elements in the union of col1 and col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_union(df.c1, df.c2)).collect()
[Row(array_union(c1, c2)=[u'b', u'a', u'c', u'd', u'f'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_union(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(2.4)
def array_except(col1, col2):
"""
Collection function: returns an array of the elements in col1 but not in col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_except(df.c1, df.c2)).collect()
[Row(array_except(c1, c2)=[u'b'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_except(_to_java_column(col1), _to_java_column(col2)))
@since(1.4)
def explode(col):
"""Returns a new row for each element in the given array or map.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
[Row(anInt=1), Row(anInt=2), Row(anInt=3)]
>>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
+---+-----+
|key|value|
+---+-----+
| a| b|
+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode(_to_java_column(col))
return Column(jc)
@since(2.1)
def posexplode(col):
"""Returns a new row for each element with position in the given array or map.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(posexplode(eDF.intlist)).collect()
[Row(pos=0, col=1), Row(pos=1, col=2), Row(pos=2, col=3)]
>>> eDF.select(posexplode(eDF.mapfield)).show()
+---+---+-----+
|pos|key|value|
+---+---+-----+
| 0| a| b|
+---+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode(_to_java_column(col))
return Column(jc)
@since(2.3)
def explode_outer(col):
"""Returns a new row for each element in the given array or map.
Unlike explode, if the array/map is null or empty then null is produced.
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", explode_outer("a_map")).show()
+---+----------+----+-----+
| id| an_array| key|value|
+---+----------+----+-----+
| 1|[foo, bar]| x| 1.0|
| 2| []|null| null|
| 3| null|null| null|
+---+----------+----+-----+
>>> df.select("id", "a_map", explode_outer("an_array")).show()
+---+----------+----+
| id| a_map| col|
+---+----------+----+
| 1|[x -> 1.0]| foo|
| 1|[x -> 1.0]| bar|
| 2| []|null|
| 3| null|null|
+---+----------+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode_outer(_to_java_column(col))
return Column(jc)
@since(2.3)
def posexplode_outer(col):
"""Returns a new row for each element with position in the given array or map.
Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced.
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", posexplode_outer("a_map")).show()
+---+----------+----+----+-----+
| id| an_array| pos| key|value|
+---+----------+----+----+-----+
| 1|[foo, bar]| 0| x| 1.0|
| 2| []|null|null| null|
| 3| null|null|null| null|
+---+----------+----+----+-----+
>>> df.select("id", "a_map", posexplode_outer("an_array")).show()
+---+----------+----+----+
| id| a_map| pos| col|
+---+----------+----+----+
| 1|[x -> 1.0]| 0| foo|
| 1|[x -> 1.0]| 1| bar|
| 2| []|null|null|
| 3| null|null|null|
+---+----------+----+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode_outer(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.6)
def get_json_object(col, path):
"""
Extracts json object from a json string based on json path specified, and returns json string
of the extracted json object. It will return null if the input json string is invalid.
:param col: string column in json format
:param path: path to the json object to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
... get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.get_json_object(_to_java_column(col), path)
return Column(jc)
@ignore_unicode_prefix
@since(1.6)
def json_tuple(col, *fields):
"""Creates a new row for a json column according to the given field names.
:param col: string column in json format
:param fields: list of fields to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def from_json(col, schema, options={}):
"""
Parses a column containing a JSON string into a :class:`MapType` with :class:`StringType`
as keys type, :class:`StructType` or :class:`ArrayType` with
the specified schema. Returns `null`, in the case of an unparseable string.
:param col: string column in json format
:param schema: a StructType or ArrayType of StructType to use when parsing the json column.
:param options: options to control parsing. accepts the same options as the json datasource
.. note:: Since Spark 2.3, the DDL-formatted string or a JSON format string is also
supported for ``schema``.
>>> from pyspark.sql.types import *
>>> data = [(1, '''{"a": 1}''')]
>>> schema = StructType([StructField("a", IntegerType())])
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "a INT").alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "MAP<STRING,INT>").alias("json")).collect()
[Row(json={u'a': 1})]
>>> data = [(1, '''[{"a": 1}]''')]
>>> schema = ArrayType(StructType([StructField("a", IntegerType())]))
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[Row(a=1)])]
>>> schema = schema_of_json(lit('''{"a": 0}'''))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=None))]
>>> data = [(1, '''[1, 2, 3]''')]
>>> schema = ArrayType(IntegerType())
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[1, 2, 3])]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, DataType):
schema = schema.json()
elif isinstance(schema, Column):
schema = _to_java_column(schema)
jc = sc._jvm.functions.from_json(_to_java_column(col), schema, options)
return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def to_json(col, options={}):
"""
Converts a column containing a :class:`StructType`, :class:`ArrayType` or a :class:`MapType`
into a JSON string. Throws an exception, in the case of an unsupported type.
:param col: name of column containing a struct, an array or a map.
:param options: options to control converting. accepts the same options as the JSON datasource.
Additionally the function supports the `pretty` option which enables
pretty JSON generation.
>>> from pyspark.sql import Row
>>> from pyspark.sql.types import *
>>> data = [(1, Row(name='Alice', age=2))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'{"age":2,"name":"Alice"}')]
>>> data = [(1, [Row(name='Alice', age=2), Row(name='Bob', age=3)])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'[{"age":2,"name":"Alice"},{"age":3,"name":"Bob"}]')]
>>> data = [(1, {"name": "Alice"})]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'{"name":"Alice"}')]
>>> data = [(1, [{"name": "Alice"}, {"name": "Bob"}])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'[{"name":"Alice"},{"name":"Bob"}]')]
>>> data = [(1, ["Alice", "Bob"])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'["Alice","Bob"]')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_json(_to_java_column(col), options)
return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def schema_of_json(json, options={}):
"""
Parses a JSON string and infers its schema in DDL format.
:param json: a JSON string or a string literal containing a JSON string.
:param options: options to control parsing. accepts the same options as the JSON datasource
.. versionchanged:: 3.0
It accepts `options` parameter to control schema inferring.
>>> df = spark.range(1)
>>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
[Row(json=u'struct<a:bigint>')]
>>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
>>> df.select(schema.alias("json")).collect()
[Row(json=u'struct<a:bigint>')]
"""
if isinstance(json, basestring):
col = _create_column_from_literal(json)
elif isinstance(json, Column):
col = _to_java_column(json)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_json(col, options)
return Column(jc)
@ignore_unicode_prefix
@since(3.0)
def schema_of_csv(csv, options={}):
"""
Parses a CSV string and infers its schema in DDL format.
:param csv: a CSV string or a string literal containing a CSV string.
:param options: options to control parsing. accepts the same options as the CSV datasource
>>> df = spark.range(1)
>>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
[Row(csv=u'struct<_c0:int,_c1:string>')]
>>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
[Row(csv=u'struct<_c0:int,_c1:string>')]
"""
if isinstance(csv, basestring):
col = _create_column_from_literal(csv)
elif isinstance(csv, Column):
col = _to_java_column(csv)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_csv(col, options)
return Column(jc)
@ignore_unicode_prefix
@since(3.0)
def to_csv(col, options={}):
"""
Converts a column containing a :class:`StructType` into a CSV string.
Throws an exception, in the case of an unsupported type.
:param col: name of column containing a struct.
:param options: options to control converting. accepts the same options as the CSV datasource.
>>> from pyspark.sql import Row
>>> data = [(1, Row(name='Alice', age=2))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_csv(df.value).alias("csv")).collect()
[Row(csv=u'2,Alice')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_csv(_to_java_column(col), options)
return Column(jc)
@since(1.5)
def size(col):
"""
Collection function: returns the length of the array or map stored in the column.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
>>> df.select(size(df.data)).collect()
[Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.size(_to_java_column(col)))
@since(2.4)
def array_min(col):
"""
Collection function: returns the minimum value of the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_min(df.data).alias('min')).collect()
[Row(min=1), Row(min=-1)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_min(_to_java_column(col)))
@since(2.4)
def array_max(col):
"""
Collection function: returns the maximum value of the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_max(df.data).alias('max')).collect()
[Row(max=3), Row(max=10)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_max(_to_java_column(col)))
@since(1.5)
def sort_array(col, asc=True):
"""
Collection function: sorts the input array in ascending or descending order according
to the natural ordering of the array elements. Null elements will be placed at the beginning
of the returned array in ascending order or at the end of the returned array in descending
order.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(sort_array(df.data).alias('r')).collect()
[Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])]
>>> df.select(sort_array(df.data, asc=False).alias('r')).collect()
[Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.sort_array(_to_java_column(col), asc))
@since(2.4)
def array_sort(col):
"""
Collection function: sorts the input array in ascending order. The elements of the input array
must be orderable. Null elements will be placed at the end of the returned array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(array_sort(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_sort(_to_java_column(col)))
@since(2.4)
def shuffle(col):
"""
Collection function: Generates a random permutation of the given array.
.. note:: The function is non-deterministic.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 20, 3, 5],), ([1, 20, None, 3],)], ['data'])
>>> df.select(shuffle(df.data).alias('s')).collect() # doctest: +SKIP
[Row(s=[3, 1, 5, 20]), Row(s=[20, None, 3, 1])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shuffle(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def reverse(col):
"""
Collection function: returns a reversed string or an array with reverse order of elements.
:param col: name of column or expression
>>> df = spark.createDataFrame([('Spark SQL',)], ['data'])
>>> df.select(reverse(df.data).alias('s')).collect()
[Row(s=u'LQS krapS')]
>>> df = spark.createDataFrame([([2, 1, 3],) ,([1],) ,([],)], ['data'])
>>> df.select(reverse(df.data).alias('r')).collect()
[Row(r=[3, 1, 2]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.reverse(_to_java_column(col)))
@since(2.4)
def flatten(col):
"""
Collection function: creates a single array from an array of arrays.
If a structure of nested arrays is deeper than two levels,
only one level of nesting is removed.
:param col: name of column or expression
>>> df = spark.createDataFrame([([[1, 2, 3], [4, 5], [6]],), ([None, [4, 5]],)], ['data'])
>>> df.select(flatten(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, 4, 5, 6]), Row(r=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.flatten(_to_java_column(col)))
@since(2.3)
def map_keys(col):
"""
Collection function: Returns an unordered array containing the keys of the map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_keys
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_keys("data").alias("keys")).show()
+------+
| keys|
+------+
|[1, 2]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_keys(_to_java_column(col)))
@since(2.3)
def map_values(col):
"""
Collection function: Returns an unordered array containing the values of the map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_values
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_values("data").alias("values")).show()
+------+
|values|
+------+
|[a, b]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_values(_to_java_column(col)))
@since(3.0)
def map_entries(col):
"""
Collection function: Returns an unordered array of all entries in the given map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_entries
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_entries("data").alias("entries")).show()
+----------------+
| entries|
+----------------+
|[[1, a], [2, b]]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_entries(_to_java_column(col)))
@since(2.4)
def map_from_entries(col):
"""
Collection function: Returns a map created from the given array of entries.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_from_entries
>>> df = spark.sql("SELECT array(struct(1, 'a'), struct(2, 'b')) as data")
>>> df.select(map_from_entries("data").alias("map")).show()
+----------------+
| map|
+----------------+
|[1 -> a, 2 -> b]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_entries(_to_java_column(col)))
@ignore_unicode_prefix
@since(2.4)
def array_repeat(col, count):
"""
Collection function: creates an array containing a column repeated count times.
>>> df = spark.createDataFrame([('ab',)], ['data'])
>>> df.select(array_repeat(df.data, 3).alias('r')).collect()
[Row(r=[u'ab', u'ab', u'ab'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_repeat(_to_java_column(col), count))
@since(2.4)
def arrays_zip(*cols):
"""
Collection function: Returns a merged array of structs in which the N-th struct contains all
N-th values of input arrays.
:param cols: columns of arrays to be merged.
>>> from pyspark.sql.functions import arrays_zip
>>> df = spark.createDataFrame([(([1, 2, 3], [2, 3, 4]))], ['vals1', 'vals2'])
>>> df.select(arrays_zip(df.vals1, df.vals2).alias('zipped')).collect()
[Row(zipped=[Row(vals1=1, vals2=2), Row(vals1=2, vals2=3), Row(vals1=3, vals2=4)])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_zip(_to_seq(sc, cols, _to_java_column)))
@since(2.4)
def map_concat(*cols):
"""Returns the union of all the given maps.
:param cols: list of column names (string) or list of :class:`Column` expressions
>>> from pyspark.sql.functions import map_concat
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2")
>>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
+------------------------+
|map3 |
+------------------------+
|[1 -> d, 2 -> b, 3 -> c]|
+------------------------+
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map_concat(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(2.4)
def sequence(start, stop, step=None):
"""
    Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
    If `step` is not set, it defaults to 1 when `start` is less than or equal to `stop`,
    and to -1 otherwise.
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
"""
sc = SparkContext._active_spark_context
if step is None:
return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
else:
return Column(sc._jvm.functions.sequence(
_to_java_column(start), _to_java_column(stop), _to_java_column(step)))
@ignore_unicode_prefix
@since(3.0)
def from_csv(col, schema, options={}):
"""
Parses a column containing a CSV string to a row with the specified schema.
    Returns `null` in the case of an unparseable string.
:param col: string column in CSV format
:param schema: a string with schema in DDL format to use when parsing the CSV column.
:param options: options to control parsing. accepts the same options as the CSV datasource
>>> data = [("1,2,3",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
[Row(csv=Row(a=1, b=2, c=3))]
>>> value = data[0][0]
>>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
[Row(csv=Row(_c0=1, _c1=2, _c2=3))]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, basestring):
schema = _create_column_from_literal(schema)
elif isinstance(schema, Column):
schema = _to_java_column(schema)
else:
raise TypeError("schema argument should be a column or string")
jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, options)
return Column(jc)
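# A minimal usage sketch (an illustration, not part of the module; assumes a running
# SparkSession named `spark`): parser options such as a custom separator can be passed
# through the `options` dict, using the same option names as the CSV datasource.
def _example_from_csv_with_options(spark):
    """Hypothetical sketch showing `from_csv` with a non-default separator."""
    df = spark.createDataFrame([("1;2;3",)], ("value",))
    # "sep" is the CSV datasource option for the field delimiter.
    return df.select(
        from_csv(df.value, "a INT, b INT, c INT", {"sep": ";"}).alias("csv")).collect()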
# ---------------------------- User Defined Function ----------------------------------
class PandasUDFType(object):
"""Pandas UDF Types. See :meth:`pyspark.sql.functions.pandas_udf`.
"""
SCALAR = PythonEvalType.SQL_SCALAR_PANDAS_UDF
GROUPED_MAP = PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF
GROUPED_AGG = PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
@since(1.3)
def udf(f=None, returnType=StringType()):
"""Creates a user defined function (UDF).
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> from pyspark.sql.types import IntegerType
>>> import random
>>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()
    .. note:: The user-defined functions do not support conditional expressions or short circuiting
        in boolean expressions; they end up being evaluated in full internally. If the functions
        can fail on special rows, the workaround is to incorporate the condition into the functions.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
:param f: python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.types import IntegerType
>>> slen = udf(lambda s: len(s), IntegerType())
>>> @udf
... def to_upper(s):
... if s is not None:
... return s.upper()
...
>>> @udf(returnType=IntegerType())
... def add_one(x):
... if x is not None:
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
"""
# The following table shows most of Python data and SQL type conversions in normal UDFs that
# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-25666's PR to see the codes in order to generate the table below.
#
# +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa
# |SQL Type \ Python Value(Type)|None(NoneType)|True(bool)|1(int)|1(long)| a(str)| a(unicode)| 1970-01-01(date)|1970-01-01 00:00:00(datetime)|1.0(float)|array('i', [1])(array)|[1](list)| (1,)(tuple)| ABC(bytearray)| 1(Decimal)|{'a': 1}(dict)|Row(kwargs=1)(Row)|Row(namedtuple=1)(Row)| # noqa
# +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa
# | boolean| None| True| None| None| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | tinyint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | smallint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | int| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | bigint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | string| None| u'true'| u'1'| u'1'| u'a'| u'a'|u'java.util.Grego...| u'java.util.Grego...| u'1.0'| u'[I@24a83055'| u'[1]'|u'[Ljava.lang.Obj...| u'[B@49093632'| u'1'| u'{a=1}'| X| X| # noqa
# | date| None| X| X| X| X| X|datetime.date(197...| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X| X| X| datetime.datetime...| X| X| X| X| X| X| X| X| X| # noqa
# | float| None| None| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | double| None| None| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | array<int>| None| None| None| None| None| None| None| None| None| [1]| [1]| [1]| [65, 66, 67]| None| None| X| X| # noqa
# | binary| None| None| None| None|bytearray(b'a')|bytearray(b'a')| None| None| None| None| None| None|bytearray(b'ABC')| None| None| X| X| # noqa
# | decimal(10,0)| None| None| None| None| None| None| None| None| None| None| None| None| None|Decimal('1')| None| X| X| # noqa
# | map<string,int>| None| None| None| None| None| None| None| None| None| None| None| None| None| None| {u'a': 1}| X| X| # noqa
# | struct<_1:int>| None| X| X| X| X| X| X| X| X| X|Row(_1=1)| Row(_1=1)| X| X| Row(_1=None)| Row(_1=1)| Row(_1=1)| # noqa
# +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: Python 2 is used to generate this table since it is used to check the backward
# compatibility often in practice.
# Note: 'X' means it throws an exception during the conversion.
# decorator @udf, @udf(), @udf(dataType())
if f is None or isinstance(f, (str, DataType)):
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
return functools.partial(_create_udf, returnType=return_type,
evalType=PythonEvalType.SQL_BATCHED_UDF)
else:
return _create_udf(f=f, returnType=returnType,
evalType=PythonEvalType.SQL_BATCHED_UDF)
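# A small usage sketch (an illustration, not part of the module; assumes a running
# SparkSession named `spark`): the return type can also be given as a DDL-formatted
# string, and the resulting UDF can be registered for SQL use with `spark.udf.register`.
def _example_udf_with_ddl_return_type(spark):
    """Hypothetical sketch of a UDF declared with a DDL type string."""
    squared = udf(lambda x: x * x if x is not None else None, "bigint")
    spark.udf.register("squared_py", squared)  # callable from SQL as squared_py(...)
    return spark.range(4).select(squared("id").alias("sq"))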
@since(2.3)
def pandas_udf(f=None, returnType=None, functionType=None):
"""
Creates a vectorized user defined function (UDF).
:param f: user-defined function. A python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
:param functionType: an enum value in :class:`pyspark.sql.functions.PandasUDFType`.
Default: SCALAR.
.. note:: Experimental
The function type of the UDF can be one of the following:
1. SCALAR
A scalar UDF defines a transformation: One or more `pandas.Series` -> A `pandas.Series`.
       The length of the returned `pandas.Series` must be the same as that of the input `pandas.Series`.
If the return type is :class:`StructType`, the returned value should be a `pandas.DataFrame`.
:class:`MapType`, nested :class:`StructType` are currently not supported as output types.
Scalar UDFs are used with :meth:`pyspark.sql.DataFrame.withColumn` and
:meth:`pyspark.sql.DataFrame.select`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql.types import IntegerType, StringType
>>> slen = pandas_udf(lambda s: s.str.len(), IntegerType()) # doctest: +SKIP
>>> @pandas_udf(StringType()) # doctest: +SKIP
... def to_upper(s):
... return s.str.upper()
...
>>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP
... def add_one(x):
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)],
... ("id", "name", "age")) # doctest: +SKIP
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")) \\
... .show() # doctest: +SKIP
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
>>> @pandas_udf("first string, last string") # doctest: +SKIP
... def split_expand(n):
... return n.str.split(expand=True)
>>> df.select(split_expand("name")).show() # doctest: +SKIP
+------------------+
|split_expand(name)|
+------------------+
| [John, Doe]|
+------------------+
.. note:: The length of `pandas.Series` within a scalar UDF is not that of the whole input
column, but is the length of an internal batch used for each call to the function.
           Therefore, the batch length can be used, for example, to set the length of each returned
           `pandas.Series`, but it cannot be used as the column length.
2. GROUPED_MAP
A grouped map UDF defines transformation: A `pandas.DataFrame` -> A `pandas.DataFrame`
The returnType should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined returnType schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
Grouped map UDFs are used with :meth:`pyspark.sql.GroupedData.apply`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
Alternatively, the user can define a function that takes two arguments.
In this case, the grouping key(s) will be passed as the first argument and the data will
be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy
data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in
as a `pandas.DataFrame` containing all columns from the original Spark DataFrame.
This is useful when the user does not want to hardcode grouping key(s) in the function.
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def mean_udf(key, pdf):
... # key is a tuple of one numpy.int64, which is the value
... # of 'id' for the current group
... return pd.DataFrame([key + (pdf.v.mean(),)])
>>> df.groupby('id').apply(mean_udf).show() # doctest: +SKIP
+---+---+
| id| v|
+---+---+
| 1|1.5|
| 2|6.0|
+---+---+
>>> @pandas_udf(
... "id long, `ceil(v / 2)` long, v double",
... PandasUDFType.GROUPED_MAP) # doctest: +SKIP
       ... def sum_udf(key, pdf):
... # key is a tuple of two numpy.int64s, which is the values
... # of 'id' and 'ceil(df.v / 2)' for the current group
... return pd.DataFrame([key + (pdf.v.sum(),)])
>>> df.groupby(df.id, ceil(df.v / 2)).apply(sum_udf).show() # doctest: +SKIP
+---+-----------+----+
| id|ceil(v / 2)| v|
+---+-----------+----+
| 2| 5|10.0|
| 1| 1| 3.0|
| 2| 3| 5.0|
| 2| 2| 3.0|
+---+-----------+----+
.. note:: If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
.. seealso:: :meth:`pyspark.sql.GroupedData.apply`
3. GROUPED_AGG
A grouped aggregate UDF defines a transformation: One or more `pandas.Series` -> A scalar
The `returnType` should be a primitive data type, e.g., :class:`DoubleType`.
The returned scalar can be either a python primitive type, e.g., `int` or `float`
or a numpy data type, e.g., `numpy.int64` or `numpy.float64`.
:class:`MapType` and :class:`StructType` are currently not supported as output types.
Group aggregate UDFs are used with :meth:`pyspark.sql.GroupedData.agg` and
:class:`pyspark.sql.Window`
This example shows using grouped aggregated UDFs with groupby:
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> df.groupby("id").agg(mean_udf(df['v'])).show() # doctest: +SKIP
+---+-----------+
| id|mean_udf(v)|
+---+-----------+
| 1| 1.5|
| 2| 6.0|
+---+-----------+
This example shows using grouped aggregated UDFs as window functions.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql import Window
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> w = (Window.partitionBy('id')
... .orderBy('v')
... .rowsBetween(-1, 0))
>>> df.withColumn('mean_v', mean_udf(df['v']).over(w)).show() # doctest: +SKIP
+---+----+------+
| id| v|mean_v|
+---+----+------+
| 1| 1.0| 1.0|
| 1| 2.0| 1.5|
| 2| 3.0| 3.0|
| 2| 5.0| 4.0|
| 2|10.0| 7.5|
+---+----+------+
.. note:: For performance reasons, the input series to window functions are not copied.
Therefore, mutating the input series is not allowed and will cause incorrect results.
For the same reason, users should also not rely on the index of the input series.
.. seealso:: :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window`
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> @pandas_udf('double', PandasUDFType.SCALAR) # doctest: +SKIP
... def random(v):
... import numpy as np
... import pandas as pd
    ...     return pd.Series(np.random.randn(len(v)))
>>> random = random.asNondeterministic() # doctest: +SKIP
    .. note:: The user-defined functions do not support conditional expressions or short circuiting
        in boolean expressions; they end up being evaluated in full internally. If the functions
        can fail on special rows, the workaround is to incorporate the condition into the functions.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
.. note:: The data type of returned `pandas.Series` from the user-defined functions should be
matched with defined returnType (see :meth:`types.to_arrow_type` and
:meth:`types.from_arrow_type`). When there is mismatch between them, Spark might do
conversion on returned data. The conversion is not guaranteed to be correct and results
should be checked for accuracy by users.
"""
# The following table shows most of Pandas data and SQL type conversions in Pandas UDFs that
# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-25798's PR to see the codes in order to generate the table below.
#
# +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa
# |SQL Type \ Pandas Value(Type)|None(object(NoneType))|True(bool)|1(int8)|1(int16)| 1(int32)| 1(int64)|1(uint8)|1(uint16)|1(uint32)|1(uint64)|1.0(float16)|1.0(float32)|1.0(float64)|1970-01-01 00:00:00(datetime64[ns])|1970-01-01 00:00:00-05:00(datetime64[ns, US/Eastern])|a(object(string))| 1(object(Decimal))|[1 2 3](object(array[int32]))|1.0(float128)|(1+0j)(complex64)|(1+0j)(complex128)|A(category)|1 days 00:00:00(timedelta64[ns])| # noqa
# +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa
# | boolean| None| True| True| True| True| True| True| True| True| True| False| False| False| False| False| X| X| X| False| False| False| X| False| # noqa
# | tinyint| None| 1| 1| 1| 1| 1| X| X| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| 0| X| # noqa
# | smallint| None| 1| 1| 1| 1| 1| 1| X| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| X| X| # noqa
# | int| None| 1| 1| 1| 1| 1| 1| 1| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| X| X| # noqa
# | bigint| None| 1| 1| 1| 1| 1| 1| 1| 1| X| 1| 1| 1| 0| 18000000000000| X| X| X| X| X| X| X| X| # noqa
# | float| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X|1.401298464324817...| X| X| X| X| X| X| # noqa
# | double| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | date| None| X| X| X|datetime.date(197...| X| X| X| X| X| X| X| X| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X|datetime.datetime...| X| X| X| X| X| X| X| datetime.datetime...| datetime.datetime...| X| X| X| X| X| X| X| X| # noqa
# | string| None| u''|u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u''| u''| u''| X| X| u'a'| X| X| u''| u''| u''| X| X| # noqa
# | decimal(10,0)| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| Decimal('1')| X| X| X| X| X| X| # noqa
# | array<int>| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| [1, 2, 3]| X| X| X| X| X| # noqa
# | map<string,int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | struct<_1:int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | binary| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: Python 2 is used to generate this table since it is used to check the backward
# compatibility often in practice.
# Note: Pandas 0.19.2 and PyArrow 0.9.0 are used.
# Note: Timezone is Singapore timezone.
# Note: 'X' means it throws an exception during the conversion.
# Note: 'binary' type is only supported with PyArrow 0.10.0+ (SPARK-23555).
# decorator @pandas_udf(returnType, functionType)
is_decorator = f is None or isinstance(f, (str, DataType))
if is_decorator:
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
if functionType is not None:
# @pandas_udf(dataType, functionType=functionType)
# @pandas_udf(returnType=dataType, functionType=functionType)
eval_type = functionType
elif returnType is not None and isinstance(returnType, int):
# @pandas_udf(dataType, functionType)
eval_type = returnType
else:
# @pandas_udf(dataType) or @pandas_udf(returnType=dataType)
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
else:
return_type = returnType
if functionType is not None:
eval_type = functionType
else:
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
if return_type is None:
raise ValueError("Invalid returnType: returnType can not be None")
if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF]:
        raise ValueError("Invalid functionType: "
                         "functionType must be one of the values from PandasUDFType")
if is_decorator:
return functools.partial(_create_udf, returnType=return_type, evalType=eval_type)
else:
return _create_udf(f=f, returnType=return_type, evalType=eval_type)
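# An illustrative sketch (assumptions: pandas and pyarrow are installed and a running
# SparkSession named `spark` exists): a scalar Pandas UDF can be registered for SQL use
# just like a regular UDF, and is evaluated batch-by-batch on `pandas.Series` input.
def _example_register_scalar_pandas_udf(spark):
    """Hypothetical sketch registering a scalar Pandas UDF for SQL use."""
    plus_one = pandas_udf(lambda s: s + 1, "long", PandasUDFType.SCALAR)
    spark.udf.register("plus_one_pd", plus_one)
    return spark.sql("SELECT plus_one_pd(id) AS v FROM range(3)")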
blacklist = ['map', 'since', 'ignore_unicode_prefix']
__all__ = [k for k, v in globals().items()
if not k.startswith('_') and k[0].islower() and callable(v) and k not in blacklist]
__all__ += ["PandasUDFType"]
__all__.sort()
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.functions
globs = pyspark.sql.functions.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.functions tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = spark.createDataFrame([Row(name='Alice', age=2), Row(name='Bob', age=5)])
(failure_count, test_count) = doctest.testmod(
pyspark.sql.functions, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
martin-hunt/foobar | docs/conf.py | 1 | 8160 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
output_dir = os.path.join(__location__, "../docs/_rst")
module_dir = os.path.join(__location__, "../foobar")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
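# For orientation only: the `apidoc.main` call above is roughly equivalent to running a
# command like the following from a shell (exact paths depend on where this conf.py
# lives, so treat this as an illustration rather than a guarantee):
#
#     sphinx-apidoc -f -o <docs>/_rst <project>/foobar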
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'foobar'
copyright = u'2014, Martin Hunt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from foobar import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'foobar-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'foobar Documentation',
u'Martin Hunt', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| mit |
pbrod/numpy | numpy/core/fromnumeric.py | 2 | 122368 | """Module containing non-deprecated functions borrowed from Numeric.
"""
import functools
import types
import warnings
import numpy as np
from . import multiarray as mu
from . import overrides
from . import umath as um
from . import numerictypes as nt
from .multiarray import asarray, array, asanyarray, concatenate
from . import _methods
_dt_ = nt.sctype2char
# functions that are methods
__all__ = [
'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
'ravel', 'repeat', 'reshape', 'resize', 'round_',
'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
_gentype = types.GeneratorType
# save away Python sum
_sum_ = sum
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
bound = getattr(obj, method, None)
if bound is None:
return _wrapit(obj, method, *args, **kwds)
try:
return bound(*args, **kwds)
except TypeError:
# A TypeError occurs if the object does have such a method in its
# class, but its signature is not identical to that of NumPy's. This
# situation has occurred in the case of a downstream library like
# 'pandas'.
#
# Call _wrapit from within the except clause to ensure a potential
# exception has a traceback chain.
return _wrapit(obj, method, *args, **kwds)
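# Illustration (not part of NumPy's API): objects without the requested method fall back
# to the ndarray implementation via _wrapit; for example, a plain list has no 'argsort'
# method, so the call below behaves like asarray([3, 1, 2]).argsort().
def _example_wrapfunc_fallback():
    """Hypothetical sketch of the duck-typing fallback in _wrapfunc."""
    return _wrapfunc([3, 1, 2], 'argsort')  # -> array([1, 2, 0])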
def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
passkwargs = {k: v for k, v in kwargs.items()
if v is not np._NoValue}
if type(obj) is not mu.ndarray:
try:
reduction = getattr(obj, method)
except AttributeError:
pass
else:
# This branch is needed for reductions like any which don't
# support a dtype.
if dtype is not None:
return reduction(axis=axis, dtype=dtype, out=out, **passkwargs)
else:
return reduction(axis=axis, out=out, **passkwargs)
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
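# Illustration (not part of NumPy's API): a non-ndarray object exposing a matching
# reduction method is dispatched to directly, which is how containers such as pandas
# objects can intercept functions like np.sum. The class below is a hypothetical stand-in.
def _example_wrapreduction_ducktype():
    """Hypothetical sketch of the duck-typed dispatch in _wrapreduction."""
    class Tallied:
        def __init__(self, values):
            self.values = values
        def sum(self, axis=None, dtype=None, out=None):
            # Signature mirrors the keyword arguments _wrapreduction forwards.
            return _sum_(self.values)
    return _wrapreduction(Tallied([1, 2, 3]), um.add, 'sum', None, None, None)  # -> 6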
def _take_dispatcher(a, indices, axis=None, out=None, mode=None):
return (a, out)
@array_function_dispatch(_take_dispatcher)
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
When axis is not None, this function does the same thing as "fancy"
indexing (indexing arrays using arrays); however, it can be easier to use
if you need elements along a given axis. A call such as
``np.take(arr, indices, axis=3)`` is equivalent to
``arr[:,:,:,indices,...]``.
Explained without fancy indexing, this is equivalent to the following use
of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
Nj = indices.shape
for ii in ndindex(Ni):
for jj in ndindex(Nj):
for kk in ndindex(Nk):
out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
Parameters
----------
a : array_like (Ni..., M, Nk...)
The source array.
indices : array_like (Nj...)
The indices of the values to extract.
.. versionadded:: 1.8.0
Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional (Ni..., Nj..., Nk...)
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype. Note that `out` is always
buffered if `mode='raise'`; use other modes for better performance.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
out : ndarray (Ni..., Nj..., Nk...)
The returned array has the same type as `a`.
See Also
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
take_along_axis : Take elements by matching the array and the index arrays
Notes
-----
By eliminating the inner loop in the description above, and using `s_` to
build simple slice objects, `take` can be expressed in terms of applying
fancy indexing to each 1-d slice::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
for ii in ndindex(Ni):
            for kk in ndindex(Nk):
out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices]
For this reason, it is equivalent to (but faster than) the following use
of `apply_along_axis`::
out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a)
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, [[0, 1], [2, 3]])
array([[4, 3],
[5, 7]])
"""
return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
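# A small supplementary sketch (not part of the docstring above): with `axis` given,
# `take` selects whole slices, matching fancy indexing along that axis.
def _example_take_with_axis():
    """Hypothetical sketch of np.take with an explicit axis."""
    a = np.array([[10, 20, 30], [40, 50, 60]])
    # Equivalent to a[:, [0, 2]]
    return np.take(a, [0, 2], axis=1)  # -> array([[10, 30], [40, 60]])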
def _reshape_dispatcher(a, newshape, order=None):
return (a,)
# not deprecated --- copy if necessary, view otherwise
@array_function_dispatch(_reshape_dispatcher)
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
Parameters
----------
a : array_like
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
order : {'C', 'F', 'A'}, optional
Read the elements of `a` using this index order, and place the
elements into the reshaped array using this index order. 'C'
means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to read / write the
elements using Fortran-like index order, with the first index
changing fastest, and the last index changing slowest. Note that
the 'C' and 'F' options take no account of the memory layout of
the underlying array, and only refer to the order of indexing.
'A' means to read / write the elements in Fortran-like index
order if `a` is Fortran *contiguous* in memory, C-like order
otherwise.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
be a copy. Note there is no guarantee of the *memory layout* (C- or
Fortran- contiguous) of the returned array.
See Also
--------
ndarray.reshape : Equivalent method.
Notes
-----
It is not always possible to change the shape of an array without
copying the data. If you want an error to be raised when the data is copied,
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
# A transpose makes the array non-contiguous
>>> b = a.T
# Taking a view makes it possible to modify the shape without modifying
# the initial object.
>>> c = b.view()
>>> c.shape = (20)
Traceback (most recent call last):
...
AttributeError: Incompatible shape for in-place modification. Use
`.reshape()` to make a copy with the desired shape.
The `order` keyword gives the index ordering both for *fetching* the values
from `a`, and then *placing* the values into the output array.
For example, let's say you have an array:
>>> a = np.arange(6).reshape((3, 2))
>>> a
array([[0, 1],
[2, 3],
[4, 5]])
You can think of reshaping as first raveling the array (using the given
index order), then inserting the elements from the raveled array into the
new array using the same kind of index ordering as was used for the
raveling.
>>> np.reshape(a, (2, 3)) # C-like index ordering
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
array([[0, 4, 3],
[2, 1, 5]])
>>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
array([[0, 4, 3],
[2, 1, 5]])
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1, 2, 3, 4, 5, 6])
>>> np.reshape(a, 6, order='F')
array([1, 4, 2, 5, 3, 6])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1, 2],
[3, 4],
[5, 6]])
"""
return _wrapfunc(a, 'reshape', newshape, order=order)
def _choose_dispatcher(a, choices, out=None, mode=None):
yield a
yield from choices
yield out
@array_function_dispatch(_choose_dispatcher)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a list of arrays to choose from.
First of all, if confused or uncertain, definitely look at the Examples -
in its full generality, this function is less simple than it might
seem from the following code description (below ndi =
`numpy.lib.index_tricks`):
``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
But this omits some subtleties. Here is a fully general summary:
Given an "index" array (`a`) of integers and a sequence of ``n`` arrays
(`choices`), `a` and each choice array are first broadcast, as necessary,
to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
for each ``i``. Then, a new array with shape ``Ba.shape`` is created as
follows:
* if ``mode='raise'`` (the default), then, first of all, each element of
``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose
that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)``
position in ``Ba`` - then the value at the same position in the new array
is the value in ``Bchoices[i]`` at that same position;
* if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed)
integer; modular arithmetic is used to map integers outside the range
`[0, n-1]` back into that range; and then the new array is constructed
as above;
* if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed)
integer; negative integers are mapped to 0; values greater than ``n-1``
are mapped to ``n-1``; and then the new array is constructed as above.
Parameters
----------
a : int array
This array must contain integers in ``[0, n-1]``, where ``n`` is the
number of choices, unless ``mode=wrap`` or ``mode=clip``, in which
cases any integers are permissible.
choices : sequence of arrays
Choice arrays. `a` and all of the choices must be broadcastable to the
same shape. If `choices` is itself an array (not recommended), then
its outermost dimension (i.e., the one corresponding to
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype. Note that `out` is always
buffered if ``mode='raise'``; use other modes for better performance.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside ``[0, n-1]`` will be treated:
* 'raise' : an exception is raised
* 'wrap' : value becomes value mod ``n``
* 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
Returns
-------
merged_array : array
The merged result.
Raises
------
ValueError: shape mismatch
If `a` and each choice array are not all broadcastable to the same
shape.
See Also
--------
ndarray.choose : equivalent method
numpy.take_along_axis : Preferable if `choices` is an array
Notes
-----
To reduce the chance of misinterpretation, even though the following
"abuse" is nominally supported, `choices` should neither be, nor be
thought of as, a single array, i.e., the outermost sequence-like container
should be either a list or a tuple.
Examples
--------
>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.choose([2, 3, 1, 0], choices
... # the first element of the result will be the first element of the
... # third (2+1) "array" in choices, namely, 20; the second element
... # will be the second element of the fourth (3+1) choice array, i.e.,
... # 31, etc.
... )
array([20, 31, 12, 3])
>>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
array([20, 31, 12, 3])
>>> # because there are 4 choice arrays
>>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
array([20, 1, 12, 3])
>>> # i.e., 0
A couple examples illustrating how choose broadcasts:
>>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
>>> choices = [-10, 10]
>>> np.choose(a, choices)
array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]])
>>> # With thanks to Anne Archibald
>>> a = np.array([0, 1]).reshape((2,1,1))
>>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
>>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
>>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
array([[[ 1, 1, 1, 1, 1],
[ 2, 2, 2, 2, 2],
[ 3, 3, 3, 3, 3]],
[[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5]]])
"""
return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
def _repeat_dispatcher(a, repeats, axis=None):
return (a,)
@array_function_dispatch(_repeat_dispatcher)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : int or array of ints
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
unique : Find the unique elements of an array.
Examples
--------
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
return _wrapfunc(a, 'repeat', repeats, axis=axis)
def _put_dispatcher(a, ind, v, mode=None):
return (a, ind, v)
@array_function_dispatch(_put_dispatcher)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers. In 'raise' mode,
if an exception occurs the target array may still be modified.
See Also
--------
putmask, place
put_along_axis : Put elements by matching the array and the index arrays
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
try:
put = a.put
except AttributeError as e:
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(a).__name__)) from e
return put(ind, v, mode=mode)
def _swapaxes_dispatcher(a, axis1, axis2):
return (a,)
@array_function_dispatch(_swapaxes_dispatcher)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
returned; otherwise a new array is created. For earlier NumPy
versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
return _wrapfunc(a, 'swapaxes', axis1, axis2)
def _transpose_dispatcher(a, axes=None):
return (a,)
@array_function_dispatch(_transpose_dispatcher)
def transpose(a, axes=None):
"""
Reverse or permute the axes of an array; returns the modified array.
For an array a with two axes, transpose(a) gives the matrix transpose.
Refer to `numpy.ndarray.transpose` for full documentation.
Parameters
----------
a : array_like
Input array.
axes : tuple or list of ints, optional
If specified, it must be a tuple or list which contains a permutation of
[0,1,..,N-1] where N is the number of axes of a. The i'th axis of the
returned array will correspond to the axis numbered ``axes[i]`` of the
input. If not specified, defaults to ``range(a.ndim)[::-1]``, which
reverses the order of the axes.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
ndarray.transpose : Equivalent method
moveaxis
argsort
Notes
-----
Use `transpose(a, argsort(axes))` to invert the transposition of tensors
when using the `axes` keyword argument.
Transposing a 1-D array returns an unchanged view of the original array.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.transpose(x)
array([[0, 2],
[1, 3]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
>>> x = np.ones((2, 3, 4, 5))
>>> np.transpose(x).shape
(5, 4, 3, 2)
"""
return _wrapfunc(a, 'transpose', axes)
def _partition_dispatcher(a, kth, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_partition_dispatcher)
def partition(a, kth, axis=-1, kind='introselect', order=None):
"""
Return a partitioned copy of an array.
Creates a copy of the array with its elements rearranged in such a
way that the value of the element in k-th position is in the
position it would be in a sorted array. All elements smaller than
the k-th element are moved before this element and all equal or
greater are moved behind it. The ordering of the elements in the two
partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to be sorted.
kth : int or sequence of ints
        Element index to partition by. The element at the k-th position
        will be in its final sorted position and all smaller elements
will be moved before it and all equal or greater elements behind
it. The order of all elements in the partitions is undefined. If
provided with a sequence of k-th it will partition all elements
indexed by k-th of them into their sorted position at once.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string. Not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
partitioned_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.partition : Method to sort an array in-place.
argpartition : Indirect partition.
sort : Full sorting
Notes
-----
The various selection algorithms are characterized by their average
speed, worst case performance, work space size, and whether they are
stable. A stable sort keeps items with the same key in the same
relative order. The available algorithms have the following
properties:
================= ======= ============= ============ =======
kind speed worst case work space stable
================= ======= ============= ============ =======
'introselect' 1 O(n) 0 no
================= ======= ============= ============ =======
All the partition algorithms make temporary copies of the data when
partitioning along any but the last axis. Consequently,
partitioning along the last axis is faster and uses less space than
partitioning along any other axis.
The sort order for complex numbers is lexicographic. If both the
real and imaginary parts are non-nan then the order is determined by
the real parts except when they are equal, in which case the order
is determined by the imaginary parts.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> np.partition(a, 3)
array([2, 1, 3, 4])
>>> np.partition(a, (1, 3))
array([1, 2, 3, 4])
"""
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
else:
a = asanyarray(a).copy(order="K")
a.partition(kth, axis=axis, kind=kind, order=order)
return a
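# A small supplementary sketch (not part of the docstring above): partition also works
# row-by-row on multidimensional input when an explicit axis is given.
def _example_partition_along_axis():
    """Hypothetical sketch of np.partition on a 2-D array."""
    a = np.array([[9, 1, 5, 3], [4, 8, 2, 6]])
    out = np.partition(a, 1, axis=1)
    # In every row of `out`, column 1 holds that row's second-smallest value; the
    # ordering within each side of that column is unspecified.
    return out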
def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_argpartition_dispatcher)
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
"""
Perform an indirect partition along the given axis using the
algorithm specified by the `kind` keyword. It returns an array of
indices of the same shape as `a` that index data along the given
axis in partitioned order.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to sort.
kth : int or sequence of ints
Element index to partition by. The k-th element will be in its
final sorted position and all smaller elements will be moved
        before it and all larger elements behind it. The order of all
elements in the partitions is undefined. If provided with a
sequence of k-th it will partition all of them into their sorted
position at once.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If
None, the flattened array is used.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string, and not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that partition `a` along the specified axis.
If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)`` always
yields the partitioned `a`, irrespective of dimensionality.
See Also
--------
partition : Describes partition algorithms used.
ndarray.partition : Inplace partition.
argsort : Full indirect sort.
take_along_axis : Apply ``index_array`` from argpartition
to an array as if by calling partition.
Notes
-----
See `partition` for notes on the different selection algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 4, 2, 1])
>>> x[np.argpartition(x, 3)]
array([2, 1, 3, 4])
>>> x[np.argpartition(x, (1, 3))]
array([1, 2, 3, 4])
>>> x = [3, 4, 2, 1]
>>> np.array(x)[np.argpartition(x, 3)]
array([2, 1, 3, 4])
Multi-dimensional array:
>>> x = np.array([[3, 4, 2], [1, 3, 1]])
>>> index_array = np.argpartition(x, kth=1, axis=-1)
>>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1)
array([[2, 3, 4],
[1, 1, 3]])
"""
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
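# Illustrative sketch: ``argpartition`` gives the *indices* of a top-k query,
# which is useful when the positions matter as much as the values. The sample
# array is an assumption made up for this example.
#
#     >>> import numpy as np
#     >>> x = np.array([7, 2, 9, 1, 5, 3])
#     >>> idx = np.argpartition(x, 3)[:3]    # indices of the 3 smallest values
#     >>> np.sort(x[idx])
#     array([1, 2, 3])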
def _sort_dispatcher(a, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_sort_dispatcher)
def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort or radix sort under the covers and, in general,
the actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The four algorithms implemented in NumPy have the following
properties:
=========== ======= ============= ============ ========
kind speed worst case work space stable
=========== ======= ============= ============ ========
'quicksort' 1 O(n^2) 0 no
'heapsort' 3 O(n*log(n)) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'timsort' 2 O(n*log(n)) ~n/2 yes
=========== ======= ============= ============ ========
.. note:: The datatype determines which of 'mergesort' or 'timsort'
is actually used, even if 'mergesort' is specified. User selection
at a finer scale is not currently available.
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
.. versionadded:: 1.12.0
quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
When sorting does not make enough progress it switches to
`heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
This implementation makes quicksort O(n*log(n)) in the worst case.
'stable' automatically chooses the best stable sorting algorithm
for the data type being sorted.
It, along with 'mergesort', is currently mapped to
`timsort <https://en.wikipedia.org/wiki/Timsort>`_
or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
depending on the data type.
API forward compatibility currently limits the
ability to select the implementation and it is hardwired for the different
data types.
.. versionadded:: 1.17.0
Timsort is added for better performance on already or nearly
sorted data. On random data timsort is almost identical to
mergesort. It is now used for stable sort while quicksort is still the
default sort if none is chosen. For timsort details, refer to
`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
O(n) sort instead of O(n log n).
.. versionchanged:: 1.18.0
NaT now sorts to the end of arrays for consistency with NaN.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
else:
a = asanyarray(a).copy(order="K")
a.sort(axis=axis, kind=kind, order=order)
return a
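# Illustrative sketch of the NaN ordering documented above: NaN values are
# sorted to the end of the array. The input values are assumptions chosen for
# the example.
#
#     >>> import numpy as np
#     >>> np.sort(np.array([3.0, np.nan, 1.0]))
#     array([ 1.,  3., nan])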
def _argsort_dispatcher(a, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_argsort_dispatcher)
def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
take_along_axis : Apply ``index_array`` from argsort
to an array as if by calling sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
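# Illustrative sketch: with ``kind='stable'`` the indices of equal elements
# keep their original relative order, which matters when argsort is used to
# build a permutation for tie-aware ranking. The input values are assumptions.
#
#     >>> import numpy as np
#     >>> np.argsort([1, 1, 0], kind='stable')
#     array([2, 0, 1])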
def _argmax_dispatcher(a, axis=None, out=None):
return (a, out)
@array_function_dispatch(_argmax_dispatcher)
def argmax(a, axis=None, out=None):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmax to an array as if by calling max.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
Indexes of the maximal elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
>>> ind
(1, 2)
>>> a[ind]
15
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmax(x, axis=-1)
>>> # Same as np.max(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[4],
[3]])
>>> # Same as np.max(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([4, 3])
"""
return _wrapfunc(a, 'argmax', axis=axis, out=out)
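# Illustrative sketch: because ``argmax`` returns the first occurrence of the
# maximum, applying it to a boolean condition yields the index of the first
# ``True`` value -- a common idiom. The data below is an assumption.
#
#     >>> import numpy as np
#     >>> np.argmax(np.array([0, 3, 5, 7]) > 4)   # first element greater than 4
#     2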
def _argmin_dispatcher(a, axis=None, out=None):
return (a, out)
@array_function_dispatch(_argmin_dispatcher)
def argmin(a, axis=None, out=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmin to an array as if by calling min.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
array([0, 0, 0])
>>> np.argmin(a, axis=1)
array([0, 0])
Indices of the minimum elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
>>> ind
(0, 0)
>>> a[ind]
10
>>> b = np.arange(6) + 10
>>> b[4] = 10
>>> b
array([10, 11, 12, 13, 10, 15])
>>> np.argmin(b) # Only the first occurrence is returned.
0
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmin(x, axis=-1)
>>> # Same as np.min(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[2],
[0]])
>>> # Same as np.min(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([2, 0])
"""
return _wrapfunc(a, 'argmin', axis=axis, out=out)
def _searchsorted_dispatcher(a, v, side=None, sorter=None):
return (a, v, sorter)
@array_function_dispatch(_searchsorted_dispatcher)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Assuming that `a` is sorted:
====== ============================
`side` returned index `i` satisfies
====== ============================
left ``a[i-1] < v <= a[i]``
right ``a[i-1] <= v < a[i]``
====== ============================
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
.. versionadded:: 1.7.0
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
This function uses the same algorithm as the builtin python `bisect.bisect_left`
(``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,
which is also vectorized in the `v` argument.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
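# Illustrative sketch of the ``sorter`` argument, which the examples above do
# not cover: when the array is not already sorted, an ``argsort`` permutation
# can be passed instead of sorting a copy. The returned index refers to the
# sorted order. Values are assumptions for the example.
#
#     >>> import numpy as np
#     >>> a = np.array([3, 1, 2])
#     >>> s = np.argsort(a)                 # a[s] is [1, 2, 3]
#     >>> np.searchsorted(a, 2, sorter=s)
#     1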
def _resize_dispatcher(a, new_shape):
return (a,)
@array_function_dispatch(_resize_dispatcher)
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : array_like
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated iterating over the array in C-order.
See Also
--------
np.reshape : Reshape an array without changing the total size.
np.pad : Enlarge and pad an array.
np.repeat : Repeat elements of an array.
ndarray.resize : resize an array in-place.
Notes
-----
When the total size of the array does not change, `~numpy.reshape` should
be used. In most other cases either indexing (to reduce the size)
or padding (to increase the size) may be a more appropriate solution.
Warning: This functionality does **not** consider axes separately,
i.e. it does not apply interpolation/extrapolation.
It fills the return array with the required number of elements, iterating
over `a` in C-order, disregarding axes (and cycling back from the start if
the new shape is larger). This functionality is therefore not suitable to
resize images, or data where each axis represents a separate and distinct
entity.
Examples
--------
>>> a=np.array([[0,1],[2,3]])
>>> np.resize(a,(2,3))
array([[0, 1, 2],
[3, 0, 1]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
array([[0, 1, 2, 3],
[0, 1, 2, 3]])
"""
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
new_size = 1
for dim_length in new_shape:
new_size *= dim_length
if dim_length < 0:
raise ValueError('all elements of `new_shape` must be non-negative')
if a.size == 0 or new_size == 0:
# First case must zero fill. The second would have repeats == 0.
return np.zeros_like(a, shape=new_shape)
repeats = -(-new_size // a.size) # ceil division
a = concatenate((a,) * repeats)[:new_size]
return reshape(a, new_shape)
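# Illustrative sketch of the note above: ``np.resize`` repeats the data when
# growing, while the in-place ``ndarray.resize`` pads with zeros. The input is
# an assumption; ``refcheck=False`` is used only to keep the sketch simple.
#
#     >>> import numpy as np
#     >>> x = np.array([0, 1, 2, 3])
#     >>> np.resize(x, 6)
#     array([0, 1, 2, 3, 0, 1])
#     >>> y = x.copy()
#     >>> y.resize(6, refcheck=False)
#     >>> y
#     array([0, 1, 2, 3, 0, 0])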
def _squeeze_dispatcher(a, axis=None):
return (a,)
@array_function_dispatch(_squeeze_dispatcher)
def squeeze(a, axis=None):
"""
Remove axes of length one from `a`.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
.. versionadded:: 1.7.0
Selects a subset of the entries of length one in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`. Note that if all axes are squeezed,
the result is a 0d array and not a scalar.
Raises
------
ValueError
If `axis` is not None, and an axis being squeezed is not of length 1
See Also
--------
expand_dims : The inverse operation, adding entries of length one
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=0).shape
(3, 1)
>>> np.squeeze(x, axis=1).shape
Traceback (most recent call last):
...
ValueError: cannot select an axis to squeeze out which has size not equal to one
>>> np.squeeze(x, axis=2).shape
(1, 3)
>>> x = np.array([[1234]])
>>> x.shape
(1, 1)
>>> np.squeeze(x)
array(1234) # 0d array
>>> np.squeeze(x).shape
()
>>> np.squeeze(x)[()]
1234
"""
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze', axis=axis)
if axis is None:
return squeeze()
else:
return squeeze(axis=axis)
def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None):
return (a,)
@array_function_dispatch(_diagonal_dispatcher)
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
If `a` is 2-D, returns the diagonal of `a` with the given offset,
i.e., the collection of elements of the form ``a[i, i+offset]``. If
`a` has more than two dimensions, then the axes specified by `axis1`
and `axis2` are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by
removing `axis1` and `axis2` and appending an index to the right equal
to the size of the resulting diagonals.
In versions of NumPy prior to 1.7, this function always returned a new,
independent array containing a copy of the values in the diagonal.
In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
but depending on this fact is deprecated. Writing to the resulting
array continues to work as it used to, but a FutureWarning is issued.
Starting in NumPy 1.9 it returns a read-only view on the original array.
Attempting to write to the resulting array will produce an error.
In some future release, it will return a read/write view and writing to
the returned array will alter your original array. The returned array
will have the same type as the input array.
If you don't write to the array returned by this function, then you can
just ignore all of the above.
If you depend on the current behavior, then we suggest copying the
returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead
of just ``np.diagonal(a)``. This will work with both past and future
versions of NumPy.
Parameters
----------
a : array_like
Array from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be positive or
negative. Defaults to main diagonal (0).
axis1 : int, optional
Axis to be used as the first axis of the 2-D sub-arrays from which
the diagonals should be taken. Defaults to first axis (0).
axis2 : int, optional
Axis to be used as the second axis of the 2-D sub-arrays from
which the diagonals should be taken. Defaults to second axis (1).
Returns
-------
array_of_diagonals : ndarray
If `a` is 2-D, then a 1-D array containing the diagonal and of the
same type as `a` is returned unless `a` is a `matrix`, in which case
a 1-D array rather than a (2-D) `matrix` is returned in order to
maintain backward compatibility.
If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
are removed, and a new axis inserted at the end corresponding to the
diagonal.
Raises
------
ValueError
If the dimension of `a` is less than 2.
See Also
--------
diag : MATLAB work-a-like for 1-D and 2-D arrays.
diagflat : Create diagonal arrays.
trace : Sum along diagonals.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
A 3-D example:
>>> a = np.arange(8).reshape(2,2,2); a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0, # Main diagonals of two arrays created by skipping
... 0, # across the outer(left)-most axis last and
... 1) # the "middle" (row) axis first.
array([[0, 6],
[1, 7]])
The sub-arrays whose main diagonals we just obtained; note that each
corresponds to fixing the right-most (column) axis, and that the
diagonals are "packed" in rows.
>>> a[:,:,0] # main diagonal is [0 6]
array([[0, 2],
[4, 6]])
>>> a[:,:,1] # main diagonal is [1 7]
array([[1, 3],
[5, 7]])
The anti-diagonal can be obtained by reversing the order of elements
using either `numpy.flipud` or `numpy.fliplr`.
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.fliplr(a).diagonal() # Horizontal flip
array([2, 4, 6])
>>> np.flipud(a).diagonal() # Vertical flip
array([6, 4, 2])
Note that the order in which the diagonal is retrieved varies depending
on the flip function.
"""
if isinstance(a, np.matrix):
# Make diagonal of matrix 1-D to preserve backward compatibility.
return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
else:
return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
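# Illustrative sketch of the copy advice above: ``np.diagonal`` returns a
# read-only view, so code that needs to modify the diagonal should copy it
# explicitly. The array is an assumption for the example.
#
#     >>> import numpy as np
#     >>> a = np.arange(4).reshape(2, 2)
#     >>> d = np.diagonal(a).copy()   # writable, detached from ``a``
#     >>> d[0] = 99
#     >>> d
#     array([99,  3])
#     >>> a                           # original array is untouched
#     array([[0, 1],
#            [2, 3]])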
def _trace_dispatcher(
a, offset=None, axis1=None, axis2=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_trace_dispatcher)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
if isinstance(a, np.matrix):
# Get trace of matrix via an array to preserve backward compatibility.
return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
else:
return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
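# Illustrative sketch of the ``offset`` argument, which is not covered by the
# examples above; the input array is an assumption.
#
#     >>> import numpy as np
#     >>> a = np.arange(9).reshape(3, 3)
#     >>> np.trace(a, offset=1)       # a[0, 1] + a[1, 2] == 1 + 5
#     6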
def _ravel_dispatcher(a, order=None):
return (a,)
@array_function_dispatch(_ravel_dispatcher)
def ravel(a, order='C'):
"""Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
As of NumPy 1.10, the returned array will have the same type as the input
array. (for example, a masked array will be returned for a masked array
input)
Parameters
----------
a : array_like
Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means
to index the elements in row-major, C-style order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to index the elements
in column-major, Fortran-style order, with the
first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of
the memory layout of the underlying array, and only refer to
the order of axis indexing. 'A' means to read the elements in
Fortran-like index order if `a` is Fortran *contiguous* in
memory, C-like order otherwise. 'K' means to read the
elements in the order they occur in memory, except for
reversing the data when strides are negative. By default, 'C'
index order is used.
Returns
-------
y : array_like
y is an array of the same subtype as `a`, with shape ``(a.size,)``.
Note that matrices are special cased for backward compatibility, if `a`
is a matrix, then y is a 1-D ndarray.
See Also
--------
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
ndarray.reshape : Change the shape of an array without changing its data.
Notes
-----
In row-major, C-style order, in two dimensions, the row index
varies the slowest, and the column index the quickest. This can
be generalized to multiple dimensions, where row-major order
implies that the index along the first axis varies slowest, and
the index along the last quickest. The opposite holds for
column-major, Fortran-style index ordering.
When a view is desired in as many cases as possible, ``arr.reshape(-1)``
may be preferable.
Examples
--------
It is equivalent to ``reshape(-1, order=order)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.ravel(x)
array([1, 2, 3, 4, 5, 6])
>>> x.reshape(-1)
array([1, 2, 3, 4, 5, 6])
>>> np.ravel(x, order='F')
array([1, 4, 2, 5, 3, 6])
When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
>>> np.ravel(x.T)
array([1, 4, 2, 5, 3, 6])
>>> np.ravel(x.T, order='A')
array([1, 2, 3, 4, 5, 6])
When ``order`` is 'K', it will preserve orderings that are neither 'C'
nor 'F', but won't reverse axes:
>>> a = np.arange(3)[::-1]; a
array([2, 1, 0])
>>> a.ravel(order='C')
array([2, 1, 0])
>>> a.ravel(order='K')
array([2, 1, 0])
>>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
array([[[ 0, 2, 4],
[ 1, 3, 5]],
[[ 6, 8, 10],
[ 7, 9, 11]]])
>>> a.ravel(order='C')
array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11])
>>> a.ravel(order='K')
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
if isinstance(a, np.matrix):
return asarray(a).ravel(order=order)
else:
return asanyarray(a).ravel(order=order)
def _nonzero_dispatcher(a):
return (a,)
@array_function_dispatch(_nonzero_dispatcher)
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always tested and returned in
row-major, C-style order.
To group the indices by element, rather than dimension, use `argwhere`,
which returns a row for each non-zero element.
.. note::
When called on a zero-d array or scalar, ``nonzero(a)`` is treated
as ``nonzero(atleast_1d(a))``.
.. deprecated:: 1.17.0
Use `atleast_1d` explicitly if this behavior is deliberate.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Notes
-----
While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
recommended to use ``a[a.astype(bool)]`` or ``a[a != 0]`` instead, which
will correctly handle 0-d arrays.
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
array([[3, 0, 0],
[0, 4, 0],
[5, 6, 0]])
>>> np.nonzero(x)
(array([0, 1, 2, 2]), array([0, 1, 0, 1]))
>>> x[np.nonzero(x)]
array([3, 4, 5, 6])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 0],
[2, 1]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]])
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
Using this result to index `a` is equivalent to using the mask directly:
>>> a[np.nonzero(a > 3)]
array([4, 5, 6, 7, 8, 9])
>>> a[a > 3] # prefer this spelling
array([4, 5, 6, 7, 8, 9])
``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return _wrapfunc(a, 'nonzero')
def _shape_dispatcher(a):
return (a,)
@array_function_dispatch(_shape_dispatcher)
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
len
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(2,)
>>> a.shape
(2,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def _compress_dispatcher(condition, a, axis=None, out=None):
return (condition, a, out)
@array_function_dispatch(_compress_dispatcher)
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
When working along a given axis, a slice along that axis is returned in
`output` for each index where `condition` evaluates to True. When
working on a 1-D array, `compress` is equivalent to `extract`.
Parameters
----------
condition : 1-D array of bools
Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
Array from which to extract a part.
axis : int, optional
Axis along which to take slices. If None (default), work on the
flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
compressed_array : ndarray
A copy of `a` without the slices along axis for which `condition`
is false.
See Also
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method in ndarray
extract : Equivalent method when working on 1-D arrays
:ref:`ufuncs-output-type`
Examples
--------
>>> a = np.array([[1, 2], [3, 4], [5, 6]])
>>> a
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
>>> np.compress([False, True, True], a, axis=0)
array([[3, 4],
[5, 6]])
>>> np.compress([False, True], a, axis=1)
array([[2],
[4],
[6]])
Working on the flattened array does not return slices along an axis but
selects elements.
>>> np.compress([False, True], a)
array([2])
"""
return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs):
return (a, a_min, a_max)
@array_function_dispatch(_clip_dispatcher)
def clip(a, a_min, a_max, out=None, **kwargs):
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Equivalent to but faster than ``np.minimum(a_max, np.maximum(a, a_min))``.
No check is performed to ensure ``a_min < a_max``.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min, a_max : array_like or None
Minimum and maximum value. If ``None``, clipping is not performed on
the corresponding edge. Only one of `a_min` and `a_max` may be
``None``. Both are broadcast against `a`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
.. versionadded:: 1.17.0
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
:ref:`ufuncs-output-type`
Notes
-----
When `a_min` is greater than `a_max`, `clip` returns an
array in which all values are equal to `a_max`,
as shown in the second example.
Examples
--------
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> np.clip(a, 8, 1)
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
>>> np.clip(a, 3, 6, out=a)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs)
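# Illustrative sketch: passing ``None`` for one bound clips on a single side
# only, which the examples above do not show. The data is an assumption.
#
#     >>> import numpy as np
#     >>> a = np.arange(10)
#     >>> np.clip(a, None, 5)         # upper bound only
#     array([0, 1, 2, 3, 4, 5, 5, 5, 5, 5])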
def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
@array_function_dispatch(_sum_dispatcher)
def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
initial=np._NoValue, where=np._NoValue):
"""
Sum of array elements over a given axis.
Parameters
----------
a : array_like
Elements to sum.
axis : None or int or tuple of ints, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a sum is performed on all of the axes
specified in the tuple instead of a single axis or all the axes as
before.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The dtype of `a` is used by default unless `a`
has an integer dtype of less precision than the default platform
integer. In that case, if `a` is signed then the platform integer
is used while if `a` is unsigned then an unsigned integer of the
same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
Starting value for the sum. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.17.0
Returns
-------
sum_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
See Also
--------
ndarray.sum : Equivalent method.
add.reduce : Equivalent functionality of `add`.
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
mean, average
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
The sum of an empty array is the neutral element 0:
>>> np.sum([])
0.0
For floating point numbers the numerical precision of sum (and
``np.add.reduce``) is in general limited by directly adding each number
individually to the result causing rounding errors in every step.
However, often numpy will use a numerically better approach (partial
pairwise summation) leading to improved precision in many use-cases.
This improved precision is always provided when no ``axis`` is given.
When ``axis`` is given, it will depend on which axis is summed.
Technically, to provide the best speed possible, the improved precision
is only used when the summation is along the fast axis in memory.
Note that the exact precision may vary depending on other parameters.
In contrast to NumPy, Python's ``math.fsum`` function uses a slower but
more precise approach to summation.
Especially when summing a large number of lower precision floating point
numbers, such as ``float32``, numerical errors can become significant.
In such cases it can be advisable to use `dtype="float64"` to use a higher
precision for the output.
Examples
--------
>>> np.sum([0.5, 1.5])
2.0
>>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.sum([[0, 1], [0, 5]])
6
>>> np.sum([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
>>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1)
array([1., 5.])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
You can also start the sum with a value other than zero:
>>> np.sum([10], initial=5)
15
"""
if isinstance(a, _gentype):
# 2018-02-25, 1.15.0
warnings.warn(
"Calling np.sum(generator) is deprecated, and in the future will give a different result. "
"Use np.sum(np.fromiter(generator)) or the python sum builtin instead.",
DeprecationWarning, stacklevel=3)
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
initial=initial, where=where)
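# Illustrative sketch of ``keepdims``, which is documented above but not
# exercised in the examples; the input is an assumption.
#
#     >>> import numpy as np
#     >>> np.sum([[0, 1], [0, 5]], axis=1, keepdims=True)
#     array([[1],
#            [5]])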
def _any_dispatcher(a, axis=None, out=None, keepdims=None, *,
where=np._NoValue):
return (a, where, out)
@array_function_dispatch(_any_dispatcher)
def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
"""
Test whether any array element along a given axis evaluates to True.
Returns a single boolean if `axis` is ``None``.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (``axis=None``) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `any` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in checking for any `True` values.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
any : bool or ndarray
A new boolean or `ndarray` is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all elements along a given axis evaluate to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity evaluate
to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False])
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> np.any([[True, False], [False, False]], where=[[False], [True]])
False
>>> o=np.array(False)
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array(True), array(True))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
return _wrapreduction(a, np.logical_or, 'any', axis, None, out,
keepdims=keepdims, where=where)
def _all_dispatcher(a, axis=None, out=None, keepdims=None, *,
where=None):
return (a, where, out)
@array_function_dispatch(_all_dispatcher)
def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (``axis=None``) is to perform a logical AND over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
will consist of 0.0's and 1.0's). See :ref:`ufuncs-output-type` for more
details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `all` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in checking for all `True` values.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False])
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> np.all([[True, True], [False, True]], where=[[True], [False]])
True
>>> o=np.array(False)
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z
(28293632, 28293632, array(True)) # may vary
"""
return _wrapreduction(a, np.logical_and, 'all', axis, None, out,
keepdims=keepdims, where=where)
def _cumsum_dispatcher(a, axis=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_cumsum_dispatcher)
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See :ref:`ufuncs-output-type` for
more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
See Also
--------
sum : Sum array elements.
trapz : Integration of array values using the composite trapezoidal rule.
diff : Calculate the n-th discrete difference along given axis.
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
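# Illustrative sketch: the last entry of ``cumsum`` along an axis equals
# ``sum`` over the same axis, a relation that is occasionally useful for
# running totals. The data is an assumption.
#
#     >>> import numpy as np
#     >>> a = np.array([[1, 2, 3], [4, 5, 6]])
#     >>> np.cumsum(a, axis=1)[:, -1]
#     array([ 6, 15])
#     >>> np.sum(a, axis=1)
#     array([ 6, 15])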
def _ptp_dispatcher(a, axis=None, out=None, keepdims=None):
return (a, out)
@array_function_dispatch(_ptp_dispatcher)
def ptp(a, axis=None, out=None, keepdims=np._NoValue):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for 'peak to peak'.
.. warning::
`ptp` preserves the data type of the array. This means the
return value for an input of signed integers with n bits
(e.g. `np.int8`, `np.int16`, etc) is also a signed integer
with n bits. In that case, peak-to-peak values greater than
``2**(n-1)-1`` will be returned as negative values. An example
with a work-around is shown below.
Parameters
----------
a : array_like
Input values.
axis : None or int or tuple of ints, optional
Axis along which to find the peaks. By default, flatten the
array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.15.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : array_like
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type of the output values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `ptp` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
ptp : ndarray
A new array holding the result, unless `out` was
specified, in which case a reference to `out` is returned.
Examples
--------
>>> x = np.array([[4, 9, 2, 10],
... [6, 9, 7, 12]])
>>> np.ptp(x, axis=1)
array([8, 6])
>>> np.ptp(x, axis=0)
array([2, 0, 5, 2])
>>> np.ptp(x)
10
This example shows that a negative value can be returned when
the input is an array of signed integers.
>>> y = np.array([[1, 127],
... [0, 127],
... [-1, 127],
... [-2, 127]], dtype=np.int8)
>>> np.ptp(y, axis=1)
array([ 126, 127, -128, -127], dtype=int8)
A work-around is to use the `view()` method to view the result as
unsigned integers with the same bit width:
>>> np.ptp(y, axis=1).view(np.uint8)
array([126, 127, 128, 129], dtype=uint8)
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
ptp = a.ptp
except AttributeError:
pass
else:
return ptp(axis=axis, out=out, **kwargs)
return _methods._ptp(a, axis=axis, out=out, **kwargs)
def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
where=None):
return (a, out)
@array_function_dispatch(_amax_dispatcher)
def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the maximum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amax` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The minimum value of an output element. Must be present to allow
computation on empty slice. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to compare for the maximum. See `~numpy.ufunc.reduce`
for details.
.. versionadded:: 1.17.0
Returns
-------
amax : ndarray or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amin :
The minimum value of an array along a given axis, propagating any NaNs.
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
argmax :
Return the indices of the maximum values.
nanmin, minimum, fmin
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding max value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmax.
Don't use `amax` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``amax(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amax(a) # Maximum of the flattened array
3
>>> np.amax(a, axis=0) # Maxima along the first axis
array([2, 3])
>>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
>>> np.amax(a, where=[False, True], initial=-1, axis=0)
array([-1, 3])
>>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
>>> np.amax(b, where=~np.isnan(b), initial=-1)
4.0
>>> np.nanmax(b)
4.0
You can use an initial value to compute the maximum of an empty slice, or
to initialize it to a different value:
>>> np.max([[-50], [10]], axis=-1, initial=0)
array([ 0, 10])
Notice that the initial value is used as one of the elements for which the
maximum is determined, unlike for the default argument of Python's ``max``
function, which is only used for empty iterables.
>>> np.max([5], initial=6)
6
>>> max([5], default=6)
5
"""
return _wrapreduction(a, np.maximum, 'max', axis, None, out,
keepdims=keepdims, initial=initial, where=where)
def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
where=None):
return (a, out)
@array_function_dispatch(_amin_dispatcher)
def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the minimum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amin` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The maximum value of an output element. Must be present to allow
computation on empty slice. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to compare for the minimum. See `~numpy.ufunc.reduce`
for details.
.. versionadded:: 1.17.0
Returns
-------
amin : ndarray or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amax :
The maximum value of an array along a given axis, propagating any NaNs.
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
argmin :
Return the indices of the minimum values.
nanmax, maximum, fmax
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding min value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmin.
Don't use `amin` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``amin(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
>>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> np.amin(a, where=[False, True], initial=10, axis=0)
array([10, 1])
>>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
>>> np.amin(b, where=~np.isnan(b), initial=10)
0.0
>>> np.nanmin(b)
0.0
>>> np.min([[-50], [10]], axis=-1, initial=0)
array([-50, 0])
    Notice that the initial value is used as one of the elements for which the
    minimum is determined, unlike the default argument of Python's ``min``
    function, which is only used for empty iterables.
Notice that this isn't the same as Python's ``default`` argument.
>>> np.min([6], initial=5)
5
>>> min([6], default=5)
6
"""
return _wrapreduction(a, np.minimum, 'min', axis, None, out,
keepdims=keepdims, initial=initial, where=where)
def _alen_dispathcer(a):
return (a,)
@array_function_dispatch(_alen_dispathcer)
def alen(a):
"""
Return the length of the first dimension of the input array.
.. deprecated:: 1.18
`numpy.alen` is deprecated, use `len` instead.
Parameters
----------
a : array_like
Input array.
Returns
-------
alen : int
Length of the first dimension of `a`.
See Also
--------
shape, size
Examples
--------
>>> a = np.zeros((7,4,5))
>>> a.shape[0]
7
>>> np.alen(a)
7
"""
# NumPy 1.18.0, 2019-08-02
warnings.warn(
"`np.alen` is deprecated, use `len` instead",
DeprecationWarning, stacklevel=2)
try:
return len(a)
except TypeError:
return len(array(a, ndmin=1))
def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
@array_function_dispatch(_prod_dispatcher)
def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
initial=np._NoValue, where=np._NoValue):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed. The default,
axis=None, will calculate the product of all the elements in the
input array. If axis is negative it counts from the last to the
first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a product is performed on all of the
axes specified in the tuple instead of a single axis or all the
axes as before.
dtype : dtype, optional
The type of the returned array, as well as of the accumulator in
which the elements are multiplied. The dtype of `a` is used by
default unless `a` has an integer dtype of less precision than the
default platform integer. In that case, if `a` is signed then the
platform integer is used while if `a` is unsigned then an unsigned
integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `prod` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The starting value for this product. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to include in the product. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.17.0
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
:ref:`ufuncs-output-type`
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x)
16 # may vary
The product of an empty array is the neutral element 1:
>>> np.prod([])
1.0
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
Or select specific elements to include:
>>> np.prod([1., np.nan, 3.], where=[True, False, True])
3.0
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == int
True
You can also start the product with a value other than one:
>>> np.prod([1, 2], initial=5)
10
"""
return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
keepdims=keepdims, initial=initial, where=where)
def _cumprod_dispatcher(a, axis=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_cumprod_dispatcher)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to out is returned.
See Also
--------
:ref:`ufuncs-output-type`
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([1,2,3])
>>> np.cumprod(a) # intermediate results 1, 1*2
... # total product 1*2*3 = 6
array([1, 2, 6])
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.cumprod(a, dtype=float) # specify type of output
array([ 1., 2., 6., 24., 120., 720.])
The cumulative product for each column (i.e., over the rows) of `a`:
>>> np.cumprod(a, axis=0)
array([[ 1, 2, 3],
[ 4, 10, 18]])
The cumulative product for each row (i.e. over the columns) of `a`:
>>> np.cumprod(a,axis=1)
array([[ 1, 2, 6],
[ 4, 20, 120]])
"""
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
def _ndim_dispatcher(a):
return (a,)
@array_function_dispatch(_ndim_dispatcher)
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def _size_dispatcher(a, axis=None):
return (a,)
@array_function_dispatch(_size_dispatcher)
def size(a, axis=None):
"""
Return the number of elements along a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which the elements are counted. By default, give
the total number of elements.
Returns
-------
element_count : int
Number of elements along the specified axis.
See Also
--------
shape : dimensions of array
ndarray.shape : dimensions of array
ndarray.size : number of elements in array
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6]])
>>> np.size(a)
6
>>> np.size(a,1)
3
>>> np.size(a,0)
2
"""
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def _around_dispatcher(a, decimals=None, out=None):
return (a, out)
@array_function_dispatch(_around_dispatcher)
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See :ref:`ufuncs-output-type` for more
details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
``np.around`` uses a fast but sometimes inexact algorithm to round
floating-point datatypes. For positive `decimals` it is equivalent to
``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has
error due to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling by powers
of ten. For instance, note the extra "1" in the following:
>>> np.round(56294995342131.5, 3)
56294995342131.51
If your goal is to print such values with a fixed number of decimals, it is
preferable to use numpy's float printing routines to limit the number of
printed decimals:
>>> np.format_float_positional(56294995342131.5, precision=3)
'56294995342131.5'
The float printing routines use an accurate but much more computationally
demanding algorithm to compute the number of digits after the decimal
point.
Alternatively, Python's builtin `round` function uses a more accurate
but slower algorithm for 64-bit floating point values:
>>> round(56294995342131.5, 3)
56294995342131.5
>>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997
(16.06, 16.05)
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
return _wrapfunc(a, 'round', decimals=decimals, out=out)
def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, *,
where=None):
return (a, where, out)
@array_function_dispatch(_mean_dispatcher)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `mean` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
average : Weighted average
std, var, nanmean, nanstd, nanvar
Notes
-----
The arithmetic mean is the sum of the elements along the axis divided
by the number of elements.
Note that for floating-point input, the mean is computed using the
same precision the input has. Depending on the input data, this can
cause the results to be inaccurate, especially for `float32` (see
example below). Specifying a higher-precision accumulator using the
`dtype` keyword can alleviate this issue.
By default, `float16` results are computed using `float32` intermediates
for extra precision.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
array([2., 3.])
>>> np.mean(a, axis=1)
array([1.5, 3.5])
In single precision, `mean` can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
0.54999924
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806 # may vary
Specifying a where argument:
>>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]])
>>> np.mean(a)
12.0
>>> np.mean(a, where=[[True], [False], [False]])
9.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
mean = a.mean
except AttributeError:
pass
else:
return mean(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._mean(a, axis=axis, dtype=dtype,
out=out, **kwargs)
def _std_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
keepdims=None, *, where=None):
return (a, where, out)
@array_function_dispatch(_std_dispatcher)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the standard deviation.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean, nanmean, nanstd, nanvar
:ref:`ufuncs-output-type`
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(x))``, where
``x = abs(a - a.mean())**2``.
The average squared deviation is typically calculated as ``x.sum() / N``,
where ``N = len(x)``. If, however, `ddof` is specified, the divisor
``N - ddof`` is used instead. In standard statistical practice, ``ddof=1``
provides an unbiased estimator of the variance of the infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables. The standard deviation computed in this
function is the square root of the estimated variance, so even with
``ddof=1``, it will not be an unbiased estimate of the standard deviation
per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949 # may vary
>>> np.std(a, axis=0)
array([1., 1.])
>>> np.std(a, axis=1)
array([0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
0.45000005
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925494177 # may vary
Specifying a where argument:
>>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> np.std(a)
2.614064523559687 # may vary
>>> np.std(a, where=[[True], [True], [False]])
2.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
std = a.std
except AttributeError:
pass
else:
return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
def _var_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
keepdims=None, *, where=None):
return (a, where, out)
@array_function_dispatch(_var_dispatcher)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the variance. See `~numpy.ufunc.reduce` for
details.
.. versionadded:: 1.20.0
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
See Also
--------
std, mean, nanmean, nanstd, nanvar
:ref:`ufuncs-output-type`
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(x)``, where ``x = abs(a - a.mean())**2``.
The mean is typically calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
1.25
>>> np.var(a, axis=0)
array([1., 1.])
>>> np.var(a, axis=1)
array([0.25, 0.25])
In single precision, var() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
0.20250003
Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932944759 # may vary
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
Specifying a where argument:
>>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> np.var(a)
6.833333333333333 # may vary
>>> np.var(a, where=[[True], [True], [False]])
4.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
var = a.var
except AttributeError:
pass
else:
return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
# Aliases of other functions. These have their own definitions only so that
# they can have unique docstrings.
@array_function_dispatch(_around_dispatcher)
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
return around(a, decimals=decimals, out=out)
@array_function_dispatch(_prod_dispatcher, verify=False)
def product(*args, **kwargs):
"""
Return the product of array elements over a given axis.
See Also
--------
prod : equivalent function; see for details.
"""
return prod(*args, **kwargs)
@array_function_dispatch(_cumprod_dispatcher, verify=False)
def cumproduct(*args, **kwargs):
"""
Return the cumulative product over the given axis.
See Also
--------
cumprod : equivalent function; see for details.
"""
return cumprod(*args, **kwargs)
@array_function_dispatch(_any_dispatcher, verify=False)
def sometrue(*args, **kwargs):
"""
Check whether some values are true.
Refer to `any` for full documentation.
See Also
--------
any : equivalent function; see for details.
"""
return any(*args, **kwargs)
@array_function_dispatch(_all_dispatcher, verify=False)
def alltrue(*args, **kwargs):
"""
Check if all elements of input array are true.
See Also
--------
numpy.all : Equivalent function; see for details.
"""
return all(*args, **kwargs)
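# Illustrative sketch, not part of the original module: a small guarded demo
# of the ``initial`` and ``where`` keywords documented above. The expected
# values are taken directly from the docstring examples.
if __name__ == "__main__":
    import numpy as np
    a = np.arange(4).reshape((2, 2))
    assert np.amax(a, where=[False, True], initial=-1, axis=0).tolist() == [-1, 3]
    assert np.amin(a, where=[False, True], initial=10, axis=0).tolist() == [10, 1]
    assert np.prod([1, 2], initial=5) == 10
    print("initial/where reduction examples behave as documented")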
| bsd-3-clause |
BinRoot/TensorFlow-Book | ch04_classification/logistic_2d.py | 1 | 1737 | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
learning_rate = 0.1
training_epochs = 2000
def sigmoid(x):
return 1. / (1. + np.exp(-x))
x1_label1 = np.random.normal(3, 1, 1000)
x2_label1 = np.random.normal(2, 1, 1000)
x1_label2 = np.random.normal(7, 1, 1000)
x2_label2 = np.random.normal(6, 1, 1000)
x1s = np.append(x1_label1, x1_label2)
x2s = np.append(x2_label1, x2_label2)
ys = np.asarray([0.] * len(x1_label1) + [1.] * len(x1_label2))
X1 = tf.placeholder(tf.float32, shape=(None,), name="x1")
X2 = tf.placeholder(tf.float32, shape=(None,), name="x2")
Y = tf.placeholder(tf.float32, shape=(None,), name="y")
w = tf.Variable([0., 0., 0.], name="w", trainable=True)
y_model = tf.sigmoid(-(w[2] * X2 + w[1] * X1 + w[0]))
cost = tf.reduce_mean(-tf.log(y_model) * Y - tf.log(1 - y_model) * (1 - Y))
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
prev_err = 0
for epoch in range(training_epochs):
err, _ = sess.run([cost, train_op], {X1: x1s, X2: x2s, Y: ys})
print(epoch, err)
if abs(prev_err - err) < 0.0001:
break
prev_err = err
w_val = sess.run(w)
x1_boundary, x2_boundary = [], []
for x1_test in np.linspace(0, 10, 100):
for x2_test in np.linspace(0, 10, 100):
z = sigmoid(-x2_test*w_val[2] - x1_test*w_val[1] - w_val[0])
if abs(z - 0.5) < 0.01:
x1_boundary.append(x1_test)
x2_boundary.append(x2_test)
plt.scatter(x1_boundary, x2_boundary, c='b', marker='o', s=20)
plt.scatter(x1_label1, x2_label1, c='r', marker='x', s=20)
plt.scatter(x1_label2, x2_label2, c='g', marker='1', s=20)
plt.show()
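# Illustrative follow-up, not part of the original script: with the fitted
# weights w_val from the session above, a new point can be scored with the
# same plain-NumPy sigmoid used for the boundary search; a value above 0.5
# means the model assigns the point to the second cluster (label 1).
def predict_label(x1_test, x2_test, w):
    """Return 1 when the fitted model puts more than 0.5 probability on class 1."""
    return int(sigmoid(-x2_test * w[2] - x1_test * w[1] - w[0]) > 0.5)
print(predict_label(2., 2., w_val), predict_label(7., 6., w_val))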
| mit |
saiwing-yeung/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 70 | 4523 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than with FA in this case. However, PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic dimensionality estimation from "Automatic Choice of
Dimensionality for PCA" (Thomas P. Minka, NIPS 2000, pp. 598-604)
is also compared.
"""
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
print(__doc__)
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA(svd_solver='full')
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(svd_solver='full', n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa,
linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
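# Illustrative aside, not part of the original example: with no explicit
# scorer, cross_val_score falls back to each estimator's own score(), which
# for PCA and FactorAnalysis is the average log-likelihood of held-out data.
# The single 80/20 split below is an arbitrary choice for this sketch.
n_train = int(0.8 * n_samples)
pca_ll = PCA(svd_solver='full', n_components=rank).fit(X_homo[:n_train]).score(X_homo[n_train:])
fa_ll = FactorAnalysis(n_components=rank).fit(X_homo[:n_train]).score(X_homo[n_train:])
print("held-out average log-likelihood: PCA %.3f, FA %.3f" % (pca_ll, fa_ll))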
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/io/tests/parser/skiprows.py | 1 | 2376 | # -*- coding: utf-8 -*-
"""
Tests that skipped rows are properly handled during
parsing for all of the parsers defined in parsers.py
"""
from datetime import datetime
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.compat import StringIO, range, lrange
class SkipRowsTests(object):
def test_skiprows_bug(self):
# see gh-505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# see gh-4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# see gh-9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
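# Illustrative sketch, not part of the test mixin above: the two skiprows
# forms the tests exercise, shown directly against pandas.read_csv on a
# throwaway CSV string.
if __name__ == '__main__':
    import pandas as pd
    text = "junk\njunk\na,b\n1,2\n3,4\n"
    print(pd.read_csv(StringIO(text), skiprows=2))       # drop the first two lines
    print(pd.read_csv(StringIO(text), skiprows=[0, 1]))  # same rows, given by index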
| mit |
kaichogami/scikit-learn | sklearn/tests/test_learning_curve.py | 59 | 10869 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score improves (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
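# Illustrative sketch, not part of the test module: the deprecated
# sklearn.learning_curve API exercised above, run once on a real classifier
# instead of the mock estimators.
if __name__ == '__main__':
    X, y = make_classification(n_samples=200, n_features=5, random_state=0)
    sizes, train_scores, test_scores = learning_curve(
        PassiveAggressiveClassifier(n_iter=5), X, y, cv=3)
    print(sizes)
    print(train_scores.mean(axis=1))
    print(test_scores.mean(axis=1))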
| bsd-3-clause |
Fireblend/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
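# Illustrative aside, not part of the original example: ICA recovers sources
# only up to permutation, sign and scale, so a quick numeric sanity check is
# the absolute correlation between each true source and its closest estimate.
cross_corr = np.abs(np.corrcoef(S.T, S_.T))[:3, 3:]
print("best |correlation| per true source:", cross_corr.max(axis=1))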
| bsd-3-clause |
RPGOne/scikit-learn | sklearn/metrics/tests/test_regression.py | 18 | 6065 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
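# Illustrative sketch, not part of the test module: the metrics under test on
# a small multioutput example, with both per-output and averaged results.
if __name__ == '__main__':
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
    print(mean_squared_error(y_true, y_pred, multioutput='raw_values'))
    print(mean_absolute_error(y_true, y_pred))
    print(r2_score(y_true, y_pred, multioutput='uniform_average'))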
| bsd-3-clause |
quantopian/zipline | zipline/examples/buy_and_hold.py | 1 | 1568 | #!/usr/bin/env python
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zipline.api import order, symbol
from zipline.finance import commission, slippage
stocks = ['AAPL', 'MSFT']
def initialize(context):
context.has_ordered = False
context.stocks = stocks
# Explicitly set the commission/slippage to the "old" value until we can
# rebuild example data.
# github.com/quantopian/zipline/blob/master/tests/resources/
# rebuild_example_data#L105
context.set_commission(commission.PerShare(cost=.0075, min_trade_cost=1.0))
context.set_slippage(slippage.VolumeShareSlippage())
def handle_data(context, data):
if not context.has_ordered:
for stock in context.stocks:
order(symbol(stock), 100)
context.has_ordered = True
def _test_args():
"""Extra arguments to use when zipline's automated tests run this example.
"""
import pandas as pd
return {
'start': pd.Timestamp('2008', tz='utc'),
'end': pd.Timestamp('2013', tz='utc'),
}
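if __name__ == '__main__':
    # Illustrative sketch, not part of the original example: running the
    # algorithm programmatically. This assumes zipline.run_algorithm is
    # available (zipline >= 1.0) and that a data bundle has already been
    # ingested locally; otherwise the example is normally launched through
    # the zipline CLI or the automated example runner that uses _test_args().
    import pandas as pd
    from zipline import run_algorithm
    perf = run_algorithm(start=pd.Timestamp('2008', tz='utc'),
                         end=pd.Timestamp('2013', tz='utc'),
                         initialize=initialize,
                         handle_data=handle_data,
                         capital_base=100000)
    print(perf['portfolio_value'].tail())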
| apache-2.0 |
shiquanwang/pylearn2 | pylearn2/gui/tangent_plot.py | 5 | 1687 | """
Code for plotting curves with tangent lines.
"""
__author__ = "Ian Goodfellow"
try:
from matplotlib import pyplot
except Exception:
pyplot = None
def tangent_plot(x, y, s):
"""
Plots a curve with tangent lines.
Parameters
----------
x : list
List of x coordinates.
Assumed to be sorted into ascending order, so that the tangent
lines occupy 80 percent of the horizontal space between each pair
of points.
y : list
List of y coordinates
s : list
List of slopes
"""
assert isinstance(x, list)
assert isinstance(y, list)
assert isinstance(s, list)
n = len(x)
assert len(y) == n
assert len(s) == n
if pyplot is None:
raise RuntimeError("Could not import pyplot, can't run this code.")
pyplot.plot(x, y, color='b')
if n == 0:
pyplot.show()
return
pyplot.hold(True)
# Add dummy entries so that the for loop can use the same code on every
# entry
if n == 1:
        x = [x[0] - 1.] + x + [x[0] + 1.]
else:
x = [x[0] - (x[1] - x[0])] + x + [x[-2] + (x[-1] - x[-2])]
y = [0.] + y + [0]
s = [0.] + s + [0]
for i in xrange(1, n + 1):
ld = 0.4 * (x[i] - x[i - 1])
lx = x[i] - ld
ly = y[i] - ld * s[i]
rd = 0.4 * (x[i + 1] - x[i])
rx = x[i] + rd
ry = y[i] + rd * s[i]
pyplot.plot([lx, rx], [ly, ry], color='g')
pyplot.show()
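# Illustrative helper, not part of the original module: when analytic slopes
# are unavailable, finite differences give a usable `s` argument for
# tangent_plot. Assumes a NumPy version whose gradient() accepts a coordinate
# array as the spacing argument.
def finite_difference_slopes(x, y):
    """Approximate dy/dx at each sample point with numpy.gradient."""
    import numpy as np
    return list(np.gradient(np.asarray(y, dtype=float), np.asarray(x, dtype=float)))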
if __name__ == "__main__":
# Demo by plotting a quadratic function
import numpy as np
x = np.arange(-5., 5., .1)
y = 0.5 * (x ** 2)
x = list(x)
y = list(y)
tangent_plot(x, y, x)
| bsd-3-clause |
metaml/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colors.py | 69 | 31676 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of
colors called a colormap. Colormapping typically involves two steps:
a data array is first mapped onto the range 0-1 using an instance
of :class:`Normalize` or of a subclass; then this number in the 0-1
range is mapped to a color using an instance of a subclass of
:class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all
the built-in colormap instances, but is also useful for making
custom colormaps, and :class:`ListedColormap`, which is used for
generating a custom colormap from a list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single
color specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic builtin colors, you can use a single letter
- b : blue
- g : green
- r : red
- c : cyan
- m : magenta
- y : yellow
- k : black
- w : white
Gray shades can be given as a string encoding a float in the 0-1
range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify
the color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R*, *G*, *B* tuple, where each of *R*, *G*, *B*
is in the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and
'chartreuse' are supported.
"""
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
parts = np.__version__.split('.')
NP_MAJOR, NP_MINOR = map(int, parts[:2])
# true if clip supports the out kwarg
NP_CLIP_OUT = NP_MAJOR>=1 and NP_MINOR>=2
cnames = {
'aliceblue' : '#F0F8FF',
'antiquewhite' : '#FAEBD7',
'aqua' : '#00FFFF',
'aquamarine' : '#7FFFD4',
'azure' : '#F0FFFF',
'beige' : '#F5F5DC',
'bisque' : '#FFE4C4',
'black' : '#000000',
'blanchedalmond' : '#FFEBCD',
'blue' : '#0000FF',
'blueviolet' : '#8A2BE2',
'brown' : '#A52A2A',
'burlywood' : '#DEB887',
'cadetblue' : '#5F9EA0',
'chartreuse' : '#7FFF00',
'chocolate' : '#D2691E',
'coral' : '#FF7F50',
'cornflowerblue' : '#6495ED',
'cornsilk' : '#FFF8DC',
'crimson' : '#DC143C',
'cyan' : '#00FFFF',
'darkblue' : '#00008B',
'darkcyan' : '#008B8B',
'darkgoldenrod' : '#B8860B',
'darkgray' : '#A9A9A9',
'darkgreen' : '#006400',
'darkkhaki' : '#BDB76B',
'darkmagenta' : '#8B008B',
'darkolivegreen' : '#556B2F',
'darkorange' : '#FF8C00',
'darkorchid' : '#9932CC',
'darkred' : '#8B0000',
'darksalmon' : '#E9967A',
'darkseagreen' : '#8FBC8F',
'darkslateblue' : '#483D8B',
'darkslategray' : '#2F4F4F',
'darkturquoise' : '#00CED1',
'darkviolet' : '#9400D3',
'deeppink' : '#FF1493',
'deepskyblue' : '#00BFFF',
'dimgray' : '#696969',
'dodgerblue' : '#1E90FF',
'firebrick' : '#B22222',
'floralwhite' : '#FFFAF0',
'forestgreen' : '#228B22',
'fuchsia' : '#FF00FF',
'gainsboro' : '#DCDCDC',
'ghostwhite' : '#F8F8FF',
'gold' : '#FFD700',
'goldenrod' : '#DAA520',
'gray' : '#808080',
'green' : '#008000',
'greenyellow' : '#ADFF2F',
'honeydew' : '#F0FFF0',
'hotpink' : '#FF69B4',
'indianred' : '#CD5C5C',
'indigo' : '#4B0082',
'ivory' : '#FFFFF0',
'khaki' : '#F0E68C',
'lavender' : '#E6E6FA',
'lavenderblush' : '#FFF0F5',
'lawngreen' : '#7CFC00',
'lemonchiffon' : '#FFFACD',
'lightblue' : '#ADD8E6',
'lightcoral' : '#F08080',
'lightcyan' : '#E0FFFF',
'lightgoldenrodyellow' : '#FAFAD2',
'lightgreen' : '#90EE90',
'lightgrey' : '#D3D3D3',
'lightpink' : '#FFB6C1',
'lightsalmon' : '#FFA07A',
'lightseagreen' : '#20B2AA',
'lightskyblue' : '#87CEFA',
'lightslategray' : '#778899',
'lightsteelblue' : '#B0C4DE',
'lightyellow' : '#FFFFE0',
'lime' : '#00FF00',
'limegreen' : '#32CD32',
'linen' : '#FAF0E6',
'magenta' : '#FF00FF',
'maroon' : '#800000',
'mediumaquamarine' : '#66CDAA',
'mediumblue' : '#0000CD',
'mediumorchid' : '#BA55D3',
'mediumpurple' : '#9370DB',
'mediumseagreen' : '#3CB371',
'mediumslateblue' : '#7B68EE',
'mediumspringgreen' : '#00FA9A',
'mediumturquoise' : '#48D1CC',
'mediumvioletred' : '#C71585',
'midnightblue' : '#191970',
'mintcream' : '#F5FFFA',
'mistyrose' : '#FFE4E1',
'moccasin' : '#FFE4B5',
'navajowhite' : '#FFDEAD',
'navy' : '#000080',
'oldlace' : '#FDF5E6',
'olive' : '#808000',
'olivedrab' : '#6B8E23',
'orange' : '#FFA500',
'orangered' : '#FF4500',
'orchid' : '#DA70D6',
'palegoldenrod' : '#EEE8AA',
'palegreen' : '#98FB98',
'palevioletred' : '#AFEEEE',
'papayawhip' : '#FFEFD5',
'peachpuff' : '#FFDAB9',
'peru' : '#CD853F',
'pink' : '#FFC0CB',
'plum' : '#DDA0DD',
'powderblue' : '#B0E0E6',
'purple' : '#800080',
'red' : '#FF0000',
'rosybrown' : '#BC8F8F',
'royalblue' : '#4169E1',
'saddlebrown' : '#8B4513',
'salmon' : '#FA8072',
'sandybrown' : '#FAA460',
'seagreen' : '#2E8B57',
'seashell' : '#FFF5EE',
'sienna' : '#A0522D',
'silver' : '#C0C0C0',
'skyblue' : '#87CEEB',
'slateblue' : '#6A5ACD',
'slategray' : '#708090',
'snow' : '#FFFAFA',
'springgreen' : '#00FF7F',
'steelblue' : '#4682B4',
'tan' : '#D2B48C',
'teal' : '#008080',
'thistle' : '#D8BFD8',
'tomato' : '#FF6347',
'turquoise' : '#40E0D0',
'violet' : '#EE82EE',
'wheat' : '#F5DEB3',
'white' : '#FFFFFF',
'whitesmoke' : '#F5F5F5',
'yellow' : '#FFFF00',
'yellowgreen' : '#9ACD32',
}
# add british equivs
for k, v in cnames.items():
if k.find('gray')>=0:
k = k.replace('gray', 'grey')
cnames[k] = v
def is_color_like(c):
'Return *True* if *c* can be converted to *RGB*'
try:
colorConverter.to_rgb(c)
return True
except ValueError:
return False
def rgb2hex(rgb):
'Given a len 3 rgb tuple of 0-1 floats, return the hex string'
return '#%02x%02x%02x' % tuple([round(val*255) for val in rgb])
hexColorPattern = re.compile("\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
"""
Take a hex string *s* and return the corresponding rgb 3-tuple
Example: #efefef -> (0.93725, 0.93725, 0.93725)
"""
if not isinstance(s, basestring):
raise TypeError('hex2color requires a string argument')
if hexColorPattern.match(s) is None:
raise ValueError('invalid hex color string "%s"' % s)
return tuple([int(n, 16)/255.0 for n in (s[1:3], s[3:5], s[5:7])])
class ColorConverter:
"""
Provides methods for converting color specifications to *RGB* or *RGBA*
Caching is used for more efficient conversion upon repeated calls
with the same argument.
Ordinarily only the single instance instantiated in this module,
*colorConverter*, is needed.
"""
colors = {
'b' : (0.0, 0.0, 1.0),
'g' : (0.0, 0.5, 0.0),
'r' : (1.0, 0.0, 0.0),
'c' : (0.0, 0.75, 0.75),
'm' : (0.75, 0, 0.75),
'y' : (0.75, 0.75, 0),
'k' : (0.0, 0.0, 0.0),
'w' : (1.0, 1.0, 1.0),
}
cache = {}
def to_rgb(self, arg):
"""
Returns an *RGB* tuple of three floats from 0-1.
*arg* can be an *RGB* or *RGBA* sequence or a string in any of
several forms:
1) a letter from the set 'rgbcmykw'
2) a hex color string, like '#00FFFF'
3) a standard name, like 'aqua'
4) a float, like '0.4', indicating gray on a 0-1 scale
if *arg* is *RGBA*, the *A* will simply be discarded.
"""
try: return self.cache[arg]
except KeyError: pass
except TypeError: # could be unhashable rgb seq
arg = tuple(arg)
try: return self.cache[arg]
except KeyError: pass
except TypeError:
raise ValueError(
'to_rgb: arg "%s" is unhashable even inside a tuple'
% (str(arg),))
try:
if cbook.is_string_like(arg):
color = self.colors.get(arg, None)
if color is None:
str1 = cnames.get(arg, arg)
if str1.startswith('#'):
color = hex2color(str1)
else:
fl = float(arg)
if fl < 0 or fl > 1:
raise ValueError(
'gray (string) must be in range 0-1')
color = tuple([fl]*3)
elif cbook.iterable(arg):
if len(arg) > 4 or len(arg) < 3:
raise ValueError(
'sequence length is %d; must be 3 or 4'%len(arg))
color = tuple(arg[:3])
if [x for x in color if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
raise ValueError('cannot convert argument to rgb sequence')
self.cache[arg] = color
except (KeyError, ValueError, TypeError), exc:
raise ValueError('to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
# Error messages could be improved by handling TypeError
# separately; but this should be rare and not too hard
# for the user to figure out as-is.
return color
def to_rgba(self, arg, alpha=None):
"""
Returns an *RGBA* tuple of four floats from 0-1.
For acceptable values of *arg*, see :meth:`to_rgb`.
If *arg* is an *RGBA* sequence and *alpha* is not *None*,
*alpha* will replace the original *A*.
"""
try:
if not cbook.is_string_like(arg) and cbook.iterable(arg):
if len(arg) == 4:
if [x for x in arg if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                        raise ValueError('number in rgba sequence outside 0-1 range')
if alpha is None:
return tuple(arg)
if alpha < 0.0 or alpha > 1.0:
raise ValueError("alpha must be in range 0-1")
return arg[0], arg[1], arg[2], arg[3] * alpha
r,g,b = arg[:3]
if [x for x in (r,g,b) if (float(x) < 0) or (x > 1)]:
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
r,g,b = self.to_rgb(arg)
if alpha is None:
alpha = 1.0
return r,g,b,alpha
except (TypeError, ValueError), exc:
raise ValueError('to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
def to_rgba_array(self, c, alpha=None):
"""
Returns a numpy array of *RGBA* tuples.
Accepts a single mpl color spec or a sequence of specs.
Special case to handle "no color": if *c* is "none" (case-insensitive),
then an empty array will be returned. Same for an empty list.
"""
try:
if c.lower() == 'none':
return np.zeros((0,4), dtype=np.float_)
except AttributeError:
pass
if len(c) == 0:
return np.zeros((0,4), dtype=np.float_)
try:
result = np.array([self.to_rgba(c, alpha)], dtype=np.float_)
except ValueError:
if isinstance(c, np.ndarray):
if c.ndim != 2 and c.dtype.kind not in 'SU':
raise ValueError("Color array must be two-dimensional")
result = np.zeros((len(c), 4))
for i, cc in enumerate(c):
result[i] = self.to_rgba(cc, alpha) # change in place
return np.asarray(result, np.float_)
colorConverter = ColorConverter()
def makeMappingArray(N, data):
"""Create an *N* -element 1-d lookup table
*data* represented by a list of x,y0,y1 mapping correspondences.
Each element in this list represents how a value between 0 and 1
(inclusive) represented by x is mapped to a corresponding value
between 0 and 1 (inclusive). The two values of y are to allow
for discontinuous mapping functions (say as might be found in a
sawtooth) where y0 represents the value of y for values of x
<= to that given, and y1 is the value to be used for x > than
that given). The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
The function returns an array "result" where ``result[x*(N-1)]``
gives the closest value for values of x between 0 and 1.
"""
try:
adata = np.array(data)
except:
raise TypeError("data must be convertable to an array")
shape = adata.shape
    if len(shape) != 2 or shape[1] != 3:
raise ValueError("data must be nx3 format")
x = adata[:,0]
y0 = adata[:,1]
y1 = adata[:,2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0. and end with x=1")
if np.sometrue(np.sort(x)-x):
raise ValueError(
"data mapping points must have x in increasing order")
# begin generation of lookup table
x = x * (N-1)
lut = np.zeros((N,), np.float)
xind = np.arange(float(N))
ind = np.searchsorted(x, xind)[1:-1]
lut[1:-1] = ( ((xind[1:-1] - x[ind-1]) / (x[ind] - x[ind-1]))
* (y0[ind] - y1[ind-1]) + y1[ind-1])
lut[0] = y1[0]
lut[-1] = y0[-1]
# ensure that the lut is confined to values between 0 and 1 by clipping it
    lut = np.clip(lut, 0.0, 1.0)
#lut = where(lut > 1., 1., lut)
#lut = where(lut < 0., 0., lut)
return lut
class Colormap:
"""Base class for all scalar to rgb mappings
Important methods:
* :meth:`set_bad`
* :meth:`set_under`
* :meth:`set_over`
"""
def __init__(self, name, N=256):
"""
Public class attributes:
:attr:`N` : number of rgb quantization levels
:attr:`name` : name of colormap
"""
self.name = name
self.N = N
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
self._rgba_under = None
self._rgba_over = None
self._i_under = N
self._i_over = N+1
self._i_bad = N+2
self._isinit = False
def __call__(self, X, alpha=1.0, bytes=False):
"""
*X* is either a scalar or an array (of any dimension).
If scalar, a tuple of rgba values is returned, otherwise
an array with the new shape = oldshape+(4,). If the X-values
are integers, then they are used as indices into the array.
If they are floating point, then they must be in the
interval (0.0, 1.0).
Alpha must be a scalar.
If bytes is False, the rgba values will be floats on a
0-1 scale; if True, they will be uint8, 0-255.
"""
if not self._isinit: self._init()
alpha = min(alpha, 1.0) # alpha must be between 0 and 1
alpha = max(alpha, 0.0)
self._lut[:-3, -1] = alpha
mask_bad = None
if not cbook.iterable(X):
vtype = 'scalar'
xa = np.array([X])
else:
vtype = 'array'
xma = ma.asarray(X)
xa = xma.filled(0)
mask_bad = ma.getmask(xma)
if xa.dtype.char in np.typecodes['Float']:
np.putmask(xa, xa==1.0, 0.9999999) #Treat 1.0 as slightly less than 1.
# The following clip is fast, and prevents possible
# conversion of large positive values to negative integers.
if NP_CLIP_OUT:
np.clip(xa * self.N, -1, self.N, out=xa)
else:
xa = np.clip(xa * self.N, -1, self.N)
xa = xa.astype(int)
# Set the over-range indices before the under-range;
# otherwise the under-range values get converted to over-range.
np.putmask(xa, xa>self.N-1, self._i_over)
np.putmask(xa, xa<0, self._i_under)
if mask_bad is not None and mask_bad.shape == xa.shape:
np.putmask(xa, mask_bad, self._i_bad)
if bytes:
lut = (self._lut * 255).astype(np.uint8)
else:
lut = self._lut
rgba = np.empty(shape=xa.shape+(4,), dtype=lut.dtype)
lut.take(xa, axis=0, mode='clip', out=rgba)
# twice as fast as lut[xa];
# using the clip or wrap mode and providing an
# output array speeds it up a little more.
if vtype == 'scalar':
rgba = tuple(rgba[0,:])
return rgba
def set_bad(self, color = 'k', alpha = 1.0):
'''Set color to be used for masked values.
'''
self._rgba_bad = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_under(self, color = 'k', alpha = 1.0):
'''Set color to be used for low out-of-range values.
Requires norm.clip = False
'''
self._rgba_under = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_over(self, color = 'k', alpha = 1.0):
'''Set color to be used for high out-of-range values.
Requires norm.clip = False
'''
self._rgba_over = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def _set_extremes(self):
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N-1]
self._lut[self._i_bad] = self._rgba_bad
    def _init(self):
'''Generate the lookup table, self._lut'''
raise NotImplementedError("Abstract class only")
def is_gray(self):
if not self._isinit: self._init()
return (np.alltrue(self._lut[:,0] == self._lut[:,1])
and np.alltrue(self._lut[:,0] == self._lut[:,2]))
class LinearSegmentedColormap(Colormap):
"""Colormap objects based on lookup tables using linear segments.
The lookup table is generated using linear interpolation for each
primary color, with the 0-1 domain divided into any number of
segments.
"""
def __init__(self, name, segmentdata, N=256):
"""Create color map from linear mapping segments
segmentdata argument is a dictionary with a red, green and blue
entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
forming rows in a table.
Example: suppose you want red to increase from 0 to 1 over
the bottom half, green to do the same over the middle half,
and blue over the top half. Then you would use::
cdict = {'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)]}
Each row in the table for a given color is a sequence of
*x*, *y0*, *y1* tuples. In each sequence, *x* must increase
monotonically from 0 to 1. For any input value *z* falling
between *x[i]* and *x[i+1]*, the output value of a given color
will be linearly interpolated between *y1[i]* and *y0[i+1]*::
row i: x y0 y1
/
/
row i+1: x y0 y1
Hence y0 in the first row and y1 in the last row are never used.
.. seealso::
:func:`makeMappingArray`
"""
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
Colormap.__init__(self, name, N)
self._segmentdata = segmentdata
def _init(self):
self._lut = np.ones((self.N + 3, 4), np.float)
self._lut[:-3, 0] = makeMappingArray(self.N, self._segmentdata['red'])
self._lut[:-3, 1] = makeMappingArray(self.N, self._segmentdata['green'])
self._lut[:-3, 2] = makeMappingArray(self.N, self._segmentdata['blue'])
self._isinit = True
self._set_extremes()
class ListedColormap(Colormap):
"""Colormap object generated from a list of colors.
This may be most useful when indexing directly into a colormap,
but it can also be used to generate special colormaps for ordinary
mapping.
"""
def __init__(self, colors, name = 'from_list', N = None):
"""
Make a colormap from a list of colors.
*colors*
a list of matplotlib color specifications,
or an equivalent Nx3 floating point array (*N* rgb values)
*name*
a string to identify the colormap
*N*
the number of entries in the map. The default is *None*,
in which case there is one colormap entry for each
element in the list of colors. If::
N < len(colors)
the list will be truncated at *N*. If::
N > len(colors)
the list will be extended by repetition.
"""
self.colors = colors
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
if N is None:
N = len(self.colors)
else:
if cbook.is_string_like(self.colors):
self.colors = [self.colors] * N
self.monochrome = True
elif cbook.iterable(self.colors):
self.colors = list(self.colors) # in case it was a tuple
if len(self.colors) == 1:
self.monochrome = True
if len(self.colors) < N:
self.colors = list(self.colors) * N
del(self.colors[N:])
else:
try: gray = float(self.colors)
except TypeError: pass
else: self.colors = [gray] * N
self.monochrome = True
Colormap.__init__(self, name, N)
def _init(self):
rgb = np.array([colorConverter.to_rgb(c)
for c in self.colors], np.float)
self._lut = np.zeros((self.N + 3, 4), np.float)
self._lut[:-3, :-1] = rgb
self._lut[:-3, -1] = 1
self._isinit = True
self._set_extremes()
class Normalize:
"""
Normalize a given value to the 0-1 range
"""
def __init__(self, vmin=None, vmax=None, clip=False):
"""
If *vmin* or *vmax* is not given, they are taken from the input's
minimum and maximum value respectively. If *clip* is *True* and
the given value falls outside the range, the returned value
will be 0 or 1, whichever is closer. Returns 0 if::
vmin==vmax
Works with scalars or arrays, including masked arrays. If
*clip* is *True*, masked values are set to 1; otherwise they
remain masked. Clipping silently defeats the purpose of setting
the over, under, and masked colors in the colormap, so it is
likely to lead to surprises; therefore the default is
*clip* = *False*.
"""
self.vmin = vmin
self.vmax = vmax
self.clip = clip
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (val-vmin) * (1.0/(vmax-vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin + val * (vmax - vmin)
else:
return vmin + value * (vmax - vmin)
def autoscale(self, A):
'''
Set *vmin*, *vmax* to min, max of *A*.
'''
self.vmin = ma.minimum(A)
self.vmax = ma.maximum(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None: self.vmin = ma.minimum(A)
if self.vmax is None: self.vmax = ma.maximum(A)
def scaled(self):
'return true if vmin and vmax set'
return (self.vmin is not None and self.vmax is not None)
class LogNorm(Normalize):
"""
Normalize a given value to the 0-1 range on a log scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin<=0:
raise ValueError("values must all be positive")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (ma.log(val)-np.log(vmin))/(np.log(vmax)-np.log(vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin * ma.power((vmax/vmin), val)
else:
return vmin * pow((vmax/vmin), value)
class BoundaryNorm(Normalize):
'''
Generate a colormap index based on discrete intervals.
Unlike :class:`Normalize` or :class:`LogNorm`,
:class:`BoundaryNorm` maps values to integers instead of to the
interval 0-1.
Mapping to the 0-1 interval could have been done via
piece-wise linear interpolation, but using integers seems
simpler, and reduces the number of conversions back and forth
between integer and floating point.
'''
def __init__(self, boundaries, ncolors, clip=False):
'''
*boundaries*
a monotonically increasing sequence
*ncolors*
number of colors in the colormap to be used
If::
b[i] <= v < b[i+1]
then v is mapped to color j;
as i varies from 0 to len(boundaries)-2,
j goes from 0 to ncolors-1.
Out-of-range values are mapped to -1 if low and ncolors
if high; these are converted to valid indices by
:meth:`Colormap.__call__` .
'''
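        # Illustrative usage:
        #   bnorm = BoundaryNorm([0, 1, 2, 3], ncolors=3)
        #   bnorm(0.5)   # -> 0, first interval
        #   bnorm(2.5)   # -> 2, last interval
        #   bnorm(5.0)   # -> 3, out-of-range high; handled by Colormap.__call__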
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
self.Ncmap = ncolors
if self.N-1 == self.Ncmap:
self._interp = False
else:
self._interp = True
def __call__(self, x, clip=None):
if clip is None:
clip = self.clip
x = ma.asarray(x)
mask = ma.getmaskarray(x)
xx = x.filled(self.vmax+1)
if clip:
np.clip(xx, self.vmin, self.vmax)
iret = np.zeros(x.shape, dtype=np.int16)
for i, b in enumerate(self.boundaries):
iret[xx>=b] = i
if self._interp:
iret = (iret * (float(self.Ncmap-1)/(self.N-2))).astype(np.int16)
iret[xx<self.vmin] = -1
iret[xx>=self.vmax] = self.Ncmap
ret = ma.array(iret, mask=mask)
if ret.shape == () and not mask:
ret = int(ret) # assume python scalar
return ret
def inverse(self, value):
return ValueError("BoundaryNorm is not invertible")
class NoNorm(Normalize):
'''
Dummy replacement for Normalize, for the case where we
want to use indices directly in a
:class:`~matplotlib.cm.ScalarMappable` .
'''
def __call__(self, value, clip=None):
return value
def inverse(self, value):
return value
# compatibility with earlier class names that violated convention:
normalize = Normalize
no_norm = NoNorm
| agpl-3.0 |
percyfal/snakemakelib | snakemakelib/report/picard.py | 1 | 25057 | # Copyright (c) 2014 Per Unneberg
import os
import sys
import re
import csv
import texttable as tt
import collections
import math  # used below in qc_plots (math.ceil)
from snakemakelib.report.utils import Template
import matplotlib
matplotlib.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import numpy as np
# http://stackoverflow.com/questions/2170900/get-first-list-index-containing-sub-string-in-python
def index_containing_substring(the_list, substring):
for i, s in enumerate(the_list):
if substring in s:
return i
return -1
def _raw(x):
return (x, None)
def _convert_input(x):
if re.match("^[0-9]+$", x):
return int(x)
elif re.match("^[0-9,.]+$", x):
return float(x.replace(",", "."))
else:
return str(x)
def _read_picard_metrics(f):
with open(f) as fh:
data = fh.readlines()
# Find histogram line
i_hist = index_containing_substring(data, "## HISTOGRAM")
if i_hist == -1:
i = len(data)
else:
i = i_hist
metrics = [[_convert_input(y) for y in x.rstrip("\n").split("\t")] for x in data[0:i] if not re.match("^[ #\n]", x)]
if i_hist == -1:
return (metrics, None)
hist = [[_convert_input(y) for y in x.rstrip("\n").split("\t")] for x in data[i_hist:len(data)] if not re.match("^[ #\n]", x)]
return (metrics, hist)
def _indent_texttable_for_rst(ttab, indent=4, add_spacing=True):
"""Texttable needs to be indented for rst.
:param ttab: texttable object
:param indent: indentation (should be 4 *spaces* for rst documents)
    :param add_spacing: add an additional empty row below class directives
:returns: reformatted texttable object as string
"""
output = ttab.draw()
new_output = []
for row in output.split("\n"):
new_output.append(" " * indent + row)
if re.search('.. class::', row):
new_row = [" " if x != "|" else x for x in row]
new_output.append(" " * indent + "".join(new_row))
return "\n".join(new_output)
def _make_unique(l):
cnt = collections.Counter(l)
luniq = []
d = {}
for c in l:
if not c in d.keys():
d[c] = 0
else:
d[c] += 1
luniq.append("{c}.{sfx}".format(c=c, sfx=d[c]) if d[c]>0 else c)
return luniq
def make_rst_table(data, header=None, indent=True):
"""Make rst table with :py:mod:`Texttable`.
Args:
data (list): data frame to be printed
header (list): column header names
indent (bool): indent table for rst
Returns:
rst-formatted table
"""
if data is None:
return ""
else:
tab_tt = tt.Texttable()
tab_tt.set_precision(2)
if not header is None:
data[0] = header
w = [len(c) + 2 for c in data[0]]
for r in data:
for i in range(0, len(r)):
w[i] = max(w[i], len(r[i]) + 2)
tab_tt.add_rows(data)
tab_tt.set_cols_width(w)
tab_tt.set_cols_align("r" * len(data[0]))
if indent:
return _indent_texttable_for_rst(tab_tt)
else:
return tab_tt.draw()
class DataFrame(object):
"""Light weight data frame object
A data frame is represented as an OrderedDict.
Args:
*args: if provided, must be a list of lists that upon initialization is converted to an OrderedDict. The first list is treated as a header.
"""
_format = collections.OrderedDict()
_tp = Template()
def __init__(self, *args, **fmt):
if (len(args[0]) == 1):
self._colnames = args[0]
self._data = [collections.OrderedDict([(args[0][0], row[0])]) for row in args[1:]]
else:
reader = csv.DictReader([",".join([str(y) for y in x]) for x in args])
self._colnames = reader.fieldnames
self._data = [collections.OrderedDict([(k, row[k]) for k in self._colnames]) for row in reader]
if fmt:
self._format = collections.OrderedDict(fmt)
def __str__(self):
return "{cls} object with {rows} rows, {columns} columns".format(cls=self.__class__, rows=self.dim[0], columns=self.dim[1])
def __iter__(self):
self.index = 0
return self
def __next__(self):
if len(self._data) > self.index:
self.index += 1
return self._data[self.index - 1]
else:
raise StopIteration
def __getitem__(self, columns):
a = [columns] + [[row[c] for c in columns] for row in self._data]
return self.__class__(*a)
def __setitem__(self, key, val):
# if val is a list must be equal in length to no. rows
if isinstance(val, list):
for (row,v) in zip(self._data, val):
row.update([(key, v)])
else:
_ = [row.update([(key, val)]) for row in self._data]
if not key in self.colnames:
self.colnames.append(key)
def x(self, column=None, indices=None, ctype=None):
column = self.colnames[0] if not column else column
x = [row[column] for row in self._data]
if indices:
x = [self._data[i][column] for i in indices]
if ctype:
x = [ctype(y) for y in x]
return x
# Copy definition of x
y=x
@property
def colnames(self):
return self._colnames
@property
def data(self):
return self._data
@property
def dim(self):
return (len(self._data), len(self._data[0]))
def as_list(self):
return [self._colnames] + [[self._format[c][1](row[c]) for c in self._colnames] for row in self._data]
def _format_field(self, value, spec, ctype):
if value == '?':
spec = 's'
ctype = str
elif value == "":
spec = 's'
ctype = str
value = ctype(value)
return self._tp.format_field(value, spec)
def set_format(self, **fmt):
self._format = collections.OrderedDict(fmt)
def rst(self, raw=False):
        data = [row.split("\t") for row in self.summary(raw=raw).split("\n")]
        return make_rst_table(data)
def summary(self, fmt = None, ctype = None, sep="\t", raw=False):
columns = self.colnames
fmt = {k:v[0] for (k,v) in list(self._format.items()) if k in columns} if fmt is None else {k:v for (k,v) in zip(columns, fmt)}
ctype = {k:v[1] for (k,v) in list(self._format.items()) if k in columns} if ctype is None else {k:v for (k,v) in zip(columns, ctype)}
if not fmt:
raise ValueError("No format defined for {cls}; please use derived subclass".format(cls=__class__))
if raw:
return "\n".join([sep.join([x for x in columns])] + [sep.join([r[c] for c in columns]) for r in self])
return "\n".join([sep.join([x for x in columns])] + [sep.join([self._format_field(r[c], fmt[c], ctype[c]) for c in columns]) for r in self])
class PicardMetrics(object):
"""Generic class to store metrics section from Picard Metrics reports.
See also class PicardHistMetrics for reports that provide metrics
and histogram information.
Args:
*args: if provided, must be a list of lists that upon initialization is converted to an OrderedDict. The first list is treated as a header.
filename (str): file from which to collect metrics
identifier (str): unique identifier for object, e.g. sample or unit name
Returns:
An instance of class PicardMetrics
"""
_format = collections.OrderedDict()
_tp = Template()
def __init__(self, *args, filename=None, identifier=None):
self._id = identifier
if filename is None and not args:
raise ValueError("please supply either filename or args to instantiate class")
self._set_vars(identifier, filename)
if not self.filename is None and not args:
(args, _) = _read_picard_metrics(self.filename)
self._metrics = DataFrame(*args, **self._format)
def _set_vars(self, identifier, filename):
self._id = identifier if not identifier is None else filename
self._filename = str(filename)
def __str__(self):
return "{cls} object with a metrics field with {rows} rows, {columns} columns".format(cls=self.__class__, rows=self.metrics.dim[0], columns=self.metrics.dim[1])
def __getitem__(self, columns):
a = [columns] + [[row[c] for c in columns] for row in self._metrics._data]
m = self.__class__(*a, identifier=self.id, filename=self.filename)
fmt = collections.OrderedDict([(c, self.metrics._format[c]) for c in columns])
m.set_format(**fmt)
return m
@property
def metrics(self):
return self._metrics
@property
def id(self):
return self._id
@property
def filename(self):
return self._filename
def x(self, column=None, indices=None):
return self.metrics.x(column, indices, ctype=self._format[column][1])
y=x
def summary(self, fmt = None, ctype = None, sep="\t", raw=False):
return self.metrics.summary(fmt, ctype, sep, raw)
def as_list(self):
return self.metrics.as_list()
def set_format(self, **fmt):
self._format = collections.OrderedDict(fmt)
self._metrics.set_format(**fmt)
def add_column(self, col, val, **fmt):
"""Add column to metrics"""
self.metrics[col] = val
self._metrics._format.update(fmt)
self._format.update(fmt)
class PicardHistMetrics(PicardMetrics):
"""Generic class to store metrics section from Picard Histogram
Metrics reports.
In addition to metrics data the class also stores histogram
values.
Args:
name (str): unique identifier for object, e.g. sample or unit name
filename (str): file from which to collect metrics
hist (list): list of lists, where the first entry holds the
names of the values
*args (list): if provided, must be a list of lists that upon
initialization is converted to an OrderedDict. The first list
is treated as a header.
Returns:
An instance of class PicardHistMetrics
"""
def __init__(self, *args, identifier=None, filename=None, hist=None):
# NB: __init__ should call super, but with current
# implementation would require reading the metrics file twice!
if filename is None and hist is None:
raise ValueError("please provide argument hist when not reading from file")
if filename is None and not args:
raise ValueError("please supply either filename or args to instantiate class")
self._set_vars(identifier, filename)
if not self.filename is None and not args:
(args, hist) = _read_picard_metrics(self.filename)
self._metrics = DataFrame(*args, **self._format)
if hist:
fmt = collections.OrderedDict([(x, type(y)) for x,y in zip(hist[0], hist[1])])
self._hist = DataFrame(*hist, **fmt)
def __getitem__(self, columns):
a = [columns] + [[row[c] for c in columns] for row in self.metrics._data]
h = [self.hist.colnames] + [[row[c] for c in self.hist.colnames] for row in self.hist._data]
return self.__class__(*a, identifier=self.id, filename=self.filename, hist=h)
@property
def hist(self):
return self._hist
class AlignMetrics(PicardMetrics):
_format = collections.OrderedDict([('CATEGORY', ('s', str)), ('TOTAL_READS', ('3.2h', int)),
('PF_READS', ('3.2h', int)), ('PCT_PF_READS', ('3.2%', float)),
('PF_NOISE_READS', ('3.2h', int)), ('PF_READS_ALIGNED', ('3.2h', int)),
('PCT_PF_READS_ALIGNED', ('3.2%', float)), ('PF_ALIGNED_BASES', ('3.2h', int)),
('PF_HQ_ALIGNED_READS', ('3.2h', int)), ('PF_HQ_ALIGNED_BASES', ('3.2h', int)),
('PF_HQ_ALIGNED_Q20_BASES', ('3.2h', int)), ('PF_HQ_MEDIAN_MISMATCHES', ('', int)),
('PF_MISMATCH_RATE', ('3.2f', float)), ('PF_HQ_ERROR_RATE', ('3.2f', float)), ('PF_INDEL_RATE', ('3.2f', float)),
('MEAN_READ_LENGTH', ('3.2f', float)), ('READS_ALIGNED_IN_PAIRS', ('3.2h', int)),
('PCT_READS_ALIGNED_IN_PAIRS', ('3.2%', float)), ('BAD_CYCLES', ('3.2h', int)), ('STRAND_BALANCE', ('3.2f', float)),
('PCT_CHIMERAS', ('3.2%', float)), ('PCT_ADAPTER', ('3.2%', float)), ('SAMPLE', ('s', str)),
('LIBRARY', ('s', str)), ('READ_GROUP', ('s', str))])
def __init__(self, *args, identifier=None, filename=None):
super(AlignMetrics, self).__init__(*args, identifier=identifier, filename=filename)
def category(self, category="PAIR"):
"""Retrieve subset object with only one alignment category"""
a = [self.metrics.colnames] + [[row[c] for c in self.metrics.colnames] for row in self._metrics if row['CATEGORY'] == category]
return AlignMetrics(*a, filename=self.filename, identifier=self.id)
class InsertMetrics(PicardHistMetrics):
_format = collections.OrderedDict([('MEDIAN_INSERT_SIZE', ('', int)), ('MEDIAN_ABSOLUTE_DEVIATION', ('', int)),
('MIN_INSERT_SIZE', ('', int)), ('MAX_INSERT_SIZE', ('', int)),
('MEAN_INSERT_SIZE', ('3.3f', float)), ('STANDARD_DEVIATION', ('3.3f', float)),
('READ_PAIRS', ('3.2h', int)), ('PAIR_ORIENTATION', ('s', str)),
('WIDTH_OF_10_PERCENT', ('', int)), ('WIDTH_OF_20_PERCENT', ('', int)),
('WIDTH_OF_30_PERCENT', ('', int)), ('WIDTH_OF_40_PERCENT', ('', int)),
('WIDTH_OF_50_PERCENT', ('', int)), ('WIDTH_OF_60_PERCENT', ('', int)),
('WIDTH_OF_70_PERCENT', ('', int)), ('WIDTH_OF_80_PERCENT', ('', int)),
('WIDTH_OF_90_PERCENT', ('', int)), ('WIDTH_OF_99_PERCENT', ('', int)),
('SAMPLE', ('s', str)), ('LIBRARY', ('s', str)), ('READ_GROUP', ('s', str))])
def __init__(self, *args, identifier=None, filename=None, hist=None):
super(InsertMetrics, self).__init__(*args, identifier=identifier, filename=filename, hist=hist)
class HsMetrics(PicardMetrics):
_format = collections.OrderedDict([('BAIT_SET', ('s', str)), ('GENOME_SIZE', ('3.2h', int)),
('BAIT_TERRITORY', ('3.2h', int)), ('TARGET_TERRITORY', ('3.2h', int)),
('BAIT_DESIGN_EFFICIENCY', ('3.2f', float)), ('TOTAL_READS', ('3.2h', int)),
('PF_READS', ('3.2h', int)), ('PF_UNIQUE_READS', ('3.2h', int)), ('PCT_PF_READS', ('3.2%', float)),
('PCT_PF_UQ_READS', ('3.2%', float)), ('PF_UQ_READS_ALIGNED', ('3.2h', int)),
('PCT_PF_UQ_READS_ALIGNED', ('3.2%', float)), ('PF_UQ_BASES_ALIGNED', ('3.2h', int)),
('ON_BAIT_BASES', ('3.2h', int)), ('NEAR_BAIT_BASES', ('3.2h', int)), ('OFF_BAIT_BASES', ('3.2h', int)),
('ON_TARGET_BASES', ('3.2h', int)), ('PCT_SELECTED_BASES', ('3.2%', float)), ('PCT_OFF_BAIT', ('3.2%', float)),
('ON_BAIT_VS_SELECTED', ('3.2f', float)), ('MEAN_BAIT_COVERAGE', ('3.2f', float)),
('MEAN_TARGET_COVERAGE', ('3.2f', float)), ('PCT_USABLE_BASES_ON_BAIT', ('3.2%', float)),
('PCT_USABLE_BASES_ON_TARGET', ('3.2%', float)), ('FOLD_ENRICHMENT', ('3.2f', float)),
('ZERO_CVG_TARGETS_PCT', ('3.2%', float)), ('FOLD_80_BASE_PENALTY', ('3.2f', float)),
('PCT_TARGET_BASES_2X', ('3.2%', float)), ('PCT_TARGET_BASES_10X', ('3.2%', float)),
('PCT_TARGET_BASES_20X', ('3.2%', float)), ('PCT_TARGET_BASES_30X', ('3.2%', float)),
('PCT_TARGET_BASES_40X', ('3.2%', float)), ('PCT_TARGET_BASES_50X', ('3.2%', float)),
('PCT_TARGET_BASES_100X', ('3.2%', float)), ('HS_LIBRARY_SIZE', ('3.2h', int)), ('HS_PENALTY_10X', ('3.2f', float)),
('HS_PENALTY_20X', ('3.2f', float)), ('HS_PENALTY_30X', ('3.2f', float)), ('HS_PENALTY_40X', ('3.2f', float)),
('HS_PENALTY_50X', ('3.2f', float)), ('HS_PENALTY_100X', ('3.2f', float)), ('AT_DROPOUT', ('3.2f', float)),
('GC_DROPOUT', ('3.2f', float)), ('SAMPLE', ('s', str)), ('LIBRARY', ('s', str)), ('READ_GROUP', ('s', str))])
def __init__(self, *args, identifier=None, filename=None):
super(HsMetrics, self).__init__(*args, identifier=identifier, filename=filename)
class DuplicationMetrics(PicardHistMetrics):
_format = collections.OrderedDict([('LIBRARY', ('s', str)), ('UNPAIRED_READS_EXAMINED', ('3.2h', int)),
('READ_PAIRS_EXAMINED', ('3.2h', int)), ('UNMAPPED_READS', ('3.2h', int)),
('UNPAIRED_READ_DUPLICATES', ('3.2h', int)), ('READ_PAIR_DUPLICATES', ('3.2h', int)),
('READ_PAIR_OPTICAL_DUPLICATES', ('3.2f', float)),
('PERCENT_DUPLICATION', ('3.2%', float)), ('ESTIMATED_LIBRARY_SIZE', ('3.2h', int))])
def __init__(self, *args, identifier=None, filename=None, hist=None):
super(DuplicationMetrics, self).__init__(*args, identifier=identifier, filename=filename, hist=hist)
self._prune_empty_rows()
def _prune_empty_rows(self):
"""Prune empty rows in metrics. This could happen if there is no read
group information and all metrics are stored in 'Unknown
library'
"""
if (self.metrics.dim[0] != 2):
return
self.metrics._data = [self._metrics.data[1]]
def combine_metrics(metrics, mergename = "PicardMetrics_merge", uniquify=False):
"""Convert list of metrics objects to PicardMetrics object
Args:
metrics (list of tuples): list of metrics objects
        mergename (str): id to give to the merged object
uniquify (bool): convert columns to unique names, appending .# where # is the count of the duplicated column
Returns:
new PicardMetrics object with format specifications set for summary operations
"""
nrows = set([nr for sublist in [[m.metrics.dim[0] for m in mtup] for mtup in metrics] for nr in sublist])
if len(set(nrows)) > 1:
raise ValueError("not all metrics of equal length; most probably you need to subset an AlignMetrics class to one category")
ncols = set([nr for sublist in [[m.metrics.dim[1] for m in mtup] for mtup in metrics] for nr in sublist])
if len(set(ncols)) > len(metrics[0]):
raise ValueError("not all metrics tuples have same set of columns; refusing to merge")
colnames = [c for sublist in [m.metrics.colnames for m in metrics[0]] for c in sublist]
args = [_make_unique(colnames)] if uniquify else [colnames]
for mtup in metrics:
rowargs = []
fmtlist = []
for m in mtup:
rowargs += [m.metrics.data[0][k] for k in m.metrics.colnames]
fmtlist += [(k,v) for k,v in m._format.items()]
args.append(rowargs)
pm = PicardMetrics(*args, identifier=mergename)
fmt = collections.OrderedDict(fmtlist)
if uniquify:
for c in args[0]:
m = re.match("(.*)\.[0-9]+$", c)
if m:
fmt[c] = fmt[m.group(1)]
pm.set_format(**fmt)
return (pm)
def qc_plots(inputfiles, config, output):
"""Generate qc plots for picard QC summary report"""
samples = config['samples']
# Collect pm metrics and plot
mlist =(list(
zip(
[AlignMetrics(filename=x).category() for x in inputfiles if x.endswith(config['alnmetrics'])],
[InsertMetrics(filename=x) for x in inputfiles if x.endswith(config['insmetrics'])],
[DuplicationMetrics(filename=x) for x in inputfiles if x.endswith(config['dupmetrics'])],
[HsMetrics(filename=x) for x in inputfiles if x.endswith(config['hsmetrics'])]
)
))
pm = combine_metrics(mlist)
pm.add_column('PCT_ON_TARGET', [str(100 * float(x)/float(y)) for (x,y) in zip(pm.x('ON_TARGET_BASES'), pm.y('PF_UQ_BASES_ALIGNED'))], **{'PCT_ON_TARGET' : ('3.2f', float)})
pm.metrics['SAMPLE'] = samples
# Sequence statistics plot
plt.clf()
sdup = [int(50 + 500 * x) for x in pm.x('PERCENT_DUPLICATION')]
plt.scatter(pm.x('PCT_PF_READS_ALIGNED'), pm.y('TOTAL_READS'), s=sdup, alpha=0.5)
plt.xlabel(r'Percent aligned', fontsize=14)
plt.yscale('log', **{'basey':10})
plt.xticks(arange(0,1.1,0.1), range(0,110,10))
plt.ylabel(r'Read count', fontsize=14)
plt.title("Sequence summary.\nPoint sizes correspond to duplication levels.", fontsize=14)
plt.tight_layout()
plt.savefig(output.seqstats)
plt.close()
# Alignment metrics
plt.clf()
n = len(samples)
plt.xlim(0, n+2)
plt.xticks(range(1,n+1), [x for x in samples], rotation=90)
plt.ylim(0,1)
plt.yticks(arange(0,1.1,0.1), range(0,110,10))
plt.plot(range(1,n+1), pm.x('PCT_PF_READS_ALIGNED'), "o")
plt.xlabel(r'Sample', fontsize=14)
plt.ylabel(r'Percent aligned', fontsize=14)
plt.tight_layout()
plt.savefig(output.alnmet)
plt.close()
# Duplication metrics
plt.clf()
plt.xlim(0, n+2)
plt.xticks(range(1,n+1), [x for x in samples], rotation=90)
plt.ylim(0,1)
plt.yticks(arange(0,1.1,0.1), range(0,110,10))
plt.plot(range(1,n+1), pm.x('PERCENT_DUPLICATION'), "o")
plt.xlabel(r'Sample', fontsize=14)
plt.ylabel(r'Percent duplication', fontsize=14)
plt.tight_layout()
plt.savefig(output.dupmet)
plt.close()
# Insert metrics
plt.clf()
plt.xlim(0, n+2)
plt.xticks(range(1,n+1), [x for x in samples], rotation=90)
plt.plot(range(1,n+1), pm.x('MEAN_INSERT_SIZE'), "o")
plt.xlabel(r'Sample', fontsize=14)
plt.ylabel(r'Mean insert size', fontsize=14)
plt.tight_layout()
plt.savefig(output.insmet)
plt.close()
# Target metrics
plt.clf()
plt.xlim(0, n+2)
plt.xticks(range(1,n+1), [x for x in samples], rotation=90)
plt.plot(range(1,n+1), pm.x('PCT_ON_TARGET'), "o")
plt.xlabel(r'Sample', fontsize=14)
plt.ylabel(r'Percent on target', fontsize=14)
plt.tight_layout()
plt.savefig(output.targetmet)
plt.close()
# Target metrics
plt.clf()
plt.plot(pm.x('PERCENT_DUPLICATION'), pm.y('PCT_ON_TARGET'), "o")
plt.xlabel(r'Percent duplication', fontsize=14)
plt.ylabel(r'Percent on target', fontsize=14)
plt.tight_layout()
plt.savefig(output.target2dup)
plt.close()
# Hs metrics
columns = config['columns']
hticks = config['hticks']
hsmetrics = pm[columns]
# Hs boxplot metrics
plt.clf()
plt.ylim(0,1)
plt.yticks(arange(0,1.1,0.1), range(0,110,10))
plt.boxplot(np.array(hsmetrics.as_list()[1:]))
plt.xticks(range(1,len(hticks)+1), [x for x in hticks])
plt.savefig(output.hsmet)
plt.close()
nsubplots = int(math.ceil(n/9))
k = 0
for i_subplot in range(0, nsubplots):
plt.clf()
f, axarr = plt.subplots(3, 3, sharex='col', sharey='row')
for i in range(0, 3):
for j in range(0, 3):
if k < n:
x = range(1, len(hticks) + 1)
axarr[i,j].plot(x, hsmetrics.as_list()[1:][k], "o")
axarr[i,j].set_xticks(x)
axarr[i,j].set_title(samples[k])
axarr[i,j].set_xlim(0, (len(hticks) + 1))
axarr[i,j].set_ylim(-0.05, 1.05)
axarr[i,j].set_yticks(arange(0,1.1,0.1))
axarr[i,j].set_yticklabels(range(0,110,10))
axarr[i,j].set_xticklabels([h for h in hticks], rotation=45)
else:
axarr[i,j].axis('off')
k += 1
plt.savefig(output.hsmetsub[i_subplot])
plt.close()
# Write csv summary file
pmsum = pm[config['summarycolumns']]
with open(output.summarytable, "w") as fh:
fh.write(pmsum.metrics.summary(sep=","))
# Finally write entire merged metrics data frame
with open(output.metricstable, "w") as fh:
fh.write(pm.metrics.summary(raw=True, sep=","))
| mit |
maxiee/MyCodes | KalmanAndBesianFiltersInPython/MyKalman/sensor.py | 1 | 1504 | import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import numpy.random as random
import math
class OneDSensor(object):
def __init__(self,
                 x0=0, # initial position
velocity=1,
measurement_variance=0.0,
process_variance=0.0):
self.x = x0
self.velocity = velocity
self.noise = math.sqrt(measurement_variance)
self.pnoise = math.sqrt(process_variance)
self.constant_vel = velocity
def sense_position(self):
        # random.rand(): uniform distribution over [0, 1)
        # random.randn(): normal distribution with mean 0
pnoise = abs(random.rand() * self.pnoise)
if self.velocity > self.constant_vel:
pnoise = -pnoise
self.velocity += pnoise
self.x = self.x + self.velocity
return self.x + random.randn() * self.noise
if __name__ == "__main__":
count = 100
dog = OneDSensor(measurement_variance=4.0)
xs = []
dog2 = OneDSensor(measurement_variance=100.0)
xs2 = []
dog3 = OneDSensor(process_variance=0.5)
xs3 = []
xs_real = [i for i in range(count)]
for i in range(count):
x = dog.sense_position()
xs.append(x)
x2 = dog2.sense_position()
xs2.append(x2)
x3 = dog3.sense_position()
xs3.append(x3)
plt.plot(xs_real, label='real', lw=2)
plt.plot(xs, label='Sensor')
plt.plot(xs2, label='Sensor2')
plt.plot(xs3, label='Sensor3')
plt.legend(loc='best')
plt.show()
| gpl-3.0 |
s3h10r/avaon | django-avaon/acore/views/visualize.py | 1 | 6406 | """
statistics - heatmaps of impacts
"""
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import loader,Context,RequestContext
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
# --- if no x-server is available, comment this out
#import matplotlib
#matplotlib.use("Cairo") # cairo-backend, so we can create figures without x-server (PDF & PNG ...)
# Cairo backend requires that pycairo is installed.
# --- end no x-server
def _calc_stats_heatmap(id=None):
"""
    create a data structure which gives us info about the density of impacts
per weekday & hour
returns
data[weekday][hour] = nr. of impacts
"""
"""
TODO: write a test for me
"""
import datetime as dt
from acore.models import Impact, Thing
if id:
imps = Impact.objects.filter(thing = id)
else:
imps = Impact.objects.all()
# --- init empty data[weekday][hour]
# data = [[0] *24]*7 # data[weekday][hour] # BUG! try: data[0][1] && print data (reference?)
data = []
for i in range(7):
data.append([0]*24)
# ---
delta = dt.timedelta(seconds=60*60)
for im in imps:
#print im.t_from, "+", im.duration, "==", im.duration.seconds, 's'
#print " "*4, im.t_from.weekday()
travel_t = im.t_from
dur = 0
while True:
wd = travel_t.weekday()
h = travel_t.hour
data[wd][h] += 1
dur += delta.seconds
travel_t += delta
if ((im.t_to - travel_t) < delta) and (im.t_to.hour != travel_t.hour):
break
return data
def _plot_heatmap(fig,ax,data,y_label="y_label",x_label="x_label",title="title"):
"""
"""
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
ax.set_title(title,fontstyle='italic')
ax.set_ylabel(y_label)
ax.set_xlabel(x_label)
ticks_x=24
ticks_y=7
# we must turn our data into a numpy-array for using pcolor
data = np.array(data)
    pc = ax.pcolor(data, cmap=plt.cm.Blues)  # keep the handle for the colorbar
fig.colorbar(pc, shrink=0.5)
# Shift ticks to be at 0.5, 1.5, etc
# http://stackoverflow.com/questions/24190858/matplotlib-move-ticklabels-between-ticks
ax.yaxis.set(ticks=np.arange(0.5,ticks_y + 0.5),ticklabels=range(0,ticks_y))
ax.xaxis.set(ticks=np.arange(0.5,ticks_x + 0.5),ticklabels=range(0,ticks_x))
ax.set_xlim(0.0,ticks_x)
ax.set_ylim(0.0,ticks_y)
ax.xaxis.labelpad = 10
fig.gca().set_aspect('equal')
fig.tight_layout()# not as good as savefig(bbox_inches="tight")?
def heatmap(request,id=None):
"""
plot a "impact density heatmap" in format data[weekday][hour]
for all available impacts or for Thing.id = id
"""
"""
TODO: time-range param
"""
import numpy as np
import random
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
data = _calc_stats_heatmap(id)
fig=Figure(figsize=(10,8), dpi=100)
ax=fig.add_subplot(111)
if not id:
_plot_heatmap(fig,ax,data,y_label="weekday", x_label="time", title="impact density #heatmap\n")
else:
from acore.models import Impact, Thing
thing = Thing.objects.get(id=id)
_plot_heatmap(fig,ax,data,y_label="weekday", x_label="time", title="'%s' - impact density #heatmap\n" % thing.name)
# cool, Django's HttpResponse object supports file-like API :)
# so we dunnot need StringIO
response = HttpResponse(content_type="image/png")
# bbox_inches 'tight' removes lot of unwanted whitespace around our plot
fig.savefig(response, format="png",bbox_inches='tight')
return response
def demo_heatmap(request):
"""
    returns a matplotlib plot as a self-test
"""
"""
http://stackoverflow.com/questions/14391959/heatmap-in-matplotlib-with-pcolor
http://stackoverflow.com/questions/15988413/python-pylab-pcolor-options-for-publication-quality-plots
http://stackoverflow.com/questions/24190858/matplotlib-move-ticklabels-between-ticks
http://matplotlib.org/faq/usage_faq.html#matplotlib-pylab-and-pyplot-how-are-they-related
"""
import django
import numpy as np
import random
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
fig=Figure(figsize=(10,8), dpi=100)
ax=fig.add_subplot(111)
ax.set_title("demo heatmap e.g. 'impact density'",fontstyle='italic')
ax.set_ylabel("weekday")
ax.set_xlabel("time in h")
ticks_x=24
ticks_y=7
data = []
for i in range(ticks_y):
y = [ random.randint(0,10) for i in range(ticks_x) ]
data.append(y)
data = np.array(data)
pc = ax.pcolor(data, cmap=plt.cm.Blues) # http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
    ax.yaxis.set(ticks=np.arange(0.5, ticks_y + 0.5), ticklabels=range(0, ticks_y))
    ax.xaxis.set(ticks=np.arange(0.5, ticks_x + 0.5), ticklabels=range(0, ticks_x))
# http://stackoverflow.com/questions/12608788/changing-the-tick-frequency-on-x-or-y-axis-in-matplotlib
import matplotlib.ticker as ticker
#ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
loc = ticker.MultipleLocator(base=1) # this locator puts ticks at regular intervals
ax.xaxis.set_major_locator(loc)
ax.xaxis.set_minor_locator(ticker.NullLocator())
ax.set_xlim(0.0,ticks_x)
ax.set_ylim(0.0,ticks_y)
#ax.xaxis.labelpad = 20
fig.colorbar(pc, shrink=0.5)
#plt.gca().invert_yaxis()
fig.gca().set_aspect('equal')
# tight_layout() & bbox_inches 'tight' removes lot of unwanted
# whitespace around our plot
fig.tight_layout()# not as good as savefig(bbox_inches="tight")?
# cool, Django's HttpResponse object supports file-like API :)
    # so we don't need StringIO
response=django.http.HttpResponse(content_type='image/png')
fig.savefig(response, format="png",bbox_inches='tight')
return response
if __name__ == '__main__':
print _calc_stats_heatmap()
| gpl-2.0 |
Vimos/scikit-learn | benchmarks/bench_covertype.py | 57 | 7378 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
michaelbramwell/sms-tools | lectures/03-Fourier-properties/plots-code/fft-zero-phase.py | 24 | 1140 | import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft, fftshift
import sys
sys.path.append('../../../software/models/')
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512
M = 401
hN = N/2
hM = (M+1)/2
start = .8*fs
xw = x[start-hM:start+hM-1] * np.hamming(M)
plt.figure(1, figsize=(9.5, 6.5))
plt.subplot(411)
plt.plot(np.arange(-hM, hM-1), xw, lw=1.5)
plt.axis([-hN, hN-1, min(xw), max(xw)])
plt.title('x (oboe-A4.wav), M = 401')
fftbuffer = np.zeros(N)
fftbuffer[:hM] = xw[hM-1:]
fftbuffer[N-hM+1:] = xw[:hM-1]
plt.subplot(412)
plt.plot(np.arange(0, N), fftbuffer, lw=1.5)
plt.axis([0, N, min(xw), max(xw)])
plt.title('fftbuffer: N = 512')
X = fftshift(fft(fftbuffer))
mX = 20 * np.log10(abs(X)/N)
pX = np.unwrap(np.angle(X))
plt.subplot(413)
plt.plot(np.arange(-hN, hN), mX, 'r', lw=1.5)
plt.axis([-hN,hN-1,-100,max(mX)])
plt.title('mX')
plt.subplot(414)
plt.plot(np.arange(-hN, hN), pX, 'c', lw=1.5)
plt.axis([-hN,hN-1,min(pX),max(pX)])
plt.title('pX')
plt.tight_layout()
plt.savefig('fft-zero-phase.png')
plt.show()
| agpl-3.0 |
kwyoung11/ciml | labs/lab2-KNN/HighD.py | 4 | 1526 | from math import *
import random
from numpy import *
import matplotlib.pyplot as plt
waitForEnter=False
def generateUniformExample(numDim):
return [random.random() for d in range(numDim)]
def generateUniformDataset(numDim, numEx):
return [generateUniformExample(numDim) for n in range(numEx)]
def computeExampleDistance(x1, x2):
dist = 0.0
for d in range(len(x1)):
dist += (x1[d] - x2[d]) * (x1[d] - x2[d])
return sqrt(dist)
def computeDistances(data):
N = len(data)
D = len(data[0])
dist = []
for n in range(N):
for m in range(n):
dist.append( computeExampleDistance(data[n],data[m]) / sqrt(D))
return dist
N = 200 # number of examples
Dims = [2, 8, 32, 128, 512] # dimensionalities to try
Cols = ['#FF0000', '#880000', '#000000', '#000088', '#0000FF']
Bins = arange(0, 1, 0.02)
plt.xlabel('distance / sqrt(dimensionality)')
plt.ylabel('# of pairs of points at that distance')
plt.title('dimensionality versus uniform point distances')
for i,d in enumerate(Dims):
distances = computeDistances(generateUniformDataset(d, N))
print "D=%d, average distance=%g" % (d, mean(distances) * sqrt(d))
plt.hist(distances,
Bins,
histtype='step',
color=Cols[i])
if waitForEnter:
plt.legend(['%d dims' % d for d in Dims])
plt.show(False)
x = raw_input('Press enter to continue...')
plt.legend(['%d dims' % d for d in Dims])
plt.savefig('fig.pdf')
plt.show()
| gpl-2.0 |
quheng/scikit-learn | sklearn/tests/test_dummy.py | 186 | 17778 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
kyleabeauchamp/HMCNotes | code/correctness/old/test_john_hmc.py | 1 | 1318 | import lb_loader
import pandas as pd
import simtk.openmm.app as app
import numpy as np
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
sysname = "ljbox"
system, positions, groups, temperature, timestep = lb_loader.load(sysname)
integrator = hmc_integrators.GHMCIntegratorOneStep(temperature, timestep=8*u.femtoseconds)
context = lb_loader.build(system, integrator, positions, temperature)
integrator.step(50000)
positions = context.getState(getPositions=True).getPositions()
collision_rate = 1.0 / u.picoseconds
n_steps = 25
Neff_cutoff = 2000.
grid = []
for itype in ["GHMCIntegratorOneStep"]:
for timestep_factor in [1.0, 2.0, 4.0]:
d = dict(itype=itype, timestep=timestep / timestep_factor)
grid.append(d)
for settings in grid:
itype = settings.pop("itype")
timestep = settings["timestep"]
integrator = hmc_integrators.GHMCIntegratorOneStep(temperature, timestep=timestep)
context = lb_loader.build(system, integrator, positions, temperature)
filename = "./data/%s_%s_%.3f_%d.csv" % (sysname, itype, timestep / u.femtoseconds, collision_rate * u.picoseconds)
print(filename)
data, start, g, Neff = lb_loader.converge(context, n_steps=n_steps, Neff_cutoff=Neff_cutoff)
data.to_csv(filename)
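# Descriptive note: each CSV written above stores the DataFrame returned by
# lb_loader.converge for one (integrator, timestep) setting; the filename
# encodes the system name, integrator type, timestep in femtoseconds and
# collision rate in ps^-1.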
| gpl-2.0 |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/estimator/inputs/queues/feeding_functions.py | 46 | 15782 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
current_epoch: Integer, the epoch number has been emitted.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
# Now we might have emitted more data for expected epochs. Need to trim.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
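# Illustrative note (not part of the original module): with array_length=5 and
# batch_size=4 starting at index 3, the helper above wraps around, e.g.
#
#   _get_integer_indices_for_next_batch(
#       batch_indices_start=3, batch_size=4, epoch_end=2,
#       array_length=5, current_epoch=0, total_epochs=None)
#   # -> ([3, 4, 0, 1], 0)   no epoch boundary (index 2) inside the batch
#
# With epoch_end=4 the same call returns ([3, 4, 0, 1], 1), since the batch
# crosses the epoch boundary once.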
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
class _GeneratorFeedFn(object):
"""Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""
def __init__(self,
placeholders,
generator,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
first_sample = next(generator())
if len(placeholders) != len(first_sample):
raise ValueError("Expected {} placeholders; got {}.".format(
len(first_sample), len(placeholders)))
self._keys = sorted(list(first_sample.keys()))
self._col_placeholders = placeholders
self._generator_function = generator
self._iterator = generator()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
list_dict = {}
list_dict_size = 0
while list_dict_size < self._batch_size:
try:
data_row = next(self._iterator)
except StopIteration:
self._epoch += 1
self._iterator = self._generator_function()
data_row = next(self._iterator)
for index, key in enumerate(self._keys):
if key not in data_row.keys():
          raise KeyError("key mismatch between dicts emitted by GenFun. "
                         "Expected {} keys; got {}".format(
                             self._keys, data_row.keys()))
list_dict.setdefault(self._col_placeholders[index],
list()).append(data_row[key])
list_dict_size += 1
feed_dict = {key: np.asarray(item) for key, item in list(list_dict.items())}
return feed_dict
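# Illustrative sketch (an assumption, not part of the original module): the
# `generator` accepted by _GeneratorFeedFn and _enqueue_data is a zero-argument
# callable returning a fresh iterator of dicts that map feature names to
# numpy-compatible values. A minimal example generator could look like:
def _example_feed_generator():
  """Hypothetical generator used only to illustrate the expected shape."""
  for i in range(4):
    yield {"x": np.array([i], dtype=np.float32),
           "y": np.array([2 * i], dtype=np.float32)}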
def _enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given (`OrderedDict` of) array
or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
`Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
numpy arrays, the first enqueued `Tensor` contains the row number.
Args:
data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
into the queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
Returns:
A queue filled with the rows of the given (`OrderedDict` of) array or
`DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
arrays, a numpy `ndarray`, or a generator producing these.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [
dtypes.as_dtype(col.dtype) for col in data.values()
]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif isinstance(data, tp.FunctionType):
x_first_el = six.next(data())
x_first_keys = sorted(x_first_el.keys())
x_first_values = [x_first_el[key] for key in x_first_keys]
types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
queue_shapes = [col.shape for col in x_first_values]
get_feed_fn = _GeneratorFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [
dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(
capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
runner = fqr._FeedingQueueRunner( # pylint: disable=protected-access
queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
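# Usage sketch (an assumption, not taken from this file): within a graph one
# might fill a queue from a numpy array and dequeue mini-batches, e.g.
#
#   data = np.arange(100, dtype=np.float32).reshape(100, 1)
#   queue = _enqueue_data(data, capacity=32, shuffle=False, enqueue_size=8)
#   index_batch, value_batch = queue.dequeue_many(4)
#
# index_batch carries the row numbers and value_batch the matching rows; the
# registered queue runner feeds the queue once tf.train.start_queue_runners is
# called inside a session.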
| mit |
apbard/scipy | scipy/signal/spectral.py | 1 | 66503 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import _lombscargle
from ._arraytools import const_ext, even_ext, odd_ext, zero_ext
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram', 'stft', 'istft', 'check_COLA']
def lombscargle(x,
y,
freqs,
precenter=False,
normalize=False):
"""
lombscargle(x, y, freqs)
Computes the Lomb-Scargle periodogram.
The Lomb-Scargle periodogram was developed by Lomb [1]_ and further
extended by Scargle [2]_ to find, and test the significance of weak
periodic signals with uneven temporal sampling.
When *normalize* is False (default) the computed periodogram
    is unnormalized; it takes the value ``(A**2) * N/4`` for a harmonic
signal with amplitude A for sufficiently large N.
    When *normalize* is True the computed periodogram is normalized by
the residuals of the data around a constant reference model (at zero).
Input arrays should be one-dimensional and will be cast to float64.
Parameters
----------
x : array_like
Sample times.
y : array_like
Measurement values.
freqs : array_like
Angular frequencies for output periodogram.
precenter : bool, optional
Pre-center amplitudes by subtracting the mean.
normalize : bool, optional
Compute normalized periodogram.
Returns
-------
pgram : array_like
Lomb-Scargle periodogram.
Raises
------
ValueError
If the input arrays `x` and `y` do not have the same shape.
Notes
-----
This subroutine calculates the periodogram using a slightly
modified algorithm due to Townsend [3]_ which allows the
periodogram to be calculated using only a single pass through
the input arrays for each frequency.
The algorithm running time scales roughly as O(x * freqs) or O(N^2)
for a large number of samples and frequencies.
References
----------
.. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced
data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976
.. [2] J.D. Scargle "Studies in astronomical time series analysis. II -
Statistical aspects of spectral analysis of unevenly spaced data",
The Astrophysical Journal, vol 263, pp. 835-853, 1982
.. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle
periodogram using graphics processing units.", The Astrophysical
Journal Supplement Series, vol 191, pp. 247-253, 2010
Examples
--------
>>> import scipy.signal
>>> import matplotlib.pyplot as plt
First define some input parameters for the signal:
>>> A = 2.
>>> w = 1.
>>> phi = 0.5 * np.pi
>>> nin = 1000
>>> nout = 100000
>>> frac_points = 0.9 # Fraction of points to select
Randomly select a fraction of an array with timesteps:
>>> r = np.random.rand(nin)
>>> x = np.linspace(0.01, 10*np.pi, nin)
>>> x = x[r >= frac_points]
Plot a sine wave for the selected times:
>>> y = A * np.sin(w*x+phi)
Define the array of frequencies for which to compute the periodogram:
>>> f = np.linspace(0.01, 10, nout)
Calculate Lomb-Scargle periodogram:
>>> import scipy.signal as signal
>>> pgram = signal.lombscargle(x, y, f, normalize=True)
Now make a plot of the input data:
>>> plt.subplot(2, 1, 1)
>>> plt.plot(x, y, 'b+')
Then plot the normalized periodogram:
>>> plt.subplot(2, 1, 2)
>>> plt.plot(f, pgram)
>>> plt.show()
"""
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
freqs = np.asarray(freqs, dtype=np.float64)
assert x.ndim == 1
assert y.ndim == 1
assert freqs.ndim == 1
if precenter:
pgram = _lombscargle(x, y - y.mean(), freqs)
else:
pgram = _lombscargle(x, y, freqs)
if normalize:
pgram *= 2 / np.dot(y, y)
return pgram
def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to 'boxcar'.
nfft : int, optional
Length of the FFT used. If `None` the length of `x` will be
used.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0018156616014838548
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
        x = x[tuple(s)]
nperseg = nfft
nfft = None
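    # A periodogram is a single-segment Welch estimate: delegate to welch with
    # zero overlap and nperseg equal to the (possibly truncated) signal length.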
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1):
r"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral
density by dividing the data into overlapping segments, computing a
modified periodogram for each segment and averaging the
periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method
[2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
r"""
Estimate the cross power spectral density, Pxy, using Welch's
method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap: int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and `fs` is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to
csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
    -----
By convention, Pxy is computed with the conjugate FFT of X
multiplied by the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd'):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 8``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Sxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Sxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'.
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are
['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is
equivalent to the output of `stft` with no padding or boundary
extension. 'magnitude' returns the absolute magnitude of the
STFT. 'angle' and 'phase' return the complex angle of the STFT,
with and without unwrapping, respectively.
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds
to the segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to welch's method, where the
entire data stream is averaged over, one may wish to use a smaller
overlap (or perhaps none at all) when computing a spectrogram, to
maintain some statistical independence between individual segments.
It is for this reason that the default window is a Tukey window with
1/8th of a window's length overlap at each end.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase']
if mode not in modelist:
raise ValueError('unknown value for mode {}, must be one of {}'
.format(mode, modelist))
# need to set default for nperseg before setting default for noverlap below
window, nperseg = _triage_segments(window, nperseg,
input_length=x.shape[axis])
# Less overlap than welch, so samples are more statisically independent
if noverlap is None:
noverlap = nperseg // 8
if mode == 'psd':
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='psd')
else:
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='stft')
if mode == 'magnitude':
Sxx = np.abs(Sxx)
elif mode in ['angle', 'phase']:
Sxx = np.angle(Sxx)
if mode == 'phase':
# Sxx has one additional dimension for time strides
if axis < 0:
axis -= 1
Sxx = np.unwrap(Sxx, axis=axis)
# mode =='complex' is same as `stft`, doesn't need modification
return freqs, time, Sxx
def check_COLA(window, nperseg, noverlap, tol=1e-10):
r"""
Check whether the Constant OverLap Add (COLA) constraint is met
Parameters
----------
window : str or tuple or array_like
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
nperseg : int
Length of each segment.
noverlap : int
Number of points to overlap between segments.
tol : float, optional
The allowed variance of a bin's weighted sum from the median bin
sum.
Returns
-------
verdict : bool
`True` if chosen combination satisfies COLA within `tol`,
`False` otherwise
See Also
--------
stft: Short Time Fourier Transform
istft: Inverse Short Time Fourier Transform
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA). This ensures that every point in the input data
is equally weighted, thereby avoiding aliasing and allowing full
reconstruction.
Some examples of windows that satisfy COLA:
- Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ...
- Bartlett window at overlap of 1/2, 3/4, 5/6, ...
- Hann window at 1/2, 2/3, 3/4, ...
- Any Blackman family window at 2/3 overlap
- Any window with ``noverlap = nperseg-1``
A very comprehensive list of other windows may be found in [2]_,
wherein the COLA condition is satisfied when the "Amplitude
Flatness" is unity.
.. versionadded:: 0.19.0
References
----------
.. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
Publishing, 2011,ISBN 978-0-9745607-3-1.
.. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
spectral density estimation by the Discrete Fourier transform
(DFT), including a comprehensive list of window functions and
some new at-top windows", 2002,
http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
Examples
--------
>>> from scipy import signal
Confirm COLA condition for rectangular window of 75% (3/4) overlap:
>>> signal.check_COLA(signal.boxcar(100), 100, 75)
True
COLA is not true for 25% (1/4) overlap, though:
>>> signal.check_COLA(signal.boxcar(100), 100, 25)
False
"Symmetrical" Hann window (for filter design) is not COLA:
>>> signal.check_COLA(signal.hann(120, sym=True), 120, 60)
False
"Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for
overlap of 1/2, 2/3, 3/4, etc.:
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 60)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 80)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 90)
True
"""
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
noverlap = int(noverlap)
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
step = nperseg - noverlap
    binsums = np.sum([win[ii*step:(ii+1)*step] for ii in range(nperseg//step)],
                     axis=0)
if nperseg % step != 0:
binsums[:nperseg % step] += win[-(nperseg % step):]
deviation = binsums - np.median(binsums)
return np.max(np.abs(deviation)) < tol
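# Illustrative sketch (not part of the original module; name and defaults are
# assumptions): a tiny helper mirroring the strided window-sum computed in
# check_COLA above, shown for a periodic Hann window.
def _cola_binsums_demo(nperseg=4, noverlap=2):
    """Return the per-bin window sums used by the COLA check.

    For the defaults, get_window('hann', 4) is [0, 0.5, 1, 0.5]; the two
    strides win[0:2] and win[2:4] sum to the constant vector [1., 1.], which
    is exactly the condition check_COLA accepts.
    """
    win = get_window('hann', nperseg)
    step = nperseg - noverlap
    return np.sum([win[ii * step:(ii + 1) * step]
                   for ii in range(nperseg // step)], axis=0)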
def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
detrend=False, return_onesided=True, boundary='zeros', padded=True,
axis=-1):
r"""
Compute the Short Time Fourier Transform (STFT).
STFTs can be used as a way of quantifying the change of a
nonstationary signal's frequency and phase content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`. When
specified, the COLA constraint must be met (see Notes below).
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to `False`.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned. Defaults to
`True`.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is
extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `True`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`, as is the
default.
axis : int, optional
Axis along which the STFT is computed; the default is over the
last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Zxx : ndarray
STFT of `x`. By default, the last axis of `Zxx` corresponds
to the segment times.
See Also
--------
istft: Inverse Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
welch: Power spectral density by Welch's method.
spectrogram: Spectrogram by Welch's method.
csd: Cross spectral density by Welch's method.
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA), and the input signal must have complete
windowing coverage (i.e. ``(x.shape[axis] - nperseg) %
(nperseg-noverlap) == 0``). The `padded` argument may be used to
accomplish this.
The COLA constraint ensures that every point in the input data is
equally weighted, thereby avoiding aliasing and allowing full
reconstruction. Whether a choice of `window`, `nperseg`, and
`noverlap` satisfy this constraint can be tested with
`check_COLA`.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim, "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the STFT's magnitude.
>>> f, t, Zxx = signal.stft(x, fs, nperseg=1000)
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided,
scaling='spectrum', axis=axis,
mode='stft', boundary=boundary,
padded=padded)
return freqs, time, Zxx
def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2):
r"""
Perform the inverse Short Time Fourier transform (iSTFT).
Parameters
----------
Zxx : array_like
STFT of the signal to be reconstructed. If a purely real array
is passed, it will be cast to a complex data type.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window. Must match the window used to generate the
STFT for faithful inversion.
nperseg : int, optional
Number of data points corresponding to each STFT segment. This
parameter must be specified if the number of data points per
segment is odd, or if the STFT was padded via ``nfft >
nperseg``. If `None`, the value depends on the shape of
`Zxx` and `input_onesided`. If `input_onesided` is True,
``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise,
``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`.
noverlap : int, optional
Number of points to overlap between segments. If `None`, half
of the segment length. Defaults to `None`. When specified, the
COLA constraint must be met (see Notes below), and should match
the parameter used to generate the STFT. Defaults to `None`.
nfft : int, optional
Number of FFT points corresponding to each STFT segment. This
parameter must be specified if the STFT was padded via ``nfft >
nperseg``. If `None`, the default values are the same as for
`nperseg`, detailed above, with one exception: if
`input_onesided` is True and
``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on
that value. This case allows the proper inversion of an
odd-length unpadded STFT using ``nfft=None``. Defaults to
`None`.
input_onesided : bool, optional
If `True`, interpret the input array as one-sided FFTs, such
as is returned by `stft` with ``return_onesided=True`` and
`numpy.fft.rfft`. If `False`, interpret the input as a
two-sided FFT. Defaults to `True`.
boundary : bool, optional
Specifies whether the input signal was extended at its
boundaries by supplying a non-`None` ``boundary`` argument to
`stft`. Defaults to `True`.
time_axis : int, optional
Where the time segments of the STFT are located; the default is
the last axis (i.e. ``axis=-1``).
freq_axis : int, optional
Where the frequency axis of the STFT is located; the default is
the penultimate axis (i.e. ``axis=-2``).
Returns
-------
t : ndarray
Array of output data times.
x : ndarray
iSTFT of `Zxx`.
See Also
--------
stft: Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
Notes
-----
In order to enable inversion of an STFT via the inverse STFT with
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA). This ensures that every point in the input data
is equally weighted, thereby avoiding aliasing and allowing full
reconstruction. Whether a choice of `window`, `nperseg`, and
`noverlap` satisfy this constraint can be tested with
`check_COLA`, by using ``nperseg = Zxx.shape[freq_axis]``.
An STFT which has been modified (via masking or otherwise) is not
guaranteed to correspond to an exactly realizable signal. This
function implements the iSTFT via the least-squares estimation
algorithm detailed in [2]_, which produces a signal that minimizes
the mean squared error between the STFT of the returned signal and
the modified STFT.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim, "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by
0.001 V**2/Hz of white noise sampled at 1024 Hz.
>>> fs = 1024
>>> N = 10*fs
>>> nperseg = 512
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> carrier = amp * np.sin(2*np.pi*50*time)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> x = carrier + noise
Compute the STFT, and plot its magnitude
>>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg)
>>> plt.figure()
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.ylim([f[1], f[-1]])
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.yscale('log')
>>> plt.show()
Zero the components that are 10% or less of the carrier magnitude,
then convert back to a time series via inverse STFT
>>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0)
>>> _, xrec = signal.istft(Zxx, fs)
Compare the cleaned signal with the original and true carrier signals.
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([2, 2.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
Note that the cleaned signal does not start as abruptly as the original,
since some of the coefficients of the transient were also removed:
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([0, 0.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
"""
# Make sure input is an ndarray of appropriate complex dtype
Zxx = np.asarray(Zxx) + 0j
freq_axis = int(freq_axis)
time_axis = int(time_axis)
if Zxx.ndim < 2:
raise ValueError('Input stft must be at least 2d!')
if freq_axis == time_axis:
raise ValueError('Must specify differing time and frequency axes!')
nseg = Zxx.shape[time_axis]
if input_onesided:
# Assume even segment length
n_default = 2*(Zxx.shape[freq_axis] - 1)
else:
n_default = Zxx.shape[freq_axis]
# Check windowing parameters
if nperseg is None:
nperseg = n_default
else:
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
if (input_onesided) and (nperseg == n_default + 1):
# Odd nperseg, no FFT padding
nfft = nperseg
else:
nfft = n_default
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
if not check_COLA(window, nperseg, noverlap):
raise ValueError('Window, STFT shape and noverlap do not satisfy the '
'COLA constraint.')
# Rearrange axes if necessary
if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2:
# Turn negative indices to positive for the call to transpose
if freq_axis < 0:
freq_axis = Zxx.ndim + freq_axis
if time_axis < 0:
time_axis = Zxx.ndim + time_axis
zouter = list(range(Zxx.ndim))
for ax in sorted([time_axis, freq_axis], reverse=True):
zouter.pop(ax)
Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis])
# Get window as array
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of {0}'.format(nperseg))
if input_onesided:
ifunc = np.fft.irfft
else:
ifunc = fftpack.ifft
xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :]
# Initialize output and normalization arrays
outputlength = nperseg + (nseg-1)*nstep
x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype)
norm = np.zeros(outputlength, dtype=xsubs.dtype)
if np.result_type(win, xsubs) != xsubs.dtype:
win = win.astype(xsubs.dtype)
xsubs *= win.sum() # This takes care of the 'spectrum' scaling
# Construct the output from the ifft segments
# This loop could perhaps be vectorized/strided somehow...
for ii in range(nseg):
# Window the ifft
x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win
norm[..., ii*nstep:ii*nstep+nperseg] += win**2
# Divide out normalization where non-tiny
x /= np.where(norm > 1e-10, norm, 1.0)
# Remove extension points
if boundary:
x = x[..., nperseg//2:-(nperseg//2)]
if input_onesided:
x = x.real
# Put axes back
if x.ndim > 1:
if time_axis != Zxx.ndim-1:
if freq_axis < time_axis:
time_axis -= 1
x = np.rollaxis(x, -1, time_axis)
time = np.arange(x.shape[0])/float(fs)
return time, x
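# Illustrative sketch: a minimal round trip through `stft` and `istft`.
# For a COLA-compliant window and an unmodified STFT the reconstruction
# should match the input to numerical precision. The helper name
# `_istft_roundtrip_demo` is hypothetical and for demonstration only.
def _istft_roundtrip_demo():
    rng = np.random.RandomState(0)
    x = rng.randn(2048)
    _, _, Zxx = stft(x, fs=1.0, nperseg=128, noverlap=64)
    _, xrec = istft(Zxx, fs=1.0, nperseg=128, noverlap=64)
    # Defensive slice in case the reconstruction carries trailing padding.
    return np.allclose(x, xrec[:x.size])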
def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', axis=-1):
r"""
Estimate the magnitude squared coherence estimate, Cxy, of
discrete-time signals X and Y using Welch's method.
``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
spectral density estimates of X and Y, and `Pxy` is the cross
spectral density estimate of X and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of
Signals" Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
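# Illustrative sketch: the magnitude squared coherence of a signal with
# itself is identically one, because `Pxy` reduces to `Pxx`. The helper
# name `_coherence_self_demo` is hypothetical and for demonstration only.
def _coherence_self_demo():
    rng = np.random.RandomState(0)
    x = rng.randn(4096)
    _, Cxy = coherence(x, x, fs=1.0, nperseg=256)
    return np.allclose(Cxy, 1.0)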
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='spectrum', axis=-1, mode='psd', boundary=None,
padded=False):
"""
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between
the stft, psd, csd, and spectrogram functions. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Parameters
----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
the same object in memory as `x` (i.e. ``_spectral_helper(x,
x, ...)``), the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross
spectrum ('spectrum') where `Pxy` has units of V**2, if `x`
and `y` are measured in V and `fs` is measured in Hz.
Defaults to 'density'
axis : int, optional
Axis along which the FFTs are computed; the default is over the
last axis (i.e. ``axis=-1``).
mode : str {'psd', 'stft'}, optional
Defines what kind of return values are expected. Defaults to
'psd'.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
`None`.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `False`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`.
Returns
-------
freqs : ndarray
Array of sample frequencies.
t : ndarray
Array of times corresponding to each data segment
result : ndarray
Array of output data, contents dependent on the *mode* kwarg.
References
----------
.. [1] Stack Overflow, "Rolling window for 1D arrays in Numpy?",
http://stackoverflow.com/a/6811241
.. [2] Stack Overflow, "Using strides for an efficient moving
average filter", http://stackoverflow.com/a/4947453
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
if mode not in ['psd', 'stft']:
raise ValueError("Unknown value for mode %s, must be one of: "
"{'psd', 'stft'}" % mode)
boundary_funcs = {'even': even_ext,
'odd': odd_ext,
'constant': const_ext,
'zeros': zero_ext,
None: None}
if boundary not in boundary_funcs:
raise ValueError("Unknown boundary option '{0}', must be one of: {1}"
.format(boundary, list(boundary_funcs.keys())))
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is 'stft'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x, y, np.complex64)
else:
outdtype = np.result_type(x, np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
# Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
if nperseg is not None: # if specified by user
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
# parse window; if array like, then set nperseg = win.shape
win, nperseg = _triage_segments(window, nperseg,input_length=x.shape[-1])
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
# Padding occurs after boundary extension, so that the extended signal ends
# in zeros, instead of introducing an impulse at the end.
# I.e. if x = [..., 3, 2]
# extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
# pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]
if boundary is not None:
ext_func = boundary_funcs[boundary]
x = ext_func(x, nperseg//2, axis=-1)
if not same_data:
y = ext_func(y, nperseg//2, axis=-1)
if padded:
# Pad to integer number of windowed segments
# I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
zeros_shape = list(x.shape[:-1]) + [nadd]
x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
if not same_data:
zeros_shape = list(y.shape[:-1]) + [nadd]
y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if np.result_type(win,np.complex64) != outdtype:
win = win.astype(outdtype)
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if mode == 'stft':
scale = np.sqrt(scale)
if return_onesided:
if np.iscomplexobj(x):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'twosided'
if sides == 'twosided':
freqs = fftpack.fftfreq(nfft, 1/fs)
elif sides == 'onesided':
freqs = np.fft.rfftfreq(nfft, 1/fs)
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
sides)
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
result *= scale
if sides == 'onesided' and mode == 'psd':
if nfft % 2:
result[..., 1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[..., 1:-1] *= 2
time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
nperseg - noverlap)/float(fs)
if boundary is not None:
time -= (nperseg/2) / fs
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'stft':
result = result.real
# Output is going to have new last axis for time/window index, so a
# negative axis index shifts down one
if axis < 0:
axis -= 1
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
return freqs, time, result
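# Illustrative sketch: for 1-D input the helper returns one column per
# windowed segment, with frequency along the first axis. Averaging the
# 'psd'-mode result over the segment axis is roughly what `welch`/`csd` do
# afterwards (stated as a guide, not as the exact public contract). The
# helper name `_spectral_helper_demo` is hypothetical.
def _spectral_helper_demo():
    rng = np.random.RandomState(0)
    x = rng.randn(1024)
    freqs, t, Sxx = _spectral_helper(x, x, fs=1.0, nperseg=128, noverlap=64,
                                     mode='psd', scaling='density')
    assert Sxx.shape == (freqs.size, t.size)
    return Sxx.mean(axis=-1)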
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
"""
Calculate windowed FFT, for internal use by
scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
`_spectral_helper`. All input validation is performed there, and the
data axis is assumed to be the last axis of x. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Returns
-------
result : ndarray
Array of FFT data
References
----------
.. [1] Stack Overflow, "Repeat NumPy array without replicating
data?", http://stackoverflow.com/a/5568169
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
# Create a strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
if sides == 'twosided':
func = fftpack.fft
else:
result = result.real
func = np.fft.rfft
result = func(result, n=nfft)
return result
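# Illustrative sketch: the stride trick used in `_fft_helper` creates
# overlapping segment views without copying data. For an 8-sample signal
# with nperseg=4 and noverlap=2 it yields three segments. The helper name
# `_strided_segments_demo` is hypothetical and for demonstration only.
def _strided_segments_demo():
    x = np.arange(8)
    nperseg, noverlap = 4, 2
    step = nperseg - noverlap
    shape = x.shape[:-1] + ((x.shape[-1] - noverlap) // step, nperseg)
    strides = x.strides[:-1] + (step * x.strides[-1], x.strides[-1])
    segments = np.lib.stride_tricks.as_strided(x, shape=shape,
                                               strides=strides)
    # segments -> [[0 1 2 3], [2 3 4 5], [4 5 6 7]]
    return segments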
def _triage_segments(window, nperseg, input_length):
"""
Parses window and nperseg arguments for spectrogram and _spectral_helper.
This is a helper function, not meant to be called externally.
Parameters
----------
window : string, tuple, or ndarray
If window is specified by a string or tuple and nperseg is not
specified, nperseg is set to the default of 256 and returns a window of
that length.
If instead the window is array_like and nperseg is not specified, then
nperseg is set to the length of the window. A ValueError is raised if
the user supplies both an array_like window and a value for nperseg but
nperseg does not equal the length of the window.
nperseg : int
Length of each segment
input_length: int
Length of input signal, i.e. x.shape[-1]. Used to test for errors.
Returns
-------
win : ndarray
window. If the function was called with a string or tuple, then this will hold
the actual array used as a window.
nperseg : int
Length of each segment. If window is str or tuple, nperseg is set to
256. If window is array_like, nperseg is set to the length of the
window.
"""
#parse window; if array like, then set nperseg = win.shape
if isinstance(window, string_types) or isinstance(window, tuple):
# if nperseg not specified
if nperseg is None:
nperseg = 256 # then change to default
if nperseg > input_length:
warnings.warn('nperseg = {0:d} is greater than input length '
' = {1:d}, using nperseg = {1:d}'
.format(nperseg, input_length))
nperseg = input_length
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if input_length < win.shape[-1]:
raise ValueError('window is longer than input signal')
if nperseg is None:
nperseg = win.shape[0]
elif nperseg is not None:
if nperseg != win.shape[0]:
raise ValueError("value specified for nperseg is different from"
" length of window")
return win, nperseg
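# Illustrative sketch: how the three argument patterns accepted by
# `_triage_segments` are resolved. The helper name `_triage_segments_demo`
# is hypothetical and for demonstration only.
def _triage_segments_demo():
    # String window and unspecified nperseg -> default of 256 (input permitting).
    win, nperseg = _triage_segments('hann', None, input_length=1000)
    assert nperseg == 256 and win.shape[0] == 256
    # Array window and unspecified nperseg -> nperseg taken from the window.
    win, nperseg = _triage_segments(np.ones(64), None, input_length=1000)
    assert nperseg == 64
    # Array window with a conflicting nperseg raises ValueError.
    try:
        _triage_segments(np.ones(64), 128, input_length=1000)
    except ValueError:
        return True
    return False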
| bsd-3-clause |
RecipeML/Recipe | recipe/preprocessors/fag.py | 1 | 1645 | # -*- coding: utf-8 -*-
"""
Copyright 2016 Walter José and Alex de Sá
This file is part of the RECIPE Algorithm.
The RECIPE is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your option)
any later version.
RECIPE is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. See http://www.gnu.org/licenses/.
"""
from sklearn.cluster import FeatureAgglomeration
def fag(args):
"""Uses scikit-learn's FeatureAgglomeration agglomerate features.
Parameters
----------
affinity : string
Metric used to compute the linkage. Can be “euclidean”, “l1”, “l2”, “manhattan”, “cosine”, or ‘precomputed’. If linkage is “ward”,
only “euclidean” is accepted.
linkage : {“ward”, “complete”, “average”}
Which linkage criterion to use.
The linkage criterion determines which distance to use between sets of features.
The algorithm will merge the pairs of cluster that minimize this criterion.
compute_full_tree : bool or ‘auto’
Stop early the construction of the tree at n_clusters
n_clusters : int
The number of clusters to find.
"""
affi = args[1]
link = args[2]
cft = False
if(args[3].find("True")!=-1):
cft = True
n_clust = int(args[4])
return FeatureAgglomeration(n_clusters=n_clust, affinity=affi,
connectivity=None,compute_full_tree=cft, linkage=link)
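# Illustrative sketch: the expected layout of ``args`` is inferred from the
# indexing above (position 0 appears to be a label that fag() does not use).
# The concrete values below are hypothetical; note that linkage 'ward'
# requires affinity 'euclidean'.
def _fag_demo():
    return fag(['FeatureAgglomeration', 'euclidean', 'ward', 'False', '2'])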
| gpl-3.0 |
treycausey/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 9 | 2843 | """
Testing for mean shift clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=500, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_estimate_bandwidth():
"""Test estimate_bandwidth"""
bandwidth = estimate_bandwidth(X, n_samples=300)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
""" Test MeanShift algorithm """
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_meanshift_predict():
"""Test MeanShift.predict"""
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_unfitted():
"""Non-regression: before fit, there should be not fitted attributes."""
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
"""
Test the bin seeding technique which can be used in the mean shift
algorithm
"""
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.5, 1.5], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
test_bins = get_bin_seeds(X, 0.01, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(test_result) == 6)
| bsd-3-clause |
anhaidgroup/py_stringsimjoin | py_stringsimjoin/utils/generic_helper.py | 1 | 4541 |
import multiprocessing
import operator
import os
from six.moves import xrange
import pandas as pd
COMP_OP_MAP = {'>=': operator.ge,
'>': operator.gt,
'<=': operator.le,
'<': operator.lt,
'=': operator.eq,
'!=': operator.ne}
def get_output_row_from_tables(l_row, r_row,
l_key_attr_index, r_key_attr_index,
l_out_attrs_indices=None,
r_out_attrs_indices=None):
output_row = []
# add ltable id attr
output_row.append(l_row[l_key_attr_index])
# add rtable id attr
output_row.append(r_row[r_key_attr_index])
# add ltable output attributes
if l_out_attrs_indices:
for l_attr_index in l_out_attrs_indices:
output_row.append(l_row[l_attr_index])
# add rtable output attributes
if r_out_attrs_indices:
for r_attr_index in r_out_attrs_indices:
output_row.append(r_row[r_attr_index])
return output_row
def get_output_header_from_tables(l_key_attr, r_key_attr,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix):
output_header = []
output_header.append(l_out_prefix + l_key_attr)
output_header.append(r_out_prefix + r_key_attr)
if l_out_attrs:
for l_attr in l_out_attrs:
output_header.append(l_out_prefix + l_attr)
if r_out_attrs:
for r_attr in r_out_attrs:
output_header.append(r_out_prefix + r_attr)
return output_header
def convert_dataframe_to_list(table, join_attr_index,
remove_null=True):
table_list = []
for row in table.itertuples(index=False):
if remove_null and pd.isnull(row[join_attr_index]):
continue
table_list.append(tuple(row))
return table_list
def convert_dataframe_to_array(dataframe, proj_attrs, join_attr,
remove_nan=True):
if remove_nan:
projected_dataframe = dataframe[proj_attrs].dropna(0,
subset=[join_attr])
else:
projected_dataframe = dataframe[proj_attrs]
return projected_dataframe.values
def build_dict_from_table(table, key_attr_index, join_attr_index,
remove_null=True):
table_dict = {}
for row in table.itertuples(index=False):
if remove_null and pd.isnull(row[join_attr_index]):
continue
table_dict[row[key_attr_index]] = tuple(row)
return table_dict
def find_output_attribute_indices(original_columns, output_attributes):
output_attribute_indices = []
if output_attributes is not None:
for attr in output_attributes:
output_attribute_indices.append(original_columns.index(attr))
return output_attribute_indices
def split_table(table, num_splits):
splits = []
split_size = 1.0/num_splits*len(table)
for i in xrange(num_splits):
splits.append(table[int(round(i*split_size)):
int(round((i+1)*split_size))])
return splits
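# Illustrative sketch: split_table keeps the pieces as equal as possible by
# rounding the split boundaries; e.g. a 10-row list split three ways gives
# pieces of size 3, 4 and 3. The helper name `_split_table_demo` is
# hypothetical and for demonstration only.
def _split_table_demo():
    rows = list(range(10))
    splits = split_table(rows, 3)
    assert [len(s) for s in splits] == [3, 4, 3]
    return splits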
def remove_non_ascii(s):
return ''.join(i for i in s if ord(i) < 128)
def get_num_processes_to_launch(n_jobs):
# determine number of processes to launch parallely
num_procs = n_jobs
if n_jobs < 0:
num_cpus = multiprocessing.cpu_count()
num_procs = num_cpus + 1 + n_jobs
return max(num_procs, 1)
def get_install_path():
current_dir = os.path.dirname(os.path.realpath(__file__))
return os.path.dirname(current_dir)
def remove_redundant_attrs(out_attrs, key_attr):
# this method removes key_attr from out_attrs, if present.
# further it removes redundant attributes in out_attrs,
# but preserves the order of attributes.
if out_attrs is None:
return out_attrs
uniq_attrs = []
seen_attrs = {}
for attr in out_attrs:
if attr == key_attr or seen_attrs.get(attr) is not None:
continue
uniq_attrs.append(attr)
seen_attrs[attr] = True
return uniq_attrs
def get_attrs_to_project(out_attrs, key_attr, join_attr):
# this method assumes key_attr has already been removed from
# out_attrs, if present.
proj_attrs = [key_attr, join_attr]
if out_attrs is not None:
for attr in out_attrs:
if attr != join_attr:
proj_attrs.append(attr)
return proj_attrs
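# Illustrative sketch: how the two helpers above compose when preparing the
# projection list for a join. The attribute names used here are hypothetical.
def _projection_demo():
    out_attrs = remove_redundant_attrs(['name', 'id', 'name', 'zip'], 'id')
    # -> ['name', 'zip'] (key attribute and duplicates dropped, order kept)
    proj_attrs = get_attrs_to_project(out_attrs, 'id', 'name')
    # -> ['id', 'name', 'zip'] (key and join attributes come first)
    return proj_attrs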
| bsd-3-clause |
yarikoptic/pystatsmodels | statsmodels/tsa/tests/test_stattools.py | 3 | 7864 | from statsmodels.tsa.stattools import (adfuller, acf, pacf_ols, pacf_yw,
pacf, grangercausalitytests,
coint, acovf)
from statsmodels.tsa.base.datetools import dates_from_range
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal, assert_raises
from numpy import genfromtxt#, concatenate
from statsmodels.datasets import macrodata, sunspots
from pandas import Series, Index
import os
DECIMAL_8 = 8
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
class CheckADF(object):
"""
Test Augmented Dickey-Fuller
Test values taken from Stata.
"""
levels = ['1%', '5%', '10%']
data = macrodata.load()
x = data.data['realgdp']
y = data.data['infl']
def test_teststat(self):
assert_almost_equal(self.res1[0], self.teststat, DECIMAL_5)
def test_pvalue(self):
assert_almost_equal(self.res1[1], self.pvalue, DECIMAL_5)
def test_critvalues(self):
critvalues = [self.res1[4][lev] for lev in self.levels]
assert_almost_equal(critvalues, self.critvalues, DECIMAL_2)
class TestADFConstant(CheckADF):
"""
Dickey-Fuller test for unit root
"""
def __init__(self):
self.res1 = adfuller(self.x, regression="c", autolag=None,
maxlag=4)
self.teststat = .97505319
self.pvalue = .99399563
self.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend(CheckADF):
"""
"""
def __init__(self):
self.res1 = adfuller(self.x, regression="ct", autolag=None,
maxlag=4)
self.teststat = -1.8566374
self.pvalue = .67682968
self.critvalues = [-4.007, -3.437, -3.137]
#class TestADFConstantTrendSquared(CheckADF):
# """
# """
# pass
#TODO: get test values from R?
class TestADFNoConstant(CheckADF):
"""
"""
def __init__(self):
self.res1 = adfuller(self.x, regression="nc", autolag=None,
maxlag=4)
self.teststat = 3.5227498
self.pvalue = .99999 # Stata does not return a p-value for noconstant.
# Tau^max in MacKinnon (1994) is missing, so it is
# assumed that its right-tail is well-behaved
self.critvalues = [-2.587, -1.950, -1.617]
# No Unit Root
class TestADFConstant2(CheckADF):
def __init__(self):
self.res1 = adfuller(self.y, regression="c", autolag=None,
maxlag=1)
self.teststat = -4.3346988
self.pvalue = .00038661
self.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend2(CheckADF):
def __init__(self):
self.res1 = adfuller(self.y, regression="ct", autolag=None,
maxlag=1)
self.teststat = -4.425093
self.pvalue = .00199633
self.critvalues = [-4.006, -3.437, -3.137]
class TestADFNoConstant2(CheckADF):
def __init__(self):
self.res1 = adfuller(self.y, regression="nc", autolag=None,
maxlag=1)
self.teststat = -2.4511596
self.pvalue = 0.013747 # Stata does not return a p-value for noconstant
# this value is just taken from our results
self.critvalues = [-2.587,-1.950,-1.617]
class CheckCorrGram(object):
"""
Set up for ACF, PACF tests.
"""
data = macrodata.load()
x = data.data['realgdp']
filename = os.path.dirname(os.path.abspath(__file__))+\
"/results/results_corrgram.csv"
results = genfromtxt(open(filename, "rb"), delimiter=",", names=True,dtype=float)
#not needed: add 1. for lag zero
#self.results['acvar'] = np.concatenate(([1.], self.results['acvar']))
class TestACF(CheckCorrGram):
"""
Test Autocorrelation Function
"""
def __init__(self):
self.acf = self.results['acvar']
#self.acf = np.concatenate(([1.], self.acf))
self.qstat = self.results['Q1']
self.res1 = acf(self.x, nlags=40, qstat=True, alpha=.05)
self.confint_res = self.results[['acvar_lb','acvar_ub']].view((float,
2))
def test_acf(self):
assert_almost_equal(self.res1[0][1:41], self.acf, DECIMAL_8)
def test_confint(self):
centered = self.res1[1] - self.res1[1].mean(1)[:,None]
assert_almost_equal(centered[1:41], self.confint_res, DECIMAL_8)
def test_qstat(self):
assert_almost_equal(self.res1[2][:40], self.qstat, DECIMAL_3)
# 3 decimal places because of stata rounding
# def pvalue(self):
# pass
#NOTE: shouldn't need testing if Q stat is correct
class TestACF_FFT(CheckCorrGram):
"""
Test Autocorrelation Function using FFT
"""
def __init__(self):
self.acf = self.results['acvarfft']
self.qstat = self.results['Q1']
self.res1 = acf(self.x, nlags=40, qstat=True, fft=True)
def test_acf(self):
assert_almost_equal(self.res1[0][1:], self.acf, DECIMAL_8)
def test_qstat(self):
#todo why is res1/qstat 1 short
assert_almost_equal(self.res1[1], self.qstat, DECIMAL_3)
class TestPACF(CheckCorrGram):
def __init__(self):
self.pacfols = self.results['PACOLS']
self.pacfyw = self.results['PACYW']
def test_ols(self):
pacfols, confint = pacf(self.x, nlags=40, alpha=.05, method="ols")
assert_almost_equal(pacfols[1:], self.pacfols, DECIMAL_6)
centered = confint - confint.mean(1)[:,None]
# from edited Stata ado file
res = [[-.1375625, .1375625]] * 40
assert_almost_equal(centered[1:41], res, DECIMAL_6)
def test_yw(self):
pacfyw = pacf_yw(self.x, nlags=40, method="mle")
assert_almost_equal(pacfyw[1:], self.pacfyw, DECIMAL_8)
def test_ld(self):
pacfyw = pacf_yw(self.x, nlags=40, method="mle")
pacfld = pacf(self.x, nlags=40, method="ldb")
assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
pacfyw = pacf(self.x, nlags=40, method="yw")
pacfld = pacf(self.x, nlags=40, method="ldu")
assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
class CheckCoint(object):
"""
Test Cointegration Test Results for 2-variable system
Test values taken from Stata
"""
levels = ['1%', '5%', '10%']
data = macrodata.load()
y1 = data.data['realcons']
y2 = data.data['realgdp']
def test_tstat(self):
assert_almost_equal(self.coint_t,self.teststat, DECIMAL_4)
class TestCoint_t(CheckCoint):
"""
Get AR(1) parameter on residuals
"""
def __init__(self):
self.coint_t = coint(self.y1, self.y2, regression ="c")[0]
self.teststat = -1.8208817
def test_grangercausality():
# some example data
mdata = macrodata.load().data
mdata = mdata[['realgdp','realcons']]
data = mdata.view((float,2))
data = np.diff(np.log(data), axis=0)
#R: lmtest:grangertest
r_result = [0.243097, 0.7844328, 195, 2] #f_test
gr = grangercausalitytests(data[:,1::-1], 2, verbose=False)
assert_almost_equal(r_result, gr[2][0]['ssr_ftest'], decimal=7)
assert_almost_equal(gr[2][0]['params_ftest'], gr[2][0]['ssr_ftest'],
decimal=7)
def test_pandasacovf():
s = Series(range(1, 11))
assert_almost_equal(acovf(s), acovf(s.values))
def test_acovf2d():
dta = sunspots.load_pandas().data
dta.index = Index(dates_from_range('1700', '2008'))
del dta["YEAR"]
res = acovf(dta)
assert_equal(res, acovf(dta.values))
X = np.random.random((10,2))
assert_raises(ValueError, acovf, X)
if __name__=="__main__":
import nose
# nose.runmodule(argv=[__file__, '-vvs','-x','-pdb'], exit=False)
import numpy as np
np.testing.run_module_suite()
| bsd-3-clause |
JrtPec/opengrid | opengrid/library/plotting.py | 2 | 6242 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 26 18:03:24 2014
@author: KDB
"""
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.dates import date2num, num2date, HourLocator, DayLocator, AutoDateLocator, DateFormatter
from matplotlib.colors import LogNorm
def carpet(timeseries, **kwargs):
"""
Draw a carpet plot of a pandas timeseries.
The carpet plot reads like a letter. Every day one line is added to the
bottom of the figure, minute for minute moving from left (morning) to right
(evening).
The color denotes the level of consumption and is scaled logarithmically.
If vmin and vmax are not provided as inputs, the minimum and maximum of the
colorbar represent the minimum and maximum of the (resampled) timeseries.
Parameters
----------
timeseries : pandas.Series
vmin, vmax : If not None, either or both of these values determine the range
of the z axis. If None, the range is given by the minimum and/or maximum
of the (resampled) timeseries.
zlabel, title : If not None, these determine the labels of z axis and/or
title. If None, the name of the timeseries is used if defined.
cmap : matplotlib.cm instance, default coolwarm
"""
# define optional input parameters
cmap = kwargs.pop('cmap', cm.coolwarm)
norm = kwargs.pop('norm', LogNorm())
interpolation = kwargs.pop('interpolation', 'nearest')
cblabel = kwargs.pop('zlabel', timeseries.name if timeseries.name else '')
title = kwargs.pop('title', 'carpet plot: ' + timeseries.name if timeseries.name else '')
# data preparation
if timeseries.dropna().empty:
print('skipped {} - no data'.format(title))
return
ts = timeseries.resample('min', how='mean', label='left', closed='left')
vmin = max(0.1, kwargs.pop('vmin', ts[ts > 0].min()))
vmax = max(vmin, kwargs.pop('vmax', ts.quantile(.999)))
# convert to dataframe with date as index and time as columns by
# first replacing the index by a MultiIndex
# tz_convert('UTC'): workaround for https://github.com/matplotlib/matplotlib/issues/3896
mpldatetimes = date2num(ts.index.tz_convert('UTC').astype(dt.datetime))
ts.index = pd.MultiIndex.from_arrays(
[np.floor(mpldatetimes), 2 + mpldatetimes % 1]) # '2 +': matplotlib bug workaround.
# and then unstacking the second index level to columns
df = ts.unstack()
# data plotting
fig, ax = plt.subplots()
# define the extent of the axes (remark the +- 0.5 for the y axis in order to obtain aligned date ticks)
extent = [df.columns[0], df.columns[-1], df.index[-1] + 0.5, df.index[0] - 0.5]
im = plt.imshow(df, vmin=vmin, vmax=vmax, extent=extent, cmap=cmap, aspect='auto', norm=norm,
interpolation=interpolation, **kwargs)
# figure formatting
# x axis
ax.xaxis_date()
ax.xaxis.set_major_locator(HourLocator(interval=2))
ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))
ax.xaxis.grid(True)
plt.xlabel('UTC Time')
# y axis
ax.yaxis_date()
dmin, dmax = ax.yaxis.get_data_interval()
number_of_days = (num2date(dmax) - num2date(dmin)).days
# AutoDateLocator is not suited in case few data is available
if abs(number_of_days) <= 35:
ax.yaxis.set_major_locator(DayLocator())
else:
ax.yaxis.set_major_locator(AutoDateLocator())
ax.yaxis.set_major_formatter(DateFormatter("%a, %d %b %Y"))
# plot colorbar
cbticks = np.logspace(np.log10(vmin), np.log10(vmax), 11, endpoint=True)
cb = plt.colorbar(format='%.0f', ticks=cbticks)
cb.set_label(cblabel)
# plot title
plt.title(title)
return im
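# Illustrative sketch: a minimal call of `carpet` on one week of synthetic
# 15-minute data. The index must be timezone-aware because the function
# calls ``tz_convert('UTC')``, and the sketch assumes the older pandas
# resample API (``how=`` keyword) used elsewhere in this module. The helper
# name `_carpet_demo` is hypothetical and for demonstration only.
def _carpet_demo():
    index = pd.date_range('2016-01-01', periods=7 * 96, freq='15min',
                          tz='Europe/Brussels')
    ts = pd.Series(np.random.rand(len(index)) * 100 + 0.1, index=index,
                   name='demo consumption [W]')
    return carpet(ts, vmin=1, vmax=150)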
def fanchart(timeseries, **kwargs):
"""
Draw a fan chart of the daily consumption profile.
The fan chart shows the different quantiles of the daily consumption, with
the blue line representing the median, and the black line the average.
By default, the consumption of the whole day is taken, but one can select
the hours of interest, e.g. night time standby consumption.
Parameters
----------
timeseries : pandas.Series
start_hour, end_hour : int or float, optional
Start and end hours of period of interest, default values are 0, 24
As of now, ensure that start_hour < end_hour
ylabel, title : str
If not None, these determine the labels of y axis and/or title.
If None, the name of the timeseries is used if defined.
"""
start_hour = 2. + kwargs.pop('start_hour', 0.) / 24.
end_hour = 2. + kwargs.pop('end_hour', 24.) / 24.
ylabel = kwargs.pop('ylabel', timeseries.name if timeseries.name else '')
title = kwargs.pop('title', 'fan chart: ' + timeseries.name if timeseries.name else '')
# data preparation
if timeseries.dropna().empty:
print('skipped {} - no data'.format(title))
return
ts = timeseries.resample('min', how='mean', label='left', closed='left')
# convert to dataframe with date as index and time as columns by
# first replacing the index by a MultiIndex
# tz_convert('UTC'): workaround for https://github.com/matplotlib/matplotlib/issues/3896
mpldatetimes = date2num(ts.index.tz_convert('UTC').astype(dt.datetime))
ts.index = pd.MultiIndex.from_arrays(
[np.floor(mpldatetimes), 2 + mpldatetimes % 1]) # '2 +': matplotlib bug workaround.
# and then unstacking the second index level to columns
df = ts.unstack()
df = df.T.truncate(start_hour, end_hour)
num = 20
num_max = 4
df_quant = df.quantile(np.linspace(0., 1., 2 * num + 1))
# data plotting
fig, ax = plt.subplots()
im = plt.plot(df.columns, df_quant.iloc[num], 'b', label='median')
for i in range(1, num):
plt.fill_between(df.columns, df_quant.iloc[num - i], df_quant.iloc[min(num + i, 2 * num - num_max)], color='b',
alpha=0.05)
plt.plot(df.columns, df.mean(), 'k--', label='mean')
plt.legend()
# x axis
ax.xaxis_date()
plt.xlim(df.columns[0], df.columns[-1])
plt.ylabel(ylabel)
# plot title
plt.title(title)
plt.grid(True)
return im
| apache-2.0 |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/dask/bag/tests/test_bag.py | 2 | 40727 | # coding=utf-8
from __future__ import absolute_import, division, print_function
import pytest
import math
import os
import random
import sys
from collections import Iterator
from itertools import repeat
import partd
from toolz import merge, join, filter, identity, valmap, groupby, pluck
import dask
import dask.bag as db
from dask.bag.core import (Bag, lazify, lazify_task, map, collect,
reduceby, reify, partition, inline_singleton_lists,
optimize, from_delayed)
from dask.bag.utils import assert_eq
from dask.compatibility import BZ2File, GzipFile, PY2
from dask.delayed import Delayed
from dask.utils import filetexts, tmpfile, tmpdir
from dask.utils_test import inc, add
dsk = {('x', 0): (range, 5),
('x', 1): (range, 5),
('x', 2): (range, 5)}
L = list(range(5)) * 3
b = Bag(dsk, 'x', 3)
def iseven(x):
return x % 2 == 0
def isodd(x):
return x % 2 == 1
def test_Bag():
assert b.name == 'x'
assert b.npartitions == 3
def test_keys():
assert b.__dask_keys__() == sorted(dsk.keys())
def test_bag_map():
b = db.from_sequence(range(100), npartitions=10)
b2 = db.from_sequence(range(100, 200), npartitions=10)
x = b.compute()
x2 = b2.compute()
def myadd(a=1, b=2, c=3):
return a + b + c
assert db.map(myadd, b).compute() == list(map(myadd, x))
assert db.map(myadd, a=b).compute() == list(map(myadd, x))
assert db.map(myadd, b, b2).compute() == list(map(myadd, x, x2))
assert db.map(myadd, b, 10).compute() == [myadd(i, 10) for i in x]
assert db.map(myadd, 10, b=b).compute() == [myadd(10, b=i) for i in x]
sol = [myadd(i, b=j, c=100) for (i, j) in zip(x, x2)]
assert db.map(myadd, b, b=b2, c=100).compute() == sol
sol = [myadd(i, c=100) for (i, j) in zip(x, x2)]
assert db.map(myadd, b, c=100).compute() == sol
x_sum = sum(x)
sol = [myadd(x_sum, b=i, c=100) for i in x2]
assert db.map(myadd, b.sum(), b=b2, c=100).compute() == sol
sol = [myadd(i, b=x_sum, c=100) for i in x2]
assert db.map(myadd, b2, b.sum(), c=100).compute() == sol
sol = [myadd(a=100, b=x_sum, c=i) for i in x2]
assert db.map(myadd, a=100, b=b.sum(), c=b2).compute() == sol
a = dask.delayed(10)
assert db.map(myadd, b, a).compute() == [myadd(i, 10) for i in x]
assert db.map(myadd, b, b=a).compute() == [myadd(i, b=10) for i in x]
# Mismatched npartitions
fewer_parts = db.from_sequence(range(100), npartitions=5)
with pytest.raises(ValueError):
db.map(myadd, b, fewer_parts)
# No bags
with pytest.raises(ValueError):
db.map(myadd, b.sum(), 1, 2)
# Unequal partitioning
unequal = db.from_sequence(range(110), npartitions=10)
with pytest.raises(ValueError):
db.map(myadd, b, unequal, c=b2).compute()
with pytest.raises(ValueError):
db.map(myadd, b, b=unequal, c=b2).compute()
def test_map_method():
b = db.from_sequence(range(100), npartitions=10)
b2 = db.from_sequence(range(100, 200), npartitions=10)
x = b.compute()
x2 = b2.compute()
def myadd(a, b=2, c=3):
return a + b + c
assert b.map(myadd).compute() == list(map(myadd, x))
assert b.map(myadd, b2).compute() == list(map(myadd, x, x2))
assert b.map(myadd, 10).compute() == [myadd(i, 10) for i in x]
assert b.map(myadd, b=10).compute() == [myadd(i, b=10) for i in x]
assert (b.map(myadd, b2, c=10).compute() ==
[myadd(i, j, 10) for (i, j) in zip(x, x2)])
x_sum = sum(x)
assert (b.map(myadd, b.sum(), c=10).compute() ==
[myadd(i, x_sum, 10) for i in x])
# check that map works with multiarg functions. Can be removed after
# deprecated behavior is removed
assert b.map(add, b2).compute() == list(map(add, x, x2))
# check that map works with vararg functions. Can be removed after
# deprecated behavior is removed
def vararg_inc(*args):
return inc(*args)
assert_eq(b.map(vararg_inc), list(map(inc, x)))
def test_starmap():
data = [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)]
b = db.from_sequence(data, npartitions=2)
def myadd(a, b, c=0):
return a + b + c
assert b.starmap(myadd).compute() == [myadd(*a) for a in data]
assert b.starmap(myadd, c=10).compute() == [myadd(*a, c=10) for a in data]
max_second = b.pluck(1).max()
assert (b.starmap(myadd, c=max_second).compute() ==
[myadd(*a, c=max_second.compute()) for a in data])
c = dask.delayed(10)
assert b.starmap(myadd, c=c).compute() == [myadd(*a, c=10) for a in data]
def test_filter():
c = b.filter(iseven)
expected = merge(dsk, dict(((c.name, i),
(reify, (filter, iseven, (b.name, i))))
for i in range(b.npartitions)))
assert c.dask == expected
assert c.name == b.filter(iseven).name
def test_remove():
f = lambda x: x % 2 == 0
c = b.remove(f)
assert list(c) == [1, 3] * 3
assert c.name == b.remove(f).name
def test_iter():
assert sorted(list(b)) == sorted(L)
assert sorted(list(b.map(inc))) == sorted(list(range(1, 6)) * 3)
@pytest.mark.parametrize('func', [str, repr])
def test_repr(func):
assert str(b.npartitions) in func(b)
assert b.name[:5] in func(b)
def test_pluck():
d = {('x', 0): [(1, 10), (2, 20)],
('x', 1): [(3, 30), (4, 40)]}
b = Bag(d, 'x', 2)
assert set(b.pluck(0)) == set([1, 2, 3, 4])
assert set(b.pluck(1)) == set([10, 20, 30, 40])
assert set(b.pluck([1, 0])) == set([(10, 1), (20, 2), (30, 3), (40, 4)])
assert b.pluck([1, 0]).name == b.pluck([1, 0]).name
def test_pluck_with_default():
b = db.from_sequence(['Hello', '', 'World'])
pytest.raises(IndexError, lambda: list(b.pluck(0)))
assert list(b.pluck(0, None)) == ['H', None, 'W']
assert b.pluck(0, None).name == b.pluck(0, None).name
assert b.pluck(0).name != b.pluck(0, None).name
def test_unzip():
b = db.from_sequence(range(100)).map(lambda x: (x, x + 1, x + 2))
one, two, three = b.unzip(3)
assert list(one) == list(range(100))
assert list(three) == [i + 2 for i in range(100)]
assert one.name == b.unzip(3)[0].name
assert one.name != two.name
def test_fold():
c = b.fold(add)
assert c.compute() == sum(L)
assert c.key == b.fold(add).key
c2 = b.fold(add, initial=10)
assert c2.key != c.key
assert c2.compute() == sum(L) + 10 * b.npartitions
assert c2.key == b.fold(add, initial=10).key
c = db.from_sequence(range(5), npartitions=3)
def binop(acc, x):
acc = acc.copy()
acc.add(x)
return acc
d = c.fold(binop, set.union, initial=set())
assert d.compute() == set(c)
assert d.key == c.fold(binop, set.union, initial=set()).key
d = db.from_sequence('hello')
assert set(d.fold(lambda a, b: ''.join([a, b]), initial='').compute()) == set('hello')
e = db.from_sequence([[1], [2], [3]], npartitions=2)
assert set(e.fold(add, initial=[]).compute(scheduler='sync')) == set([1, 2, 3])
def test_distinct():
assert sorted(b.distinct()) == [0, 1, 2, 3, 4]
assert b.distinct().name == b.distinct().name
assert 'distinct' in b.distinct().name
assert b.distinct().count().compute() == 5
bag = db.from_sequence([0] * 50, npartitions=50)
assert bag.filter(None).distinct().compute() == []
def test_frequencies():
c = b.frequencies()
assert dict(c) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}
c2 = b.frequencies(split_every=2)
assert dict(c2) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}
assert c.name == b.frequencies().name
assert c.name != c2.name
assert c2.name == b.frequencies(split_every=2).name
# test bag with empty partitions
b2 = db.from_sequence(range(20), partition_size=2)
b2 = b2.filter(lambda x: x < 10)
d = b2.frequencies()
assert dict(d) == dict(zip(range(10), [1] * 10))
bag = db.from_sequence([0, 0, 0, 0], npartitions=4)
bag2 = bag.filter(None).frequencies(split_every=2)
assert_eq(bag2, [])
def test_topk():
assert list(b.topk(4)) == [4, 4, 4, 3]
c = b.topk(4, key=lambda x: -x)
assert list(c) == [0, 0, 0, 1]
c2 = b.topk(4, key=lambda x: -x, split_every=2)
assert list(c2) == [0, 0, 0, 1]
assert c.name != c2.name
assert b.topk(4).name == b.topk(4).name
@pytest.mark.parametrize('npartitions', [1, 2])
def test_topk_with_non_callable_key(npartitions):
b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=npartitions)
assert list(b.topk(2, key=1)) == [(1, 10), (2, 9)]
assert list(b.topk(2, key=0)) == [(3, 8), (2, 9)]
assert b.topk(2, key=1).name == b.topk(2, key=1).name
def test_topk_with_multiarg_lambda():
b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=2)
assert list(b.topk(2, key=lambda a, b: b)) == [(1, 10), (2, 9)]
def test_lambdas():
assert list(b.map(lambda x: x + 1)) == list(b.map(inc))
def test_reductions():
assert int(b.count()) == 15
assert int(b.sum()) == 30
assert int(b.max()) == 4
assert int(b.min()) == 0
assert b.any().compute() is True
assert b.all().compute() is False
assert b.all().key == b.all().key
assert b.all().key != b.any().key
def test_reduction_names():
assert b.sum().name.startswith('sum')
assert b.reduction(sum, sum).name.startswith('sum')
assert any(isinstance(k, str) and k.startswith('max')
for k in b.reduction(sum, max).dask)
assert b.reduction(sum, sum, name='foo').name.startswith('foo')
def test_tree_reductions():
b = db.from_sequence(range(12))
c = b.reduction(sum, sum, split_every=2)
d = b.reduction(sum, sum, split_every=6)
e = b.reduction(sum, sum, split_every=5)
assert c.compute() == d.compute() == e.compute()
assert len(c.dask) > len(d.dask)
c = b.sum(split_every=2)
d = b.sum(split_every=5)
assert c.compute() == d.compute()
assert len(c.dask) > len(d.dask)
assert c.key != d.key
assert c.key == b.sum(split_every=2).key
assert c.key != b.sum().key
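# split_every bounds the fan-in of each tree-reduction step: smaller values add
# more intermediate combine tasks, which is why the graphs compared above grow
# as split_every shrinks.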
@pytest.mark.parametrize('npartitions', [1, 3, 4])
def test_aggregation(npartitions):
L = list(range(15))
b = db.range(15, npartitions=npartitions)
assert_eq(b.mean(), sum(L) / len(L))
assert_eq(b.sum(), sum(L))
assert_eq(b.count(), len(L))
@pytest.mark.parametrize('npartitions', [1, 10])
def test_non_splittable_reductions(npartitions):
np = pytest.importorskip('numpy')
data = list(range(100))
c = db.from_sequence(data, npartitions=npartitions)
assert_eq(c.mean(), np.mean(data))
assert_eq(c.std(), np.std(data))
def test_std():
assert_eq(b.std(), math.sqrt(2.0))
assert float(b.std()) == math.sqrt(2.0)
def test_var():
assert_eq(b.var(), 2.0)
assert float(b.var()) == 2.0
@pytest.mark.parametrize('transform', [
identity,
dask.delayed,
lambda x: db.from_sequence(x, npartitions=1)
])
def test_join(transform):
other = transform([1, 2, 3])
c = b.join(other, on_self=isodd, on_other=iseven)
assert_eq(c, list(join(iseven, [1, 2, 3], isodd, list(b))))
assert_eq(b.join(other, isodd),
list(join(isodd, [1, 2, 3], isodd, list(b))))
assert c.name == b.join(other, on_self=isodd, on_other=iseven).name
def test_foldby():
c = b.foldby(iseven, add, 0, add, 0)
assert (reduceby, iseven, add, (b.name, 0), 0) in list(c.dask.values())
assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())
assert c.name == b.foldby(iseven, add, 0, add, 0).name
c = b.foldby(iseven, lambda acc, x: acc + x)
assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())
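# foldby is a partition-wise grouped reduction: each partition is reduced per
# key with the binop (mirroring toolz.reduceby above) and the per-key partial
# results are then merged with the combine function.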
def test_foldby_tree_reduction():
dsk = list()
for n in [1, 7, 32]:
b = db.from_sequence(range(100), npartitions=n)
c = b.foldby(iseven, add)
dsk += [c]
for m in [False, None, 2, 3]:
d = b.foldby(iseven, add, split_every=m)
e = b.foldby(iseven, add, 0, split_every=m)
f = b.foldby(iseven, add, 0, add, split_every=m)
g = b.foldby(iseven, add, 0, add, 0, split_every=m)
            dsk += [d, e, f, g]
results = dask.compute(dsk)
first = results[0]
assert all([r == first for r in results])
def test_map_partitions():
assert list(b.map_partitions(len)) == [5, 5, 5]
assert b.map_partitions(len).name == b.map_partitions(len).name
assert b.map_partitions(lambda a: len(a) + 1).name != b.map_partitions(len).name
def test_map_partitions_args_kwargs():
x = [random.randint(-100, 100) for i in range(100)]
y = [random.randint(-100, 100) for i in range(100)]
dx = db.from_sequence(x, npartitions=10)
dy = db.from_sequence(y, npartitions=10)
def maximum(x, y=0):
y = repeat(y) if isinstance(y, int) else y
return [max(a, b) for (a, b) in zip(x, y)]
sol = maximum(x, y=10)
assert db.map_partitions(maximum, dx, y=10).compute() == sol
assert dx.map_partitions(maximum, y=10).compute() == sol
assert dx.map_partitions(maximum, 10).compute() == sol
sol = maximum(x, y)
assert db.map_partitions(maximum, dx, dy).compute() == sol
assert dx.map_partitions(maximum, y=dy).compute() == sol
assert dx.map_partitions(maximum, dy).compute() == sol
dy_mean = dy.mean().apply(int)
sol = maximum(x, int(sum(y) / len(y)))
assert dx.map_partitions(maximum, y=dy_mean).compute() == sol
assert dx.map_partitions(maximum, dy_mean).compute() == sol
dy_mean = dask.delayed(dy_mean)
assert dx.map_partitions(maximum, y=dy_mean).compute() == sol
assert dx.map_partitions(maximum, dy_mean).compute() == sol
def test_random_sample_size():
"""
Number of randomly sampled elements are in the expected range.
"""
a = db.from_sequence(range(1000), npartitions=5)
# we expect a size of approx. 100, but leave large margins to avoid
# random failures
assert 10 < len(list(a.random_sample(0.1, 42))) < 300
def test_random_sample_prob_range():
"""
Specifying probabilities outside the range [0, 1] raises ValueError.
"""
a = db.from_sequence(range(50), npartitions=5)
with pytest.raises(ValueError):
a.random_sample(-1)
with pytest.raises(ValueError):
a.random_sample(1.1)
def test_random_sample_repeated_computation():
"""
Repeated computation of a defined random sampling operation
generates identical results.
"""
a = db.from_sequence(range(50), npartitions=5)
b = a.random_sample(0.2)
assert list(b) == list(b) # computation happens here
def test_random_sample_different_definitions():
"""
Repeatedly defining a random sampling operation yields different results
upon computation if no random seed is specified.
"""
a = db.from_sequence(range(50), npartitions=5)
assert list(a.random_sample(0.5)) != list(a.random_sample(0.5))
assert a.random_sample(0.5).name != a.random_sample(0.5).name
def test_random_sample_random_state():
"""
Sampling with fixed random seed generates identical results.
"""
a = db.from_sequence(range(50), npartitions=5)
b = a.random_sample(0.5, 1234)
c = a.random_sample(0.5, 1234)
assert list(b) == list(c)
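# The optimizer tests below rely on two internals: ``reify`` materializes a lazy
# iterator into a concrete list, and ``lazify_task``/``lazify`` drop ``reify``
# calls whose output is immediately consumed by another lazy operation.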
def test_lazify_task():
task = (sum, (reify, (map, inc, [1, 2, 3])))
assert lazify_task(task) == (sum, (map, inc, [1, 2, 3]))
task = (reify, (map, inc, [1, 2, 3]))
assert lazify_task(task) == task
a = (reify, (map, inc, (reify, (filter, iseven, 'y'))))
b = (reify, (map, inc, (filter, iseven, 'y')))
assert lazify_task(a) == b
f = lambda x: x
def test_lazify():
a = {'x': (reify, (map, inc, (reify, (filter, iseven, 'y')))),
'a': (f, 'x'), 'b': (f, 'x')}
b = {'x': (reify, (map, inc, (filter, iseven, 'y'))),
'a': (f, 'x'), 'b': (f, 'x')}
assert lazify(a) == b
def test_inline_singleton_lists():
inp = {'b': (list, 'a'),
'c': (f, 'b', 1)}
out = {'c': (f, (list, 'a'), 1)}
assert inline_singleton_lists(inp) == out
out = {'c': (f, 'a', 1)}
assert optimize(inp, ['c'], rename_fused_keys=False) == out
inp = {'b': (list, 'a'),
'c': (f, 'b', 1),
'd': (f, 'b', 2)}
assert inline_singleton_lists(inp) == inp
inp = {'b': (4, 5)} # doesn't inline constants
assert inline_singleton_lists(inp) == inp
def test_take():
assert list(b.take(2)) == [0, 1]
assert b.take(2) == (0, 1)
assert isinstance(b.take(2, compute=False), Bag)
def test_take_npartitions():
assert list(b.take(6, npartitions=2)) == [0, 1, 2, 3, 4, 0]
assert b.take(6, npartitions=-1) == (0, 1, 2, 3, 4, 0)
assert b.take(3, npartitions=-1) == (0, 1, 2)
with pytest.raises(ValueError):
b.take(1, npartitions=5)
def test_take_npartitions_warn():
# Use single-threaded scheduler so warnings are properly captured in the
# same process
with dask.config.set(scheduler='sync'):
with pytest.warns(UserWarning):
b.take(100)
with pytest.warns(UserWarning):
b.take(7)
with pytest.warns(None) as rec:
b.take(7, npartitions=2)
assert len(rec) == 0
with pytest.warns(None) as rec:
b.take(7, warn=False)
assert len(rec) == 0
def test_map_is_lazy():
from dask.bag.core import map
assert isinstance(map(lambda x: x, [1, 2, 3]), Iterator)
def test_can_use_dict_to_make_concrete():
assert isinstance(dict(b.frequencies()), dict)
@pytest.mark.slow
@pytest.mark.network
@pytest.mark.skip(reason="Hangs")
def test_from_url():
a = db.from_url(['http://google.com', 'http://github.com'])
assert a.npartitions == 2
b = db.from_url('http://raw.githubusercontent.com/dask/dask/master/README.rst')
assert b.npartitions == 1
assert b'Dask\n' in b.take(10)
def test_read_text():
with filetexts({'a1.log': 'A\nB', 'a2.log': 'C\nD'}) as fns:
assert (set(line.strip() for line in db.read_text(fns)) ==
set('ABCD'))
assert (set(line.strip() for line in db.read_text('a*.log')) ==
set('ABCD'))
pytest.raises(ValueError, lambda: db.read_text('non-existent-*-path'))
def test_read_text_large():
with tmpfile() as fn:
with open(fn, 'wb') as f:
f.write(('Hello, world!' + os.linesep).encode() * 100)
b = db.read_text(fn, blocksize=100)
c = db.read_text(fn)
assert len(b.dask) > 5
assert list(map(str, b.str.strip())) == list(map(str, c.str.strip()))
d = db.read_text([fn], blocksize=100)
assert list(b) == list(d)
def test_read_text_encoding():
with tmpfile() as fn:
with open(fn, 'wb') as f:
f.write((u'你好!' + os.linesep).encode('gb18030') * 100)
b = db.read_text(fn, blocksize=100, encoding='gb18030')
c = db.read_text(fn, encoding='gb18030')
assert len(b.dask) > 5
assert (list(b.str.strip().map(lambda x: x.encode('utf-8'))) ==
list(c.str.strip().map(lambda x: x.encode('utf-8'))))
d = db.read_text([fn], blocksize=100, encoding='gb18030')
assert list(b) == list(d)
def test_read_text_large_gzip():
with tmpfile('gz') as fn:
f = GzipFile(fn, 'wb')
f.write(b'Hello, world!\n' * 100)
f.close()
with pytest.raises(ValueError):
db.read_text(fn, blocksize=50, linedelimiter='\n')
c = db.read_text(fn)
assert c.npartitions == 1
@pytest.mark.slow
@pytest.mark.network
def test_from_s3():
# note we don't test connection modes with aws_access_key and
# aws_secret_key because these are not on travis-ci
pytest.importorskip('s3fs')
five_tips = (u'total_bill,tip,sex,smoker,day,time,size\n',
u'16.99,1.01,Female,No,Sun,Dinner,2\n',
u'10.34,1.66,Male,No,Sun,Dinner,3\n',
u'21.01,3.5,Male,No,Sun,Dinner,3\n',
u'23.68,3.31,Male,No,Sun,Dinner,2\n')
# test compressed data
e = db.read_text('s3://tip-data/t*.gz', storage_options=dict(anon=True))
assert e.take(5) == five_tips
# test multiple keys in bucket
c = db.read_text(['s3://tip-data/tips.gz', 's3://tip-data/tips.json',
's3://tip-data/tips.csv'],
storage_options=dict(anon=True))
assert c.npartitions == 3
def test_from_sequence():
b = db.from_sequence([1, 2, 3, 4, 5], npartitions=3)
assert len(b.dask) == 3
assert set(b) == set([1, 2, 3, 4, 5])
def test_from_long_sequence():
L = list(range(1001))
b = db.from_sequence(L)
assert set(b) == set(L)
def test_product():
b2 = b.product(b)
assert b2.npartitions == b.npartitions**2
assert set(b2) == set([(i, j) for i in L for j in L])
x = db.from_sequence([1, 2, 3, 4])
y = db.from_sequence([10, 20, 30])
z = x.product(y)
assert set(z) == set([(i, j) for i in [1, 2, 3, 4] for j in [10, 20, 30]])
assert z.name != b2.name
assert z.name == x.product(y).name
def test_partition_collect():
with partd.Pickle() as p:
partition(identity, range(6), 3, p)
assert set(p.get(0)) == set([0, 3])
assert set(p.get(1)) == set([1, 4])
assert set(p.get(2)) == set([2, 5])
assert sorted(collect(identity, 0, p, '')) == [(0, [0]), (3, [3])]
def test_groupby():
c = b.groupby(identity)
result = dict(c)
    assert result == {0: [0, 0, 0],
1: [1, 1, 1],
2: [2, 2, 2],
3: [3, 3, 3],
4: [4, 4, 4]}
assert c.npartitions == b.npartitions
assert c.name == b.groupby(identity).name
assert c.name != b.groupby(lambda x: x + 1).name
def test_groupby_with_indexer():
b = db.from_sequence([[1, 2, 3], [1, 4, 9], [2, 3, 4]])
result = dict(b.groupby(0))
assert valmap(sorted, result) == {1: [[1, 2, 3], [1, 4, 9]],
2: [[2, 3, 4]]}
def test_groupby_with_npartitions_changed():
result = b.groupby(lambda x: x, npartitions=1)
result2 = dict(result)
    assert result2 == {0: [0, 0, 0],
1: [1, 1, 1],
2: [2, 2, 2],
3: [3, 3, 3],
4: [4, 4, 4]}
assert result.npartitions == 1
def test_concat():
a = db.from_sequence([1, 2, 3])
b = db.from_sequence([4, 5, 6])
c = db.concat([a, b])
assert list(c) == [1, 2, 3, 4, 5, 6]
assert c.name == db.concat([a, b]).name
def test_flatten():
b = db.from_sequence([[1], [2, 3]])
assert list(b.flatten()) == [1, 2, 3]
assert b.flatten().name == b.flatten().name
def test_concat_after_map():
a = db.from_sequence([1, 2])
b = db.from_sequence([4, 5])
result = db.concat([a.map(inc), b])
assert list(result) == [2, 3, 4, 5]
def test_args():
c = b.map(lambda x: x + 1)
d = Bag(*c._args)
assert list(c) == list(d)
assert c.npartitions == d.npartitions
def test_to_dataframe():
dd = pytest.importorskip('dask.dataframe')
pd = pytest.importorskip('pandas')
def check_parts(df, sol):
assert all((p.dtypes == sol.dtypes).all() for p in
dask.compute(*df.to_delayed()))
dsk = {('test', 0): [(1, 2)],
('test', 1): [],
('test', 2): [(10, 20), (100, 200)]}
b = Bag(dsk, 'test', 3)
sol = pd.DataFrame(b.compute(), columns=['a', 'b'])
# Elements are tuples
df = b.to_dataframe()
dd.utils.assert_eq(df, sol.rename(columns={'a': 0, 'b': 1}),
check_index=False)
df = b.to_dataframe(columns=['a', 'b'])
dd.utils.assert_eq(df, sol, check_index=False)
check_parts(df, sol)
df = b.to_dataframe(meta=[('a', 'i8'), ('b', 'i8')])
dd.utils.assert_eq(df, sol, check_index=False)
check_parts(df, sol)
# Elements are dictionaries
b = b.map(lambda x: dict(zip(['a', 'b'], x)))
df = b.to_dataframe()
dd.utils.assert_eq(df, sol, check_index=False)
check_parts(df, sol)
assert df._name == b.to_dataframe()._name
# With metadata specified
for meta in [sol, [('a', 'i8'), ('b', 'i8')]]:
df = b.to_dataframe(meta=meta)
dd.utils.assert_eq(df, sol, check_index=False)
check_parts(df, sol)
# Error to specify both columns and meta
with pytest.raises(ValueError):
b.to_dataframe(columns=['a', 'b'], meta=sol)
# Inference fails if empty first partition
b2 = b.filter(lambda x: x['a'] > 200)
with pytest.raises(ValueError):
b2.to_dataframe()
# Single column
b = b.pluck('a')
sol = sol[['a']]
df = b.to_dataframe(meta=sol)
dd.utils.assert_eq(df, sol, check_index=False)
check_parts(df, sol)
# Works with iterators and tuples
sol = pd.DataFrame({'a': range(100)})
b = db.from_sequence(range(100), npartitions=5)
for f in [iter, tuple]:
df = b.map_partitions(f).to_dataframe(meta=sol)
dd.utils.assert_eq(df, sol, check_index=False)
check_parts(df, sol)
ext_open = [('gz', GzipFile), ('', open)]
if not PY2:
ext_open.append(('bz2', BZ2File))
@pytest.mark.parametrize('ext,myopen', ext_open)
def test_to_textfiles(ext, myopen):
b = db.from_sequence(['abc', '123', 'xyz'], npartitions=2)
with tmpdir() as dir:
c = b.to_textfiles(os.path.join(dir, '*.' + ext), compute=False)
dask.compute(*c, scheduler='sync')
assert os.path.exists(os.path.join(dir, '1.' + ext))
f = myopen(os.path.join(dir, '1.' + ext), 'rb')
text = f.read()
if hasattr(text, 'decode'):
text = text.decode()
assert 'xyz' in text
f.close()
def test_to_textfiles_name_function_preserves_order():
seq = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']
b = db.from_sequence(seq, npartitions=16)
with tmpdir() as dn:
b.to_textfiles(dn)
out = db.read_text(os.path.join(dn, "*"), encoding='ascii').map(str).map(str.strip).compute()
assert seq == out
@pytest.mark.skipif(sys.version_info[:2] == (3,3), reason="Python3.3 uses pytest2.7.2, w/o warns method")
def test_to_textfiles_name_function_warn():
seq = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']
a = db.from_sequence(seq, npartitions=16)
with tmpdir() as dn:
with pytest.warns(None):
a.to_textfiles(dn, name_function=str)
def test_to_textfiles_encoding():
b = db.from_sequence([u'汽车', u'苹果', u'天气'], npartitions=2)
for ext, myopen in [('gz', GzipFile), ('bz2', BZ2File), ('', open)]:
if ext == 'bz2' and PY2:
continue
with tmpdir() as dir:
c = b.to_textfiles(os.path.join(dir, '*.' + ext), encoding='gb18030', compute=False)
dask.compute(*c)
assert os.path.exists(os.path.join(dir, '1.' + ext))
f = myopen(os.path.join(dir, '1.' + ext), 'rb')
text = f.read()
if hasattr(text, 'decode'):
text = text.decode('gb18030')
assert u'天气' in text
f.close()
def test_to_textfiles_inputs():
B = db.from_sequence(['abc', '123', 'xyz'], npartitions=2)
with tmpfile() as a:
with tmpfile() as b:
B.to_textfiles([a, b])
assert os.path.exists(a)
assert os.path.exists(b)
with tmpdir() as dirname:
B.to_textfiles(dirname)
assert os.path.exists(dirname)
assert os.path.exists(os.path.join(dirname, '0.part'))
with pytest.raises(TypeError):
B.to_textfiles(5)
def test_to_textfiles_endlines():
b = db.from_sequence(['a', 'b', 'c'], npartitions=1)
with tmpfile() as fn:
for last_endline in False, True:
b.to_textfiles([fn], last_endline=last_endline)
with open(fn, 'r') as f:
result = f.readlines()
assert result == ['a\n', 'b\n', 'c\n' if last_endline else 'c']
def test_string_namespace():
b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert 'split' in dir(b.str)
assert 'match' in dir(b.str)
assert list(b.str.lower()) == ['alice smith', 'bob jones', 'charlie smith']
assert list(b.str.split(' ')) == [['Alice', 'Smith'],
['Bob', 'Jones'],
['Charlie', 'Smith']]
assert list(b.str.match('*Smith')) == ['Alice Smith', 'Charlie Smith']
pytest.raises(AttributeError, lambda: b.str.sfohsofhf)
assert b.str.match('*Smith').name == b.str.match('*Smith').name
assert b.str.match('*Smith').name != b.str.match('*John').name
def test_string_namespace_with_unicode():
b = db.from_sequence([u'Alice Smith', u'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert list(b.str.lower()) == ['alice smith', 'bob jones', 'charlie smith']
def test_str_empty_split():
b = db.from_sequence([u'Alice Smith', u'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert list(b.str.split()) == [['Alice', 'Smith'],
['Bob', 'Jones'],
['Charlie', 'Smith']]
def test_map_with_iterator_function():
b = db.from_sequence([[1, 2, 3], [4, 5, 6]], npartitions=2)
def f(L):
for x in L:
yield x + 1
c = b.map(f)
assert list(c) == [[2, 3, 4], [5, 6, 7]]
def test_ensure_compute_output_is_concrete():
b = db.from_sequence([1, 2, 3])
result = b.map(lambda x: x + 1).compute()
assert not isinstance(result, Iterator)
class BagOfDicts(db.Bag):
def get(self, key, default=None):
return self.map(lambda d: d.get(key, default))
def set(self, key, value):
def setter(d):
d[key] = value
return d
return self.map(setter)
def test_bag_class_extend():
dictbag = BagOfDicts(*db.from_sequence([{'a': {'b': 'c'}}])._args)
assert dictbag.get('a').get('b').compute()[0] == 'c'
assert dictbag.get('a').set('d', 'EXTENSIBILITY!!!').compute()[0] == \
{'b': 'c', 'd': 'EXTENSIBILITY!!!'}
assert isinstance(dictbag.get('a').get('b'), BagOfDicts)
def test_gh715():
bin_data = u'\u20ac'.encode('utf-8')
with tmpfile() as fn:
with open(fn, 'wb') as f:
f.write(bin_data)
a = db.read_text(fn)
assert a.compute()[0] == bin_data.decode('utf-8')
def test_bag_compute_forward_kwargs():
x = db.from_sequence([1, 2, 3]).map(lambda a: a + 1)
x.compute(bogus_keyword=10)
def test_to_delayed():
b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=3)
a, b, c = b.map(inc).to_delayed()
assert all(isinstance(x, Delayed) for x in [a, b, c])
assert b.compute() == [4, 5]
b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=3)
t = b.sum().to_delayed()
assert isinstance(t, Delayed)
assert t.compute() == 21
def test_to_delayed_optimize_graph():
b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=1)
b2 = b.map(inc).map(inc).map(inc)
[d] = b2.to_delayed()
text = str(dict(d.dask))
assert text.count('reify') == 1
[d2] = b2.to_delayed(optimize_graph=False)
assert dict(d2.dask) == dict(b2.dask)
assert d.compute() == d2.compute()
x = b2.sum()
d = x.to_delayed()
text = str(dict(d.dask))
assert text.count('reify') == 0
d2 = x.to_delayed(optimize_graph=False)
assert dict(d2.dask) == dict(x.dask)
assert d.compute() == d2.compute()
[d] = b2.to_textfiles('foo.txt', compute=False)
text = str(dict(d.dask))
    assert text.count('reify') == 0
def test_from_delayed():
from dask.delayed import delayed
a, b, c = delayed([1, 2, 3]), delayed([4, 5, 6]), delayed([7, 8, 9])
bb = from_delayed([a, b, c])
assert bb.name == from_delayed([a, b, c]).name
assert isinstance(bb, Bag)
assert list(bb) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
asum_value = delayed(lambda X: sum(X))(a)
asum_item = db.Item.from_delayed(asum_value)
assert asum_value.compute() == asum_item.compute() == 6
def test_from_delayed_iterator():
from dask.delayed import delayed
def lazy_records(n):
return ({'operations': [1, 2]} for _ in range(n))
delayed_records = delayed(lazy_records, pure=False)
bag = db.from_delayed([delayed_records(5) for _ in range(5)])
assert db.compute(
bag.count(),
bag.pluck('operations').count(),
bag.pluck('operations').flatten().count(),
scheduler='sync',
) == (25, 25, 50)
def test_range():
for npartitions in [1, 7, 10, 28]:
b = db.range(100, npartitions=npartitions)
assert len(b.dask) == npartitions
assert b.npartitions == npartitions
assert list(b) == list(range(100))
@pytest.mark.parametrize("npartitions", [1, 7, 10, 28])
def test_zip(npartitions, hi=1000):
evens = db.from_sequence(range(0, hi, 2), npartitions=npartitions)
odds = db.from_sequence(range(1, hi, 2), npartitions=npartitions)
pairs = db.zip(evens, odds)
assert pairs.npartitions == npartitions
assert list(pairs) == list(zip(range(0, hi, 2), range(1, hi, 2)))
@pytest.mark.parametrize('nin', [1, 2, 7, 11, 23])
@pytest.mark.parametrize('nout', [1, 2, 5, 12, 23])
def test_repartition(nin, nout):
b = db.from_sequence(range(100), npartitions=nin)
c = b.repartition(npartitions=nout)
assert c.npartitions == nout
assert_eq(b, c)
results = dask.get(c.dask, c.__dask_keys__())
assert all(results)
def test_repartition_names():
b = db.from_sequence(range(100), npartitions=5)
c = b.repartition(2)
assert b.name != c.name
d = b.repartition(20)
assert b.name != c.name
assert c.name != d.name
c = b.repartition(5)
assert b is c
@pytest.mark.skipif('not db.core._implement_accumulate')
def test_accumulate():
parts = [[1, 2, 3], [4, 5], [], [6, 7]]
dsk = dict((('test', i), p) for (i, p) in enumerate(parts))
b = db.Bag(dsk, 'test', len(parts))
r = b.accumulate(add)
assert r.name == b.accumulate(add).name
assert r.name != b.accumulate(add, -1).name
assert r.compute() == [1, 3, 6, 10, 15, 21, 28]
assert b.accumulate(add, -1).compute() == [-1, 0, 2, 5, 9, 14, 20, 27]
assert b.accumulate(add).map(inc).compute() == [2, 4, 7, 11, 16, 22, 29]
b = db.from_sequence([1, 2, 3], npartitions=1)
assert b.accumulate(add).compute() == [1, 3, 6]
def test_groupby_tasks():
b = db.from_sequence(range(160), npartitions=4)
out = b.groupby(lambda x: x % 10, max_branch=4, shuffle='tasks')
partitions = dask.get(out.dask, out.__dask_keys__())
for a in partitions:
for b in partitions:
if a is not b:
assert not set(pluck(0, a)) & set(pluck(0, b))
b = db.from_sequence(range(1000), npartitions=100)
out = b.groupby(lambda x: x % 123, shuffle='tasks')
assert len(out.dask) < 100**2
partitions = dask.get(out.dask, out.__dask_keys__())
for a in partitions:
for b in partitions:
if a is not b:
assert not set(pluck(0, a)) & set(pluck(0, b))
b = db.from_sequence(range(10000), npartitions=345)
out = b.groupby(lambda x: x % 2834, max_branch=24, shuffle='tasks')
partitions = dask.get(out.dask, out.__dask_keys__())
for a in partitions:
for b in partitions:
if a is not b:
assert not set(pluck(0, a)) & set(pluck(0, b))
def test_groupby_tasks_names():
b = db.from_sequence(range(160), npartitions=4)
func = lambda x: x % 10
func2 = lambda x: x % 20
assert (set(b.groupby(func, max_branch=4, shuffle='tasks').dask) ==
set(b.groupby(func, max_branch=4, shuffle='tasks').dask))
assert (set(b.groupby(func, max_branch=4, shuffle='tasks').dask) !=
set(b.groupby(func, max_branch=2, shuffle='tasks').dask))
assert (set(b.groupby(func, max_branch=4, shuffle='tasks').dask) !=
set(b.groupby(func2, max_branch=4, shuffle='tasks').dask))
@pytest.mark.parametrize('size,npartitions,groups', [(1000, 20, 100),
(12345, 234, 1042)])
def test_groupby_tasks_2(size, npartitions, groups):
func = lambda x: x % groups
b = db.range(size, npartitions=npartitions).groupby(func, shuffle='tasks')
result = b.compute(scheduler='sync')
assert dict(result) == groupby(func, range(size))
def test_groupby_tasks_3():
func = lambda x: x % 10
b = db.range(20, npartitions=5).groupby(func, shuffle='tasks', max_branch=2)
result = b.compute(scheduler='sync')
assert dict(result) == groupby(func, range(20))
# assert b.npartitions == 5
def test_to_textfiles_empty_partitions():
with tmpdir() as d:
b = db.range(5, npartitions=5).filter(lambda x: x == 1).map(str)
b.to_textfiles(os.path.join(d, '*.txt'))
assert len(os.listdir(d)) == 5
def test_reduction_empty():
b = db.from_sequence(range(10), npartitions=100)
assert_eq(b.filter(lambda x: x % 2 == 0).max(), 8)
assert_eq(b.filter(lambda x: x % 2 == 0).min(), 0)
@pytest.mark.parametrize('npartitions', [1, 2, 4])
def test_reduction_empty_aggregate(npartitions):
b = db.from_sequence([0, 0, 0, 1], npartitions=npartitions).filter(None)
assert_eq(b.min(split_every=2), 1)
vals = db.compute(b.min(split_every=2), b.max(split_every=2), scheduler='sync')
assert vals == (1, 1)
with pytest.raises(ValueError):
b = db.from_sequence([0, 0, 0, 0], npartitions=npartitions)
b.filter(None).min(split_every=2).compute(scheduler='sync')
class StrictReal(int):
def __eq__(self, other):
assert isinstance(other, StrictReal)
return self.real == other.real
def __ne__(self, other):
assert isinstance(other, StrictReal)
return self.real != other.real
def test_reduction_with_non_comparable_objects():
b = db.from_sequence([StrictReal(x) for x in range(10)], partition_size=2)
assert_eq(b.fold(max, max), StrictReal(9))
def test_reduction_with_sparse_matrices():
sp = pytest.importorskip('scipy.sparse')
b = db.from_sequence([sp.csr_matrix([0]) for x in range(4)], partition_size=2)
def sp_reduce(a, b):
return sp.vstack([a, b])
assert b.fold(sp_reduce, sp_reduce).compute(scheduler='sync').shape == (4, 1)
def test_empty():
    assert list(db.from_sequence([])) == []
def test_bag_picklable():
from pickle import loads, dumps
b = db.from_sequence(range(100))
b2 = loads(dumps(b))
assert b.compute() == b2.compute()
s = b.sum()
s2 = loads(dumps(s))
assert s.compute() == s2.compute()
def test_msgpack_unicode():
b = db.from_sequence([{"a": 1}]).groupby("a")
result = b.compute(scheduler='sync')
assert dict(result) == {1: [{'a': 1}]}
def test_bag_with_single_callable():
f = lambda: None
b = db.from_sequence([f])
assert_eq(b, [f])
def test_optimize_fuse_keys():
x = db.range(10, npartitions=2)
y = x.map(inc)
z = y.map(inc)
dsk = z.__dask_optimize__(z.dask, z.__dask_keys__())
assert not set(y.dask) & set(dsk)
dsk = z.__dask_optimize__(z.dask, z.__dask_keys__(),
fuse_keys=y.__dask_keys__())
assert all(k in dsk for k in y.__dask_keys__())
def test_reductions_are_lazy():
current = [None]
def part():
for i in range(10):
current[0] = i
yield i
def func(part):
assert current[0] == 0
return sum(part)
b = Bag({('foo', 0): part()}, 'foo', 1)
res = b.reduction(func, sum)
assert_eq(res, sum(range(10)))
def test_repeated_groupby():
b = db.range(10, npartitions=4)
c = b.groupby(lambda x: x % 3)
assert valmap(len, dict(c)) == valmap(len, dict(c))
def test_temporary_directory(tmpdir):
b = db.range(10, npartitions=4)
with dask.config.set(temporary_directory=str(tmpdir)):
b2 = b.groupby(lambda x: x % 2)
b2.compute()
assert any(fn.endswith('.partd') for fn in os.listdir(str(tmpdir)))
def test_empty_bag():
b = db.from_sequence([])
assert_eq(b.map(inc).all(), True)
assert_eq(b.map(inc).any(), False)
    assert_eq(b.map(inc).sum(), 0)
    assert_eq(b.map(inc).count(), 0)
def test_bag_paths():
b = db.from_sequence(['abc', '123', 'xyz'], npartitions=2)
assert b.to_textfiles('foo*') == ['foo0', 'foo1']
os.remove('foo0')
os.remove('foo1')
| gpl-3.0 |
walterreade/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
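# In these tests eps is the neighborhood radius and min_samples is the number of
# points (including the candidate itself) that must fall within eps for a point
# to be a core sample; test_boundaries below checks that inclusive behavior.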
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
mattgiguere/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
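# FeatureHasher implements the hashing trick: feature names (or name/value
# pairs) are hashed straight to column indices, so the dict and pair inputs
# above land in identical columns without building a vocabulary.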
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
petebachant/daqmx | examples/experiment_run.py | 1 | 3389 | # -*- coding: utf-8 -*-
"""
Created on Tue May 21 21:06:37 2013
@author: Pete
This program commands a National Instruments counter output device to send a
pulse after a target position (read from the ACS controller) is reached.
"""
import daqmx
import acsc
import time
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
def main():
taskhandle = daqmx.TaskHandle()
daqmx.CreateTask("", taskhandle)
phys_chan = "cDAQ9188-16D66BBMod3/ctr2"
globalchan = "VecPulse"
#Timing parameters
rate = 200 # Pulse rate in Hz
initialdelay = 0
    lowtime = 1.0/rate/2   # float division; 1/rate/2 truncates to 0 under Python 2
    hightime = 1.0/rate/2
# daqmx.CreateCOPulseChanTime(taskhandle, phys_chan, "", daqmx.Val_Seconds,
# daqmx.Val_Low, initialdelay, lowtime, hightime,
# False)
daqmx.AddGlobalChansToTask(taskhandle, globalchan)
daqmx.CfgImplicitTiming(taskhandle, daqmx.Val_FiniteSamps, 1)
# Set up communication with motion controller
simulator = False
# Parameters for plotting
plotting = False
plot_dynamic = False
if simulator == True:
hcomm = acsc.OpenCommDirect()
else:
hcomm = acsc.OpenCommEthernetTCP("10.0.0.100", 701)
axis = 5
buffno = 7
target = 0
timeout = 10
# Initialize arrays for storing time and position
t = np.array(0)
x = np.array(0)
if plotting == True and plot_dynamic == True:
plt.ioff()
fig = plt.figure()
ax = fig.add_subplot(111)
# plt.xlim(0, timeout)
# plt.ylim(0, target)
line, = ax.plot([], [])
fig.show()
if hcomm == acsc.INVALID:
print "Cannot connect to controller. Error:", acsc.GetLastError()
else:
# acsc.Enable(hcomm, axis)
rpos = acsc.GetRPosition(hcomm, axis)
x = rpos
t0 = time.time()
if simulator == True:
acsc.ToPoint(hcomm, 0, axis, target+50000)
else:
acsc.RunBuffer(hcomm, buffno, None, None)
while True:
rpos = acsc.GetRPosition(hcomm, axis)
if rpos >= target: break
x = np.append(x, rpos)
t = np.append(t, time.time() - t0)
if plotting == True and plot_dynamic == True:
line.set_xdata(t)
line.set_ydata(x)
ax.relim()
ax.autoscale_view()
fig.canvas.draw()
print "Axis is", acsc.GetAxisState(hcomm, axis)+'...'
if time.time() - t0 > timeout:
print "Motion timeout"
print "Final axis position:", rpos
break
time.sleep(0.001)
print "Target reached. Sending trigger pulse to", globalchan + "..."
daqmx.StartTask(taskhandle, fatalerror=False)
daqmx.WaitUntilTaskDone(taskhandle, timeout=10, fatalerror=False)
daqmx.ClearTask(taskhandle, fatalerror=False)
acsc.CloseComm(hcomm)
print "Triggered at", np.max(x)
if plotting == True:
if plot_dynamic == False:
plt.plot(t, x)
plt.show()
return t, x
if __name__ == "__main__":
t, x = main() | gpl-2.0 |
tacodog1/tdameritrade | tdapi.py | 1 | 27387 | from __future__ import unicode_literals
import datetime
import base64
import http.client
import urllib.request, urllib.parse, urllib.error
import getpass
import binascii
import time
import array
import string
import math, types
from struct import pack, unpack
from xml.etree import ElementTree
import logging
import pandas
class StockQuote():
symbol = None
description = None
bid = None
ask = None
bidAskSize = None
last = None
lastTradeSize = None
lastTradeDate = None
openPrice = None
highPrice = None
lowPrice = None
closePrice = None
volume = None
yearHigh = None
yearLow = None
realTime = None
exchange = None
assetType = None
change = None
changePercent = None
def __init__(self, elementTree):
i = elementTree
self.symbol = i.findall('symbol')[0].text
self.description = i.findall('description')[0].text
self.bid = float(i.findall('bid')[0].text)
self.ask = float(i.findall('ask')[0].text)
self.bidAskSize = i.findall('bid-ask-size')[0].text
self.last = float(i.findall('last')[0].text)
self.lastTradeSize = i.findall('last-trade-size')[0].text
self.lastTradeDate = i.findall('last-trade-date')[0].text
self.openPrice = float(i.findall('open')[0].text)
self.highPrice = float(i.findall('high')[0].text)
self.lowPrice = float(i.findall('low')[0].text)
self.closePrice = float(i.findall('close')[0].text)
self.volume = float(i.findall('volume')[0].text)
self.yearHigh = float(i.findall('year-high')[0].text)
self.yearLow = float(i.findall('year-low')[0].text)
self.realTime = i.findall('real-time')[0].text
self.exchange = i.findall('exchange')[0].text
self.assetType = i.findall('asset-type')[0].text
self.change = float(i.findall('change')[0].text)
self.changePercent = i.findall('change-percent')[0].text
def __str__(self):
s = ''
for key in dir(self):
if key[0] != '_':
s += '%s: %s\n' % (key, getattr(self, key))
return s
class OptionChainElement():
quoteDateTime = None
# Option Date Length - Short - 2
# Option Date - String - Variable
optionDate = None
# Expiration Type Length - Short - 2
# Expiration Type - String - Variable (R for Regular, L for LEAP)
expirationType = None
# Strike Price - Double - 8
strike = None
# Standard Option Flag - Byte - 1 (1 = true, 0 = false)
standardOptionFlag = None
# Put/Call Indicator - Char - 2 (P or C in unicode)
pcIndicator = None
# Option Symbol Length - Short - 2
# Option Symbol - String - Variable
optionSymbol = None
# Option Description Length - Short - 2
# Option Description - String - Variable
optionDescription = None
# Bid - Double - 8
bid = None
# Ask - Double - 8
ask = None
# Bid/Ask Size Length - Short - 2
# Bid/Ask Size - String - Variable
baSize = None
# Last - Double - 8
last = None
# Last Trade Size Length - Short - 2
# Last Trade Size - String - Variable
lastTradeSize = None
# Last Trade Date Length - short - 2
# Last Trade Date - String - Variable
lastTradeDate = None
# Volume - Long - 8
volume = None
# Open Interest - Integer - 4
openInterest = None
# RT Quote Flag - Byte - 1 (1=true, 0=false)
rtQuoteFlag = None
# Underlying Symbol length - Short 2
# Underlying Symbol - String - Variable
underlyingSymbol = None
# Delta - Double- 8
# Gamma - Double - 8
# Theta - Double - 8
# Vega - Double - 8
# Rho - Double - 8
delta = None
gamma = None
theta = None
vega = None
rho = None
# Implied Volatility - Double - 8
impliedVolatility = None
# Time Value Index - Double - 8
timeValueIndex = None
# Multiplier - Double - 8
multiplier = None
# Change - Double - 8
change = None
# Change Percentage - Double - 8
changePercentage = None
# ITM Flag - Byte - 1 (1 = true, 0 = false)
itmFlag = None
# NTM Flag - Byte - 1 (1 = true, 0 = false)
ntmFlag = None
# Theoretical value - Double - 8
theoreticalValue = None
# Deliverable Note Length - Short - 2
# Deliverable Note - String - Variable
deliverableNote = None
# CIL Dollar Amount - Double - 8
cilDollarAmoubt = None
# OP Cash Dollar Amount - Double - 8
opCashDollarAmount = None
# Index Option Flag - Byte - 1 (1 = true, 0 = false)
indexOptionFlag = None
# Number of Deliverables - Integer - 4
deliverables = [] # (symbol, shares)
# REPEATING block for each Deliverable
# Deliverable Symbol Length - Short - 2
# Deliverable Symbol - String - Variable
# Deliverable Shares - Integer - 4
# END
def setQuoteDateTime(self, quoteTime):
#self.quoteDateTime = datetime.datetime.now()
self.quoteDateTime = quoteTime
def __str__(self):
s = self.optionDescription.decode()
if self.last != None:
s = '%s Last: $%.2f' % (s, self.last)
else:
s = '%s Last: N/A' % s
if not (self.delta is None or self.gamma is None or self.theta is None or \
self.vega is None or self.rho is None):
s = "%s (d: %f g: %f t: %f v: %f r: %f)" % \
(s, self.delta, self.gamma, self.theta, self.vega, self.rho)
# date
# expirationType
# strike
# standardOptionFlag
# pcIndicator
# optionSymbol
# optionDescription
# bid
# ask
# baSize
# last
# lastTradeSize
# lastTradeDate
# volume
# openInterest
# rtQuoteFlag
# underlyingSymbol
# delta
# gamma
# theta
# vega
# rho
# impliedVolatility
# timeValueIndex
# multiplier
# change
# changePercentage
# itmFlag
# ntmFlag
# theoreticalValue
# deliverableNote
# cilDollarAmoubt
# opCashDollarAmount
# indexOptionFlag
# deliverables = [] # (symbol, shares)
# REPEATING block for each Deliverable
# Deliverable Symbol Length - Short - 2
# Deliverable Symbol - String - Variable
# Deliverable Shares - Integer - 4
return s
# Python3
__repr__ = __str__
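# Illustrative sketch only (these helpers are not used by the original code and
# their names are hypothetical): getBinaryOptionChain below walks a big-endian
# byte stream in which every variable-length string is prefixed by a 2-byte
# length. The helpers capture that recurring "read length, then bytes" pattern.
def _read_prefixed_string(data, cursor):
    """Read a 2-byte big-endian length, then that many bytes; return (value, new_cursor)."""
    (length,) = unpack('>h', data[cursor:cursor + 2])
    cursor += 2
    return data[cursor:cursor + length], cursor + length
def _read_double(data, cursor):
    """Read an 8-byte big-endian double; return (value, new_cursor)."""
    (value,) = unpack('>d', data[cursor:cursor + 8])
    return value, cursor + 8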
class HistoricalPriceBar():
close = None
high = None
low = None
open = None
volume = None
timestamp = None
def __init__(self):
pass
def __str__(self):
return '%f,%f,%f,%f,%f,%s' % (self.close, self.high, self.low, self.open, self.volume, self.timestamp)
class TDAmeritradeAPI():
_sourceID = None # Note - Source ID must be provided by TD Ameritrade
_version = '0.1'
_active = False
_sessionID = ''
def __init__(self, sourceID):
self._sourceID = sourceID
def isActive(self, confirm=False):
if self._active == True:
# Confirm with server by calling KeepAlive
if confirm:
if self.keepAlive() == False:
self.login()
# TODO: add more robust checking here to make sure we're really logged in
return True
else:
return False
def keepAlive(self):
conn = http.client.HTTPSConnection('apis.tdameritrade.com')
conn.request('POST', '/apps/100/KeepAlive?source=%s' % self._sourceID)
response = conn.getresponse()
#print response.status, response.reason
#print 'Getting response data...'
        data = response.read().decode()  # decode bytes so the string comparisons below work
#print 'Data:',data
conn.close()
if data.strip() == 'LoggedOn':
return True
elif data.strip() == 'InvalidSession':
return False
else:
#print 'Unrecognized response: %s' % data
pass
def login(self, login, password):
logging.debug('[tdapi] Entered login()')
params = urllib.parse.urlencode({'source': self._sourceID, 'version': self._version})
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
body = urllib.parse.urlencode({'userid': login,
'password': password,
'source': self._sourceID,
'version': self._version})
conn = http.client.HTTPSConnection('apis.tdameritrade.com')
#conn.set_debuglevel(100)
conn.request('POST', '/apps/100/LogIn?'+params, body, headers)
response = conn.getresponse()
if response.status == 200:
self._active = True
else:
self._active = False
return False
# The data response is an XML fragment. Log it.
data = response.read()
logging.debug('Login response:\n--------%s\n--------' % data)
conn.close()
# Make sure the login succeeded. First look for <result>OK</result>
element = ElementTree.XML(data)
try:
result = element.findall('result')[0].text
if result == 'FAIL':
self._active = False
return False
elif result == 'OK':
self._active = True
else:
logging.error('Unrecognized login result: %s' % result)
return False
# Now get the session ID
self._sessionID = element.findall('xml-log-in')[0].findall('session-id')[0].text
        except Exception:
            logging.error('Failed to parse login response.')
            return False
        return True
def logout(self):
conn = http.client.HTTPSConnection('apis.tdameritrade.com')
conn.request('POST', '/apps/100/LogOut?source=%s' % self._sourceID)
response = conn.getresponse()
data = response.read()
conn.close()
self._active = False
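    # Hypothetical usage sketch (the source ID must be issued by TD Ameritrade;
    # none of the literal values below come from this file):
    #   api = TDAmeritradeAPI('MY_SOURCE_ID')
    #   api.login('my_user', 'my_password')
    #   if api.isActive():
    #       quotes = api.getSnapshotQuote(['AAPL'], 'stock')
    #       api.logout()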
def getSessionID(self):
return self._sessionID
def getStreamerInfo(self, accountID=None):
arguments = {'source': self._sourceID}
if accountID != None:
arguments['accountid'] = accountID
params = urllib.parse.urlencode(arguments)
conn = http.client.HTTPSConnection('apis.tdameritrade.com')
#conn.set_debuglevel(100)
conn.request('GET', '/apps/100/StreamerInfo?'+params)
response = conn.getresponse()
#print response.status, response.reason
data = response.read()
conn.close()
#print 'Read %d bytes' % len(data)
# We will need to create an ElementTree to process this XML response
# TODO: handle exceptions
element = ElementTree.XML(data)
# Process XML response
streamerInfo = {}
try:
children = element.findall('streamer-info')[0].getchildren()
for c in children:
streamerInfo[c.tag] = c.text
        except Exception as e:
#print 'Error: failed to parse streamer-info response: %s', e
return False
#print 'Received streamer-info properties: %s' % streamerInfo
return streamerInfo
def getSnapshotQuote(self, tickers, assetType, detailed=False):
logging.info('[tdapi.getSnapshotQuote] Enter')
if len(tickers) > 300:
logging.error('TODO: divide in batches of 300')
if assetType not in ['stock','option','index','mutualfund']:
logging.error('getSnapshotQuote: Unrecognized asset type %s' % assetType)
return []
arguments = {'source': self._sourceID,
'symbol': str.join(',', tickers)}
params = urllib.parse.urlencode(arguments)
#print 'Arguments: ', arguments
conn = http.client.HTTPSConnection('apis.tdameritrade.com')
#conn.set_debuglevel(100)
conn.request('GET', ('/apps/100/Quote;jsessionid=%s?' % self.getSessionID()) +params)
response = conn.getresponse()
data = response.read()
conn.close()
logging.info('[tdapi.getSnapshotQuote] Read %d bytes' % len(data))
quotes = {}
# Perform basic processing regardless of quote type
element = ElementTree.XML(data)
try:
result = element.findall('result')[0].text
if result == 'FAIL':
self._active = False
return False
elif result == 'OK':
self._active = True
else:
logging.error('[tdapi.getSnapshotQuote] Unrecognized result: %s' % result)
return {}
# Now get the session ID
#self._sessionID = element.findall('xml-log-in')[0].findall('session-id')[0].text
except:
logging.error('[tdapi.getSnapshotQuote] Failed to parse snapshot quote response.')
return False
if assetType == 'stock':
try:
quoteElements = element.findall('quote-list')[0].findall('quote')
for i in quoteElements:
symbol = i.findall('symbol')[0].text
if detailed:
q = StockQuote(i) # Create a quote object from etree
quotes[symbol] = q
else:
last = float(i.findall('last')[0].text)
quotes[symbol] = last
except:
logging.error('Failed to parse snapshot quote response')
return {}
else:
logging.error('[tdapi.getSnapshotQuote] Asset type not supported: %s' % assetType)
return quotes
def getBinaryOptionChain(self, ticker):
arguments = {'source': self._sourceID,
'symbol': ticker,
'range': 'ALL',
'quotes': 'true'
}
params = urllib.parse.urlencode(arguments)
#print 'Arguments: ', arguments
conn = http.client.HTTPSConnection('apis.tdameritrade.com')
#conn.set_debuglevel(100)
conn.request('GET', ('/apps/200/BinaryOptionChain;jsessionid=%s?' % self.getSessionID()) +params)
response = conn.getresponse()
data = response.read()
conn.close()
cursor = 0
error = unpack('b', data[cursor:cursor+1])[0]
cursor += 1
# If there is an error, there will be an error length and corresponding error text
if error != 0:
errorLength = unpack('>h', data[cursor:cursor+2])[0]
cursor += 2
if errorLength > 0:
errorText = data[cursor:cursor+errorLength]
cursor += errorLength
raise ValueError('[getBinaryOptionChain] Error: %s' % errorText)
symbolLength = unpack('>h', data[cursor:cursor+2])[0]
cursor += 2
symbol = data[cursor:cursor+symbolLength].decode('utf-8')
cursor += symbolLength
symbolDescriptionLength = unpack('>h', data[cursor:cursor+2])[0]
cursor += 2
symbolDescription = data[cursor:cursor+symbolDescriptionLength].decode('utf-8')
cursor += symbolDescriptionLength
bid = unpack('>d', data[cursor:cursor+8])[0]
cursor += 8
ask = unpack('>d', data[cursor:cursor+8])[0]
cursor += 8
baSizeLength = unpack('>h', data[cursor:cursor+2])[0]
cursor += 2
baSize = data[cursor:cursor+baSizeLength]
cursor += baSizeLength
last = unpack('>d', data[cursor:cursor+8])[0]
cursor += 8
open = unpack('>d', data[cursor:cursor+8])[0]
cursor += 8
high = unpack('>d', data[cursor:cursor+8])[0]
cursor += 8
low = unpack('>d', data[cursor:cursor+8])[0]
cursor += 8
close = unpack('>d', data[cursor:cursor+8])[0]
cursor += 8
volume = unpack('>d', data[cursor:cursor+8])[0]
cursor += 8
change = unpack('>d', data[cursor:cursor+8])[0]
cursor += 8
rtFlag = chr(unpack('>H', data[cursor:cursor+2])[0])
cursor += 2
qtLength = unpack('>H', data[cursor:cursor+2])[0]
cursor += 2
quoteTime = data[cursor:cursor+qtLength]
cursor += qtLength
rowCount = unpack('>i', data[cursor:cursor+4])[0]
cursor += 4
optionChain = []
for i in range(rowCount):
if cursor > len(data):
print('Error! Read too much data')
break
o = OptionChainElement()
optionChain.append(o)
# Option Date Length - Short - 2
l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
# Option Date - String - Variable
o.optionDate = data[cursor:cursor+l]; cursor += l
# Expiration Type Length - Short - 2
l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
# Expiration Type - String - Variable (R for Regular, L for LEAP)
o.expirationType = data[cursor:cursor+l]; cursor += l
# Strike Price - Double - 8
o.strike = unpack('>d', data[cursor:cursor+8])[0]; cursor += 8
# Standard Option Flag - Byte - 1 (1 = true, 0 = false)
o.standardOptionFlag = unpack('b', data[cursor:cursor+1])[0]; cursor += 1
# Put/Call Indicator - Char - 2 (P or C in unicode)
o.pcIndicator = chr(unpack('>H', data[cursor:cursor+2])[0]); cursor += 2
# Option Symbol Length - Short - 2
l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
# Option Symbol - String - Variable
o.optionSymbol = data[cursor:cursor+l]; cursor += l
# Option Description Length - Short - 2
l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
# Option Description - String - Variable
o.optionDescription = data[cursor:cursor+l]; cursor += l
# Bid - Double - 8
o.bid = unpack('>d', data[cursor:cursor+8])[0]; cursor += 8
# Ask - Double - 8
o.ask = unpack('>d', data[cursor:cursor+8])[0]; cursor += 8
# Bid/Ask Size Length - Short - 2
l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
# Bid/Ask Size - String - Variable
o.baSize = data[cursor:cursor+l]; cursor += l
# Last - Double - 8
o.last = unpack('>d', data[cursor:cursor+8])[0]; cursor += 8
# Last Trade Size Length - Short - 2
l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
# Last Trade Size - String - Variable
o.lastTradeSize = data[cursor:cursor+l]; cursor += l
# Last Trade Date Length - short - 2
l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
# Last Trade Date - String - Variable
o.lastTradeDate = data[cursor:cursor+l]; cursor += l
# Volume - Long - 8
o.volume = unpack('>Q',data[cursor:cursor+8])[0]; cursor += 8
# Open Interest - Integer - 4
o.openInterest = unpack('>i', data[cursor:cursor+4])[0]; cursor += 4
# RT Quote Flag - Byte - 1 (1=true, 0=false)
o.rtQuoteFlag = unpack('b', data[cursor:cursor+1])[0]; cursor += 1
o.setQuoteDateTime(quoteTime)
# Underlying Symbol length - Short 2
l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
# Underlying Symbol - String - Variable
o.underlyingSymbol = data[cursor:cursor+l].decode('utf-8'); cursor += l
# Delta - Double- 8
# Gamma - Double - 8
# Theta - Double - 8
# Vega - Double - 8
# Rho - Double - 8
# Implied Volatility - Double - 8
# Time Value Index - Double - 8
# Multiplier - Double - 8
# Change - Double - 8
# Change Percentage - Double - 8
(o.delta, o.gamma, o.theta, o.vega, o.rho, o.impliedVolatility, o.tvIndex,
o.multiplier, o.change, o.changePercentage) = \
unpack('>10d', data[cursor:cursor+80]); cursor += 80
# ITM Flag - Byte - 1 (1 = true, 0 = false)
# NTM Flag - Byte - 1 (1 = true, 0 = false)
(o.itmFlag, o.ntmFlag) = unpack('2b', data[cursor:cursor+2]); cursor += 2
# Theoretical value - Double - 8
o.theoreticalValue = unpack('>d', data[cursor:cursor+8])[0]; cursor += 8
# Deliverable Note Length - Short - 2
l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
# Deliverable Note - String - Variable
o.deliverableNote = data[cursor:cursor+l]; cursor += l
# CIL Dollar Amount - Double - 8
# OP Cash Dollar Amount - Double - 8
(o.cilDollarAmount, o.opCashDollarAmount) = \
unpack('>2d', data[cursor:cursor+16]); cursor += 16
# Index Option Flag - Byte - 1 (1 = true, 0 = false)
o.indexOptionFlag = unpack('b', data[cursor:cursor+1])[0]; cursor += 1
# Number of Deliverables - Integer - 4
numDeliverables = unpack('>i', data[cursor:cursor+4])[0]; cursor += 4
for j in range(numDeliverables):
# REPEATING block for each Deliverable
# Deliverable Symbol Length - Short - 2
l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
# Deliverable Symbol - String - Variable
s = data[cursor:cursor+l]; cursor += l
# Deliverable Shares - Integer - 4
o.deliverables.append((s, unpack('>i', data[cursor:cursor+4])[0])); cursor += 4
# END
# Change all "nan" to None to make sure the oce is serializable
for k in list(o.__dict__.keys()):
if (type(o.__dict__[k]) == float) and math.isnan(o.__dict__[k]):
logging.info('[tdapi.getBinaryOptionChain] Converting o[%s]=nan to None' % (k))
o.__dict__[k] = None
return optionChain
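    # Illustrative usage sketch of the chain parser above. The "api" instance is
    # hypothetical (an authenticated object of this class); the method name and
    # the OptionChainElement fields come from this module:
    #   chain = api.getBinaryOptionChain('AMTD')
    #   calls = [o for o in chain if o.pcIndicator == 'C']
    #   strikes = sorted(o.strike for o in calls)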
def getPriceHistory(self, ticker, intervalType='DAILY', intervalDuration='1', periodType='MONTH',
period='1', startdate=None, enddate=None, extended=None):
validPeriodTypes = [
'DAY',
'MONTH',
'YEAR',
'YTD'
]
validIntervalTypes = {
'DAY': ['MINUTE'],
'MONTH': ['DAILY', 'WEEKLY'],
'YEAR': ['DAILY', 'WEEKLY', 'MONTHLY'],
'YTD': ['DAILY', 'WEEKLY']
}
arguments = {'source': self._sourceID,
'requestidentifiertype': 'SYMBOL',
'requestvalue': ticker,
'intervaltype': intervalType,
'intervalduration': intervalDuration,
'period': period,
'periodtype':periodType,
'startdate':startdate,
'enddate':enddate
}
# TODO: build params conditionally based on whether we're doing period-style request
# TODO: support start and end dates
validArgs = {}
for k in list(arguments.keys()):
if arguments[k] != None:
validArgs[k] = arguments[k]
params = urllib.parse.urlencode(validArgs)
logging.getLogger("requests").setLevel(logging.WARNING)
conn = http.client.HTTPSConnection('apis.tdameritrade.com')
conn.set_debuglevel(0)
conn.request('GET', '/apps/100/PriceHistory?'+params)
response = conn.getresponse()
if response.status != 200:
#import pdb; pdb.set_trace()
raise ValueError(response.reason)
data = response.read()
conn.close()
# The first 15 bytes are the header
# DATA TYPE DESCRIPTION
# 00 00 00 01 4 bytes Symbol Count =1
# 00 04 2 bytes Symbol Length = 4
# 41 4D 54 44 4 bytes Symbol = AMTD
# 00 1 byte Error code = 0 (OK)
# 00 00 00 02 4 bytes Bar Count = 2
cursor = 0
symbolCount = unpack('>i', data[0:4])[0]
if symbolCount > 1:
fp = open('tdapi_debug_dump','wb')
fp.write(data)
fp.close()
raise ValueError('Error - see tdapi_debug_dump')
symbolLength = unpack('>h', data[4:6])[0]
cursor = 6
symbol = data[cursor:cursor+symbolLength]
cursor += symbolLength
error = unpack('b', data[cursor:cursor+1])[0]
cursor += 1
# If there is an error, there will be an error length and corresponding error text
if error != 0:
errorLength = unpack('>h', data[cursor:cursor+2])[0]
# TODO: verify that this is correct below -- advance cursor for error length
cursor += 2
if errorLength > 0:
errorText = data[cursor:cursor+errorLength]
cursor += errorLength
raise ValueError('[getPriceHistory] Error: %s' % errorText)
barCount = unpack('>i', data[cursor:cursor+4])[0]
cursor += 4
# TODO: Add more rigorous checks on header data
# Now we need to extract the bars
bars = []
for i in range(barCount):
# Make sure we still have enough data for a bar and a terminator (note only one terminator at the end)
if cursor + 28 > len(data):
                raise ValueError('Trying to read %d bytes from %d total!' % (cursor+28, len(data)))
C = unpack('>f', data[cursor:cursor+4])[0]
cursor += 4
H = unpack('>f', data[cursor:cursor+4])[0]
cursor += 4
L = unpack('>f', data[cursor:cursor+4])[0]
cursor += 4
O = unpack('>f', data[cursor:cursor+4])[0]
cursor += 4
V = unpack('>f', data[cursor:cursor+4])[0] * 100.0
cursor += 4
#T = time.gmtime(float(unpack('>Q',data[cursor:cursor+8])[0]) / 1000.0) # Returned in ms since the epoch
T = datetime.datetime.utcfromtimestamp(float(unpack('>Q',data[cursor:cursor+8])[0]) / 1000.0) # Returned in ms since the epoch
cursor += 8
bars.append((O,H,L,C,V,T))
# Finally we should see a terminator of FF
if data[cursor:cursor+2] != b'\xff\xff':
fp = open('tdapi_debug_dump','wb')
fp.write(data)
fp.close()
raise ValueError('Did not find terminator at hexdata[%d]! See tdapi_debug_dump' % cursor)
df = pandas.DataFrame(data=bars, columns=['open','high','low','close','volume','timestamp'])
return df
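    # Illustrative usage sketch (hedged: "api" is a hypothetical authenticated
    # instance of this class; only the method and its parameters are real):
    #   df = api.getPriceHistory('AMTD', intervalType='DAILY', periodType='MONTH', period='1')
    #   list(df.columns)  ->  ['open', 'high', 'low', 'close', 'volume', 'timestamp']
    #   df['close'].mean()  # e.g. average daily close over the requested month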
| mit |
seckcoder/lang-learn | python/sklearn/examples/linear_model/plot_sgd_comparison.py | 7 | 1641 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: Simplified BSD
import numpy as np
import pylab as pl
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
classifiers = [
("SGD", SGDClassifier()),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1 - np.array(heldout)
for name, clf in classifiers:
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = train_test_split(digits.data,
digits.target,
test_size=i)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
pl.plot(xx, yy, label=name)
pl.legend(loc="upper right")
pl.xlabel("Proportion train")
pl.ylabel("Test Error Rate")
pl.show()
| unlicense |
vitordouzi/sigtrec_eval | sigtrec_eval.py | 1 | 6354 | import sys, os, subprocess, math, multiprocessing, random
import numpy as np
from numpy import nan
import pandas as pd
from scipy.stats.mstats import ttest_rel
from scipy.stats import ttest_ind, wilcoxon
from imblearn.over_sampling import RandomOverSampler, SMOTE
from collections import namedtuple
Result = namedtuple('Result', ['qrelFileName', 'datasetid', 'resultsFiles', 'nameApproach'])
def getFileName(qrelFile):
return os.path.basename(qrelFile)
class SIGTREC_Eval():
def __init__(self, cv=0, seed=42, round_=4, trec_eval=os.path.join(".", os.path.dirname(__file__), "trec_eval")):
self.nameApp = {}
self.cv = cv
self.seed = seed
self.trec_eval = trec_eval
self.round = round_
random.seed(seed)
def _build_F1(self, qrelFileName, to_compare, m, top):
command = ' '.join([self.trec_eval, qrelFileName, to_compare, '-q ', '-M %d' % top, '-m %s.%d'])
content_P = str(subprocess.Popen(command % ('P', top), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()[0])[2:-1].split('\\n')
content_R = str(subprocess.Popen(command % ('recall', top), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()[0])[2:-1].split('\\n')
content_F1 = []
for i in range(len(content_P)):
part_P = content_P[i].split('\\t')
part_R = content_R[i].split('\\t')
if len(part_P) != len(part_R) or len(part_P) < 3:
continue
if part_P[1] != part_R[1]:
print(part_P[1], part_R[1])
else:
Pre = float(part_P[2])
Rec = float(part_R[2])
if Pre == 0. or Rec == 0.:
content_F1.append( 'F1_%d\\t%s\\t0.' % ( top, part_P[1] ) )
else:
line = 'F1_%d\\t%s\\t%.4f' % ( top, part_P[1], (2.*Pre*Rec)/(Pre+Rec) )
content_F1.append( line )
return content_F1
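    # Worked example of the harmonic mean computed above: with P@10 = 0.50 and
    # recall@10 = 0.25 for a query, F1_10 = 2*0.50*0.25 / (0.50+0.25) = 0.3333
    # (illustrative numbers only).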
def build_df(self, results, measures, top):
raw = []
qtd = len(measures)*sum([ len(input_result.resultsFiles) for input_result in results])
i=0
for input_result in results:
self.nameApp[input_result.datasetid] = []
for m in measures:
for (idx, to_compare) in enumerate(input_result.resultsFiles):
self.nameApp[input_result.datasetid].append(getFileName(to_compare))
print("\r%.2f%%" % (100.*i/qtd),end='')
i+=1
if m.startswith("F1"):
content = self._build_F1(input_result.qrelFileName, to_compare, m, top=top)
else:
                        ############ Size 10 HARD-CODED ############
command = ' '.join([self.trec_eval, input_result.qrelFileName, to_compare, '-q -M %d' % top, '-m', m])
content = str(subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()[0])[2:-1].split('\\n')
raw.extend([ (input_result.datasetid, idx, getFileName(to_compare), *( w.strip() for w in line.split('\\t') )) for line in content ][:-1])
        print("\r100.00%")
df_raw = pd.DataFrame(list(filter(lambda x: not x[4]=='all', raw)), columns=['qrel', 'idx_approach', 'approach', 'measure', 'docid', 'result'])
df_finale = pd.pivot_table(df_raw, index=['qrel', 'docid'], columns=['idx_approach','measure'], values='result', aggfunc='first')
df_finale.reset_index()
df_finale[np.array(df_finale.columns)] = df_finale[np.array(df_finale.columns)].astype(np.float64)
df_finale.replace('None', 0.0, inplace=True)
df_finale.replace(nan, 0.0, inplace=True)
#df_finale = df_finale[~df_finale['docid'].isin(['all'])]
df_finale['fold'] = [0]*len(df_finale)
if self.cv > 0:
for (qrel, qrel_group) in df_finale.groupby('qrel'):
folds=(list(range(self.cv))*math.ceil(len(qrel_group)/self.cv))[:len(qrel_group)]
random.shuffle(folds)
df_finale.loc[qrel, 'fold'] = folds
#with pd.option_context('display.max_rows', None, 'display.max_columns', 10000000000):
# print(df_finale)
return df_finale
def get_test(self, test, pbase, pcomp, multi_test=False):
if np.array_equal(pbase.values, pcomp.values):
pvalue = 1.
else:
if test == 'student':
(tvalue, pvalue) = ttest_rel(pbase, pcomp)
elif test == 'wilcoxon':
(tvalue, pvalue) = wilcoxon(pbase, pcomp)
elif test == 'welcht':
(tvalue, pvalue) = ttest_ind(pbase, pcomp, equal_var=False)
if pvalue < 0.05:
pbase_mean = pbase.mean()
pcomp_mean = pcomp.mean()
if pvalue < 0.01:
if pbase_mean > pcomp_mean:
result_test = '▼ '
else:
result_test = '▲ '
else:
if pbase_mean > pcomp_mean:
result_test = 'ᐁ '
else:
result_test = 'ᐃ '
else:
if not multi_test:
result_test = ' '
else:
result_test = '⏺ '
return result_test
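    # Symbol legend for get_test above (each approach is compared to the baseline,
    # i.e. the first result file):
    #   ▼ / ▲  significantly worse / better than the baseline (p < 0.01)
    #   ᐁ / ᐃ  significantly worse / better than the baseline (0.01 <= p < 0.05)
    #   ⏺      no significant difference (printed only when several tests are combined)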
def build_printable(self, table, significance_tests):
printable = {}
for qrel, qrel_group in table.groupby('qrel'):
raw = []
base = qrel_group.loc[:,0]
for idx_app in [idx for idx in qrel_group.columns.levels[0] if type(idx) == int]:
instance = [ self.nameApp[qrel][idx_app] ]
for m in qrel_group[idx_app].columns:
array_results = qrel_group[idx_app][m]
#print(qrel_group.groupby('fold').mean()[idx_app][m])
mean_measure_folds = qrel_group.groupby('fold').mean()[idx_app][m].mean()
test_result=""
for test in significance_tests:
if idx_app > 0:
test_result+=(self.get_test(test, base[m], array_results, len(significance_tests)>1))
else:
test_result+=('bl ')
instance.append('%f %s' % (round(mean_measure_folds,self.round), test_result) )
raw.append(instance)
printable[qrel] = pd.DataFrame(raw, columns=['app', *(table.columns.levels[1].get_values())[:-1]])
return printable
def get_sampler(self, sampler_name):
if sampler_name == "ros" or sampler_name == 'RandomOverSampler':
return RandomOverSampler(random_state=self.seed)
if sampler_name == "SMOTE" or sampler_name == "smote":
return SMOTE(random_state=self.seed)
def build_over_sample(self, df, sampler):
raw = []
for fold, fold_group in df.groupby('fold'):
y = pd.factorize(fold_group.index.get_level_values('qrel'))[0]
X_sampled, y_res = sampler.fit_sample(fold_group, y)
raw.extend(X_sampled)
df_sampled = pd.DataFrame(raw, columns=df.columns)
df_sampled['qrel'] = [sampler.__class__.__name__]*len(df_sampled)
self.nameApp[sampler.__class__.__name__] = self.nameApp[list(self.nameApp.keys())[0]]
return df_sampled
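# Illustrative end-to-end sketch of this class (file names and the dataset id
# below are hypothetical; the calls mirror the methods defined above):
#
#   res = [Result('topics.qrel', 'myDataset', ['bm25.out', 'lm.out'], 'bm25 vs lm')]
#   ev = SIGTREC_Eval(cv=5)
#   df = ev.build_df(res, ['map', 'P.10'], top=10)
#   tables = ev.build_printable(df, ['student'])   # dict: qrel -> DataFrame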
| mit |
zp312/gmmreg | Python/_plotting.py | 14 | 2435 | #!/usr/bin/env python
#coding=utf-8
##====================================================
## $Author$
## $Date$
## $Revision$
##====================================================
from pylab import *
from configobj import ConfigObj
import matplotlib.pyplot as plt
def display2Dpointset(A):
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.grid(True)
ax.plot(A[:,0],A[:,1],'yo',markersize=8,mew=1)
labels = plt.getp(plt.gca(), 'xticklabels')
plt.setp(labels, color='k', fontweight='bold')
labels = plt.getp(plt.gca(), 'yticklabels')
plt.setp(labels, color='k', fontweight='bold')
for i,x in enumerate(A):
ax.annotate('%d'%(i+1), xy = x, xytext = x + 0)
ax.set_axis_off()
#fig.show()
def display2Dpointsets(A, B, ax = None):
""" display a pair of 2D point sets """
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(A[:,0],A[:,1],'yo',markersize=8,mew=1)
ax.plot(B[:,0],B[:,1],'b+',markersize=8,mew=1)
#pylab.setp(pylab.gca(), 'xlim', [-0.15,0.6])
labels = plt.getp(plt.gca(), 'xticklabels')
plt.setp(labels, color='k', fontweight='bold')
labels = plt.getp(plt.gca(), 'yticklabels')
plt.setp(labels, color='k', fontweight='bold')
def display3Dpointsets(A,B,ax):
#ax.plot3d(A[:,0],A[:,1],A[:,2],'yo',markersize=10,mew=1)
#ax.plot3d(B[:,0],B[:,1],B[:,2],'b+',markersize=10,mew=1)
ax.scatter(A[:,0],A[:,1],A[:,2], c = 'y', marker = 'o')
ax.scatter(B[:,0],B[:,1],B[:,2], c = 'b', marker = '+')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
from mpl_toolkits.mplot3d import Axes3D
def displayABC(A,B,C):
fig = plt.figure()
dim = A.shape[1]
if dim==2:
ax = plt.subplot(121)
display2Dpointsets(A, B, ax)
ax = plt.subplot(122)
display2Dpointsets(C, B, ax)
if dim==3:
plot1 = plt.subplot(1,2,1)
ax = Axes3D(fig, rect = plot1.get_position())
display3Dpointsets(A,B,ax)
plot2 = plt.subplot(1,2,2)
ax = Axes3D(fig, rect = plot2.get_position())
display3Dpointsets(C,B,ax)
plt.show()
def display_pts(f_config):
config = ConfigObj(f_config)
file_section = config['FILES']
mf = file_section['model']
sf = file_section['scene']
tf = file_section['transformed_model']
m = np.loadtxt(mf)
s = np.loadtxt(sf)
t = np.loadtxt(tf)
displayABC(m,s,t)
| gpl-3.0 |
nborggren/zipline | tests/test_munge.py | 34 | 1794 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import pandas as pd
import numpy as np
from numpy.testing import assert_almost_equal
from unittest import TestCase
from zipline.utils.munge import bfill, ffill
class MungeTests(TestCase):
def test_bfill(self):
# test ndim=1
N = 100
s = pd.Series(np.random.randn(N))
mask = random.sample(range(N), 10)
s.iloc[mask] = np.nan
correct = s.bfill().values
test = bfill(s.values)
assert_almost_equal(correct, test)
# test ndim=2
df = pd.DataFrame(np.random.randn(N, N))
df.iloc[mask] = np.nan
correct = df.bfill().values
test = bfill(df.values)
assert_almost_equal(correct, test)
def test_ffill(self):
# test ndim=1
N = 100
s = pd.Series(np.random.randn(N))
mask = random.sample(range(N), 10)
s.iloc[mask] = np.nan
correct = s.ffill().values
test = ffill(s.values)
assert_almost_equal(correct, test)
# test ndim=2
df = pd.DataFrame(np.random.randn(N, N))
df.iloc[mask] = np.nan
correct = df.ffill().values
test = ffill(df.values)
assert_almost_equal(correct, test)
| apache-2.0 |
jakobworldpeace/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
gavrieltal/opencog | opencog/embodiment/Monitor/emotion_space_browser.py | 17 | 8987 | import numpy as np
import zmq
import json
import matplotlib as mpl
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
# configure the matplotlib settings
#mpl.rcParams['legend.fontsize'] = 10
from PyQt4 import QtGui, QtCore
from common import *
class ZmqMultiFiltersSubscriberThread(QThread):
data_update_signal = pyqtSignal(dict)
def __init__(self, widget, publish_endpoint, filter_list,
zmq_context = glb.zmq_context, parent = None):
"""
        widget should contain a slot method like the one below:
@pyqtSlot(dict)
def handle_data_update(self, json_dict):
# Some code here to process the data in json format
publish_endpoint tells the subscriber where the message source is
"""
# Initialize the thread
QThread.__init__(self)
# Connect the signal with the handler residing in widget
self.widget = widget
self.data_update_signal.connect(self.widget.handle_data_update)
# Initialize the ZeroMQ socket
self.socket = zmq_context.socket(zmq.SUB)
self.filter_list = filter_list
for filter_name in self.filter_list:
self.socket.setsockopt(zmq.SUBSCRIBE, filter_name)
self.socket.connect(publish_endpoint)
def run(self):
"""
        Receive messages with a matching filter_key from publish_endpoint
        via ZeroMQ, discard the bare filter_key frames, and emit the signal
        to the corresponding handler with the data wrapped in a Python dict.
"""
while True:
message = self.socket.recv()
# if the message contains only filter key, discard it
if message in self.filter_list:
self.latest_msg_filter = message
continue
# Unpack the message into python dictionary
json_dict = json.loads(message)
# Apply a filter name to this data dictionary, in order to distinguish it
json_dict['filter_key'] = self.latest_msg_filter
# Emit the signal which would evoke the corresponding handler
self.data_update_signal.emit(json_dict)
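# Minimal sketch of a widget satisfying the slot contract documented above
# (the class and body below are hypothetical):
#
#   class MonitorWidget(QtGui.QWidget):
#       @pyqtSlot(dict)
#       def handle_data_update(self, json_dict):
#           print json_dict['filter_key'], json_dict.get('timestamp')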
class EmotionSpace(FigureCanvas):
def __init__(self, publish_endpoint, parent=None, width=5, height=4, dpi=100):
# Initialize a cache for incoming data.
self.max_data_len = 25
## Feeling dictionary stores dominant feelings with different timestamps.
## Format: { timestamp -> dominant_feeling_name }
self.feeling_dict = {}
## Modulator dictionary caches modulators value at different time points.
## Format:
## { modulator_name -> { timestamp -> modulator_value } }
self.modulator_dict = {}
# The modulator dicitonary should be initialized in the format before
# appending data.
self.has_modulator_dict_initialized = False
# The legend list used to show legend in the chart
self.legend_list = []
# Chosen 3 modulators to be the axes of 3-dimensional space.
self.modulator_axes = []
self.axes_group_box = QtGui.QGroupBox("Modulator Axes:")
# Initialize variables related to graphics.
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = Axes3D(self.fig)
self.axes.hold(False)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding
)
FigureCanvas.updateGeometry(self)
# Create and start ZeroMQ subscriber threads
self.zmq_sub_thread = ZmqMultiFiltersSubscriberThread(self,
publish_endpoint,
[
"PsiFeelingUpdaterAgent",
"PsiModulatorUpdaterAgent"
]
)
self.zmq_sub_thread.start()
# Initialize modulator dictionary and legend list
def initialize_modulator_dict(self, json_dict):
timestamp = json_dict['timestamp']
del json_dict['timestamp']
for k, v in json_dict.iteritems():
self.modulator_dict[k] = {}
self.modulator_dict[k][timestamp] = v
self.modulator_axes.append(k)
self.has_modulator_dict_initialized = True
#self.initialize_axes_group()
    def initialize_axes_group(self):
        # Build a group box with one check box per modulator axis.
        # Currently unused: the call in initialize_modulator_dict is commented out.
        vLayout = QtGui.QVBoxLayout()
        for axes in self.get_axes_list():
            vLayout.addWidget(QtGui.QCheckBox(axes))
        self.axes_group_box.setLayout(vLayout)
        axesGroupSizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum, True)
        self.axes_group_box.setSizePolicy(axesGroupSizePolicy)
        self.layout().insertWidget(0, self.axes_group_box)
def update_data(self, json_dict):
'''
        Update the cached feeling and modulator data.
        Since the incoming message may carry either feeling or modulator data,
        a state code is returned: 0 means the feeling dictionary was updated,
        1 means the modulator dictionary was updated (None is returned while
        the modulator dictionary is still being initialized).
'''
if json_dict['filter_key'] == "PsiFeelingUpdaterAgent":
# Just leave feelings alone
del json_dict['filter_key']
timestamp = json_dict['timestamp']
del json_dict['timestamp']
# Get the feeling name with max value
dominant_feeling = max(json_dict, key = lambda k : json_dict.get(k))
# Cache the pair in the feeling dictionary
self.feeling_dict[timestamp] = dominant_feeling
# return state 0
return 0
elif json_dict['filter_key'] == "PsiModulatorUpdaterAgent":
# Remove filter key pair
del json_dict['filter_key']
if not self.has_modulator_dict_initialized:
self.initialize_modulator_dict(json_dict)
return
timestamp = json_dict['timestamp']
del json_dict['timestamp']
for k, v in json_dict.iteritems():
self.modulator_dict[k][timestamp] = v
# return state 1
return 1
else:
pass
@pyqtSlot(dict)
def handle_data_update(self, json_dict):
"""
Process the data in json format
"""
update_state = self.update_data(json_dict)
# Only update the graphic when the widget is visible and
# modulator data has been updated.
if self.isVisible() and update_state == 1:
self.do_draw()
def do_draw(self):
self.axes.clear()
X = []
Y = []
Z = []
m = self.modulator_axes
print '=========='
for k, v in self.feeling_dict.iteritems():
X.append(self.modulator_dict[m[0]][k])
Y.append(self.modulator_dict[m[1]][k])
Z.append(self.modulator_dict[m[2]][k])
print str(self.modulator_dict[m[0]][k]) + ':' \
+ str(self.modulator_dict[m[1]][k]) + ':' \
+ str(self.modulator_dict[m[2]][k])
print '=========='
self.axes.grid(True)
self.axes.plot(X, Y, Z, '-o')
self.draw()
def get_axes_list(self):
return self.modulator_axes
class EmotionSpaceExplorer(QtGui.QWidget):
def __init__(self, parent=None):
super(EmotionSpaceExplorer, self).__init__(parent)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding, True)
sizePolicy.setHeightForWidth(self.sizePolicy().hasHeightForWidth())
self.setSizePolicy(sizePolicy)
self.emotion_space = EmotionSpace("tcp://192.168.1.250:18002", self)
self.navigation_toolbar = NavigationToolbar(self.emotion_space, self)
mainLayout = QtGui.QVBoxLayout(self)
mainLayout.addWidget(self.emotion_space)
mainLayout.addWidget(self.navigation_toolbar)
self.setLayout(mainLayout)
| agpl-3.0 |
shaunstanislaus/ibis | ibis/tasks.py | 7 | 7836 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
from cPickle import loads as pickle_load
from ibis.cloudpickle import dumps as pickle_dump
from ibis.wire import PackedMessageReader, PackedMessageWriter
import ibis.wire as wire
try:
import ibis.comms as comms
except ImportError:
pass
class IbisTaskMessage(object):
"""
Prototype wire protocol for task descriptions
uint32_t semaphore_id
uint32_t shmem_name_len
char* shmem_name
uint64_t shmem_offset
uint64_t shmem_size
"""
def __init__(self, semaphore_id, shmem_name, shmem_offset, shmem_size):
self.semaphore_id = semaphore_id
self.shmem_name = shmem_name
self.shmem_offset = shmem_offset
self.shmem_size = shmem_size
@classmethod
def decode(self, message):
"""
Convert from the bytestring wire protocol
Parameters
----------
message : bytes
Returns
-------
message : IbisTaskMessage
"""
buf = PackedMessageReader(message)
sem_id = buf.uint32()
shmem_name = buf.string()
shmem_offset = buf.uint64()
shmem_size = buf.uint64()
return IbisTaskMessage(sem_id, shmem_name, shmem_offset, shmem_size)
def encode(self):
"""
Format this message as a bytestring according to the current version of
the wire protocol.
Returns
-------
encoded : bytes
"""
buf = PackedMessageWriter()
buf.uint32(self.semaphore_id)
buf.string(self.shmem_name)
buf.uint64(self.shmem_offset)
buf.uint64(self.shmem_size)
return buf.get_result()
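# Minimal encode/decode round-trip sketch of the wire protocol above
# (field values are illustrative only):
#
#   msg = IbisTaskMessage(3, '/ibis-shmem-0', 0, 4096)
#   decoded = IbisTaskMessage.decode(msg.encode())
#   assert (decoded.semaphore_id, decoded.shmem_name) == (3, '/ibis-shmem-0')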
class Task(object):
"""
Prototype
Run task in a thread, capture tracebacks or other problems.
"""
def __init__(self, shmem):
self.shmem = shmem
self.complete = False
def mark_success(self):
wire.write_uint8(self.shmem, 1)
def mark_failure(self):
wire.write_uint8(self.shmem, 0)
def execute(self):
pass
def run(self):
raise NotImplementedError
def done(self):
pass
_task_registry = {}
def register_task(kind, task_class, override=False):
"""
Register a new task implementation with the execution system
"""
if kind in _task_registry and not override:
raise KeyError('Task of type %s is already defined and '
'override is False')
_task_registry[kind] = task_class
class IbisTaskExecutor(object):
"""
Runs the requested task and handles locking, exception reporting, and so
forth.
"""
def __init__(self, task_msg):
self.task_msg = task_msg
self.lock = comms.IPCLock(self.task_msg.semaphore_id)
self.shmem = comms.SharedMmap(self.task_msg.shmem_name,
self.task_msg.shmem_size,
offset=self.task_msg.shmem_offset)
def _cycle_ipc_lock(self):
# TODO: I put this here as a failsafe in case the task needs to bail
# out for a known reason and we want to immediately release control to
# the master process
self.lock.acquire()
self.lock.release()
def execute(self):
# TODO: Timeout concerns
self.lock.acquire()
# TODO: this can break in various ways on bad input
task_type = wire.read_string(self.shmem)
try:
klass = _task_registry[task_type]
task = klass(self.shmem)
task.run()
except:
self.shmem.seek(0)
# XXX: Failure indicator
wire.write_uint8(self.shmem, 0)
tb = traceback.format_exc()
# HACK: Traceback string must be truncated so it will fit in the
# shared memory (along with the uint32 length prefix)
if len(tb) + 5 > len(self.shmem):
tb = tb[:len(self.shmem) - 5]
wire.write_string(self.shmem, tb)
finally:
self.lock.release()
# ---------------------------------------------------------------------
# Ping pong task for testing
class PingPongTask(Task):
def run(self):
self.shmem.seek(0)
self.mark_success()
wire.write_string(self.shmem, 'pong')
register_task('ping', PingPongTask)
# ---------------------------------------------------------------------
# Aggregation execution tasks
class AggregationTask(Task):
def _write_response(self, agg_inst):
self.shmem.seek(0)
self.mark_success()
serialized_inst = pickle_dump(agg_inst)
wire.write_string(self.shmem, serialized_inst)
class AggregationUpdateTask(AggregationTask):
"""
Task header layout
- serialized agg class
- prior state flag 1/0
- (optional) serialized prior state
- serialized table fragment
"""
def __init__(self, shmem):
AggregationTask.__init__(self, shmem)
self._read_header()
def _read_header(self):
reader = wire.PackedMessageReader(self.shmem)
# Unpack header
self.agg_class_pickled = reader.string()
has_prior_state = reader.uint8() != 0
if has_prior_state:
self.prior_state = pickle_load(reader.string())
else:
self.prior_state = None
def run(self):
if self.prior_state is not None:
agg_inst = self.prior_state
else:
klass = pickle_load(self.agg_class_pickled)
agg_inst = klass()
args = self._deserialize_args()
agg_inst.update(*args)
self._write_response(agg_inst)
def _deserialize_args(self):
# TODO: we need some mechanism to indicate how the data should be
# deserialized before passing to the aggregator. For now, will assume
# "pandas-friendly" NumPy-format
# Deserialize data fragment
table_reader = comms.IbisTableReader(self.shmem)
args = []
for i in range(table_reader.ncolumns):
col = table_reader.get_column(i)
arg = col.to_numpy_for_pandas()
args.append(arg)
return args
class AggregationMergeTask(AggregationTask):
def __init__(self, shmem):
AggregationTask.__init__(self, shmem)
reader = wire.PackedMessageReader(shmem)
# TODO: may wish to merge more than 2 at a time?
# Unpack header
self.left_inst = pickle_load(reader.string())
self.right_inst = pickle_load(reader.string())
def run(self):
# Objects to merge stored in length-prefixed strings in shared memory
merged = self.left_inst.merge(self.right_inst)
self._write_response(merged)
class AggregationFinalizeTask(AggregationTask):
def __init__(self, shmem):
AggregationTask.__init__(self, shmem)
reader = wire.PackedMessageReader(shmem)
self.state = pickle_load(reader.string())
def run(self):
# Single length-prefixed string to finalize
result = self.state.finalize()
self._write_response(result)
register_task('agg-update', AggregationUpdateTask)
register_task('agg-merge', AggregationMergeTask)
register_task('agg-finalize', AggregationFinalizeTask)
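# Sketch of how a new task type would plug into the registry above
# (hypothetical example that mirrors PingPongTask):
#
#   class EchoTask(Task):
#       def run(self):
#           self.shmem.seek(0)
#           self.mark_success()
#           wire.write_string(self.shmem, 'echo')
#
#   register_task('echo', EchoTask)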
| apache-2.0 |
ahmadghizzawi/aub-ml | assignment2/linear-regression.py | 1 | 5573 | import numpy as np
from numpy import sign as sign
import matplotlib.pyplot as plt
class Utils:
@staticmethod
    def generate_set(N):
        """Generate an N-by-3 dataset: a bias column of ones followed by two
        features drawn uniformly from [-1, 1]."""
# Generate random uniformally distributes points
training_set = np.random.uniform(-1, 1, size=(N, 2))
# Insert x0 values into the begining of the array.
x0 = np.ones((N, 1))
training_set = np.insert(training_set, [0], x0, axis=1)
# Set training_set and labels as instance attributes
return training_set
@staticmethod
def display_figures(num_of_runs, ein_total, eout_total):
print('Average Ein(g): ' + str(ein_total/num_of_runs))
print('Average Eout(g): ' + str(eout_total/num_of_runs))
@staticmethod
def plot(training_set, labels, target_weights, best_hypothesis):
# change the axis to fit our dataset
plt.axis([-1, 1, -1, 1])
# breakdown the dataset into two separate arrays, each array representing their label by f(x)
training_set_above_line = []
training_set_below_line = []
for i in range(len(labels)):
if labels[i] == 1:
training_set_above_line.append(training_set[i])
else:
training_set_below_line.append(training_set[i])
training_set_above_line = np.array(training_set_above_line)
training_set_below_line = np.array(training_set_below_line)
# plot the sets
if training_set_above_line.size > 0:
training_set_x1_1 = training_set_above_line[:, 1]
training_set_x2_1 = training_set_above_line[:, 2]
plt.scatter(training_set_x1_1, training_set_x2_1, c='b')
if training_set_below_line.size > 0:
training_set_x1_neg_1 = training_set_below_line[:, 1]
training_set_x2_neg_1 = training_set_below_line[:, 2]
plt.scatter(training_set_x1_neg_1, training_set_x2_neg_1, c='r')
# generate 50 evenly spaced numbers from -1 to 1.
line = np.linspace(-1, 1)
# plot the f(x) in blue
m, b = -target_weights[1] / target_weights[2], -target_weights[0] / target_weights[2]
plt.plot(line, m * line + b, 'b-', label='f(x)')
# plot the g(x) in dashed red
m1, b1 = -best_hypothesis[1] / best_hypothesis[2], -best_hypothesis[0] / best_hypothesis[2]
plt.plot(line, m1 * line + b1, 'r--', label='h(x)')
plt.show()
@staticmethod
def generate_target_weights():
# create random line as target function f(x)
point1 = [1, np.random.uniform(-1, 1), np.random.uniform(-1, 1)]
point2 = [1, np.random.uniform(-1, 1), np.random.uniform(-1, 1)]
x1A = point1[1]
x2A = point1[2]
x1B = point2[1]
x2B = point2[2]
# create the target function weights based on the random points
        # implicit line form w0 + w1*x1 + w2*x2 = 0 satisfied by both points
        target_weights = np.array([x1B * x2A - x1A * x2B, x2B - x2A, x1A - x1B])
return target_weights
@staticmethod
def evaluate_difference(target_labels, hypothesis_labels):
# evaluate the difference between f and g on in or out-of-sample data.
sample_size = len(target_labels)
i = 0
total_misclassified = 0
while i < sample_size:
target_classification = target_labels[i]
hypothesis_classification = hypothesis_labels[i]
if target_classification != hypothesis_classification:
total_misclassified += 1
i += 1
return total_misclassified / sample_size
class LinearRegression:
def __init__(self):
self.target_weights = Utils.generate_target_weights()
@staticmethod
def apply(w, x):
# apply h(x)
return sign(np.dot(w, x))
@staticmethod
def learn(X, Y):
        # fit ordinary least squares in one shot via the pseudo-inverse
pseudo_inverse = np.linalg.pinv(X)
return np.dot(pseudo_inverse, Y)
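    # Note: np.linalg.pinv(X).dot(Y) is the least-squares solution
    # w = argmin ||Xw - Y||^2; when X has full column rank it matches the
    # normal-equations solution (illustrative check only):
    #   w_ne = np.linalg.solve(X.T.dot(X), X.T.dot(Y))
    #   np.allclose(w_ne, np.linalg.pinv(X).dot(Y))   # -> True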
@staticmethod
def get_labels(w, X):
labels = []
[labels.append(LinearRegression.apply(w, x)) for x in X]
return np.array([labels]).T
def run(self, number_of_runs):
ein_total = 0
eout_total = 0
best_model = None
best_eout = None
i = 0
while i < number_of_runs:
# Generate data set and labels
X = Utils.generate_set(100)
Y = LinearRegression.get_labels(w=self.target_weights, X=X)
# Generate hypothesis weights and labels
hypothesis_weight = LinearRegression.learn(X, Y).T.flatten()
hypothesis_labels = LinearRegression.get_labels(hypothesis_weight, X)
# Calculate Ein total
ein_total += Utils.evaluate_difference(Y, hypothesis_labels)
# Generate 1000 out_of_sample points
X_out = Utils.generate_set(1000)
Y_out = LinearRegression.get_labels(w=self.target_weights, X=X_out)
# Label out_of_sample data set with hypothesis
hypothesis_labels_out = LinearRegression.get_labels(hypothesis_weight, X_out)
# Calculate Eout total
eout = Utils.evaluate_difference(Y_out, hypothesis_labels_out)
eout_total += eout
if best_eout is None or eout < best_eout:
best_eout = eout
best_model = hypothesis_weight
i += 1
Utils.display_figures(number_of_runs, ein_total, eout_total)
Utils.plot(X, Y, self.target_weights, best_model)
model = LinearRegression()
model.run(1000)
| unlicense |
jcasner/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/dviread.py | 69 | 29920 | """
An experimental module for reading dvi files output by TeX. Several
limitations make this not (currently) useful as a general-purpose dvi
preprocessor.
Interface::
dvi = Dvi(filename, 72)
for page in dvi: # iterate over pages
w, h, d = page.width, page.height, page.descent
for x,y,font,glyph,width in page.text:
fontname = font.texname
pointsize = font.size
...
for x,y,height,width in page.boxes:
...
"""
import errno
import matplotlib
import matplotlib.cbook as mpl_cbook
import numpy as np
import struct
import subprocess
_dvistate = mpl_cbook.Bunch(pre=0, outer=1, inpage=2, post_post=3, finale=4)
class Dvi(object):
"""
A dvi ("device-independent") file, as produced by TeX.
The current implementation only reads the first page and does not
even attempt to verify the postamble.
"""
def __init__(self, filename, dpi):
"""
Initialize the object. This takes the filename as input and
opens the file; actually reading the file happens when
iterating through the pages of the file.
"""
matplotlib.verbose.report('Dvi: ' + filename, 'debug')
self.file = open(filename, 'rb')
self.dpi = dpi
self.fonts = {}
self.state = _dvistate.pre
def __iter__(self):
"""
Iterate through the pages of the file.
        Yields one page object per page, where:
          page.text is a list of (x, y, font, glyph, width) tuples
          page.boxes is a list of (x, y, height, width) tuples
The coordinates are transformed into a standard Cartesian
coordinate system at the dpi value given when initializing.
The coordinates are floating point numbers, but otherwise
precision is not lost and coordinate values are not clipped to
integers.
"""
while True:
have_page = self._read()
if have_page:
yield self._output()
else:
break
def close(self):
"""
Close the underlying file if it is open.
"""
if not self.file.closed:
self.file.close()
def _output(self):
"""
Output the text and boxes belonging to the most recent page.
page = dvi._output()
"""
minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf
maxy_pure = -np.inf
for elt in self.text + self.boxes:
if len(elt) == 4: # box
x,y,h,w = elt
e = 0 # zero depth
else: # glyph
x,y,font,g,w = elt
h = _mul2012(font._scale, font._tfm.height[g])
e = _mul2012(font._scale, font._tfm.depth[g])
minx = min(minx, x)
miny = min(miny, y - h)
maxx = max(maxx, x + w)
maxy = max(maxy, y + e)
maxy_pure = max(maxy_pure, y)
if self.dpi is None:
# special case for ease of debugging: output raw dvi coordinates
return mpl_cbook.Bunch(text=self.text, boxes=self.boxes,
width=maxx-minx, height=maxy_pure-miny,
descent=maxy-maxy_pure)
d = self.dpi / (72.27 * 2**16) # from TeX's "scaled points" to dpi units
text = [ ((x-minx)*d, (maxy-y)*d, f, g, w*d)
for (x,y,f,g,w) in self.text ]
boxes = [ ((x-minx)*d, (maxy-y)*d, h*d, w*d) for (x,y,h,w) in self.boxes ]
return mpl_cbook.Bunch(text=text, boxes=boxes,
width=(maxx-minx)*d,
height=(maxy_pure-miny)*d,
descent=(maxy-maxy_pure)*d)
def _read(self):
"""
Read one page from the file. Return True if successful,
False if there were no more pages.
"""
while True:
byte = ord(self.file.read(1))
self._dispatch(byte)
# if self.state == _dvistate.inpage:
# matplotlib.verbose.report(
# 'Dvi._read: after %d at %f,%f' %
# (byte, self.h, self.v),
# 'debug-annoying')
if byte == 140: # end of page
return True
if self.state == _dvistate.post_post: # end of file
self.close()
return False
def _arg(self, nbytes, signed=False):
"""
Read and return an integer argument "nbytes" long.
Signedness is determined by the "signed" keyword.
"""
str = self.file.read(nbytes)
value = ord(str[0])
if signed and value >= 0x80:
value = value - 0x100
for i in range(1, nbytes):
value = 0x100*value + ord(str[i])
return value
def _dispatch(self, byte):
"""
Based on the opcode "byte", read the correct kinds of
arguments from the dvi file and call the method implementing
that opcode with those arguments.
"""
if 0 <= byte <= 127: self._set_char(byte)
elif byte == 128: self._set_char(self._arg(1))
elif byte == 129: self._set_char(self._arg(2))
elif byte == 130: self._set_char(self._arg(3))
elif byte == 131: self._set_char(self._arg(4, True))
elif byte == 132: self._set_rule(self._arg(4, True), self._arg(4, True))
elif byte == 133: self._put_char(self._arg(1))
elif byte == 134: self._put_char(self._arg(2))
elif byte == 135: self._put_char(self._arg(3))
elif byte == 136: self._put_char(self._arg(4, True))
elif byte == 137: self._put_rule(self._arg(4, True), self._arg(4, True))
elif byte == 138: self._nop()
elif byte == 139: self._bop(*[self._arg(4, True) for i in range(11)])
elif byte == 140: self._eop()
elif byte == 141: self._push()
elif byte == 142: self._pop()
elif byte == 143: self._right(self._arg(1, True))
elif byte == 144: self._right(self._arg(2, True))
elif byte == 145: self._right(self._arg(3, True))
elif byte == 146: self._right(self._arg(4, True))
elif byte == 147: self._right_w(None)
elif byte == 148: self._right_w(self._arg(1, True))
elif byte == 149: self._right_w(self._arg(2, True))
elif byte == 150: self._right_w(self._arg(3, True))
elif byte == 151: self._right_w(self._arg(4, True))
elif byte == 152: self._right_x(None)
elif byte == 153: self._right_x(self._arg(1, True))
elif byte == 154: self._right_x(self._arg(2, True))
elif byte == 155: self._right_x(self._arg(3, True))
elif byte == 156: self._right_x(self._arg(4, True))
elif byte == 157: self._down(self._arg(1, True))
elif byte == 158: self._down(self._arg(2, True))
elif byte == 159: self._down(self._arg(3, True))
elif byte == 160: self._down(self._arg(4, True))
elif byte == 161: self._down_y(None)
elif byte == 162: self._down_y(self._arg(1, True))
elif byte == 163: self._down_y(self._arg(2, True))
elif byte == 164: self._down_y(self._arg(3, True))
elif byte == 165: self._down_y(self._arg(4, True))
elif byte == 166: self._down_z(None)
elif byte == 167: self._down_z(self._arg(1, True))
elif byte == 168: self._down_z(self._arg(2, True))
elif byte == 169: self._down_z(self._arg(3, True))
elif byte == 170: self._down_z(self._arg(4, True))
elif 171 <= byte <= 234: self._fnt_num(byte-171)
elif byte == 235: self._fnt_num(self._arg(1))
elif byte == 236: self._fnt_num(self._arg(2))
elif byte == 237: self._fnt_num(self._arg(3))
elif byte == 238: self._fnt_num(self._arg(4, True))
elif 239 <= byte <= 242:
len = self._arg(byte-238)
special = self.file.read(len)
self._xxx(special)
elif 243 <= byte <= 246:
k = self._arg(byte-242, byte==246)
c, s, d, a, l = [ self._arg(x) for x in (4, 4, 4, 1, 1) ]
n = self.file.read(a+l)
self._fnt_def(k, c, s, d, a, l, n)
elif byte == 247:
i, num, den, mag, k = [ self._arg(x) for x in (1, 4, 4, 4, 1) ]
x = self.file.read(k)
self._pre(i, num, den, mag, x)
elif byte == 248: self._post()
elif byte == 249: self._post_post()
else:
raise ValueError, "unknown command: byte %d"%byte
def _pre(self, i, num, den, mag, comment):
if self.state != _dvistate.pre:
raise ValueError, "pre command in middle of dvi file"
if i != 2:
raise ValueError, "Unknown dvi format %d"%i
if num != 25400000 or den != 7227 * 2**16:
raise ValueError, "nonstandard units in dvi file"
# meaning: TeX always uses those exact values, so it
# should be enough for us to support those
# (There are 72.27 pt to an inch so 7227 pt =
# 7227 * 2**16 sp to 100 in. The numerator is multiplied
# by 10^5 to get units of 10**-7 meters.)
if mag != 1000:
raise ValueError, "nonstandard magnification in dvi file"
# meaning: LaTeX seems to frown on setting \mag, so
# I think we can assume this is constant
self.state = _dvistate.outer
def _set_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced set_char in dvi file"
self._put_char(char)
self.h += self.fonts[self.f]._width_of(char)
def _set_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced set_rule in dvi file"
self._put_rule(a, b)
self.h += b
def _put_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced put_char in dvi file"
font = self.fonts[self.f]
if font._vf is None:
self.text.append((self.h, self.v, font, char,
font._width_of(char)))
# matplotlib.verbose.report(
# 'Dvi._put_char: %d,%d %d' %(self.h, self.v, char),
# 'debug-annoying')
else:
scale = font._scale
for x, y, f, g, w in font._vf[char].text:
newf = DviFont(scale=_mul2012(scale, f._scale),
tfm=f._tfm, texname=f.texname, vf=f._vf)
self.text.append((self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
newf, g, newf._width_of(g)))
self.boxes.extend([(self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
_mul2012(a, scale), _mul2012(b, scale))
for x, y, a, b in font._vf[char].boxes])
def _put_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced put_rule in dvi file"
if a > 0 and b > 0:
self.boxes.append((self.h, self.v, a, b))
# matplotlib.verbose.report(
# 'Dvi._put_rule: %d,%d %d,%d' % (self.h, self.v, a, b),
# 'debug-annoying')
def _nop(self):
pass
def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
if self.state != _dvistate.outer:
raise ValueError, \
"misplaced bop in dvi file (state %d)" % self.state
self.state = _dvistate.inpage
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack = []
self.text = [] # list of (x,y,fontnum,glyphnum)
self.boxes = [] # list of (x,y,width,height)
def _eop(self):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced eop in dvi file"
self.state = _dvistate.outer
del self.h, self.v, self.w, self.x, self.y, self.z, self.stack
def _push(self):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced push in dvi file"
self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))
def _pop(self):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced pop in dvi file"
self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()
def _right(self, b):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced right in dvi file"
self.h += b
def _right_w(self, new_w):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced w in dvi file"
if new_w is not None:
self.w = new_w
self.h += self.w
def _right_x(self, new_x):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced x in dvi file"
if new_x is not None:
self.x = new_x
self.h += self.x
def _down(self, a):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced down in dvi file"
self.v += a
def _down_y(self, new_y):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced y in dvi file"
if new_y is not None:
self.y = new_y
self.v += self.y
def _down_z(self, new_z):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced z in dvi file"
if new_z is not None:
self.z = new_z
self.v += self.z
def _fnt_num(self, k):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced fnt_num in dvi file"
self.f = k
def _xxx(self, special):
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and ch
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
def _fnt_def(self, k, c, s, d, a, l, n):
tfm = _tfmfile(n[-l:])
if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
raise ValueError, 'tfm checksum mismatch: %s'%n
# It seems that the assumption behind the following check is incorrect:
#if d != tfm.design_size:
# raise ValueError, 'tfm design size mismatch: %d in dvi, %d in %s'%\
# (d, tfm.design_size, n)
vf = _vffile(n[-l:])
self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)
def _post(self):
if self.state != _dvistate.outer:
raise ValueError, "misplaced post in dvi file"
self.state = _dvistate.post_post
# TODO: actually read the postamble and finale?
# currently post_post just triggers closing the file
def _post_post(self):
raise NotImplementedError
class DviFont(object):
"""
Object that holds a font's texname and size, supports comparison,
and knows the widths of glyphs in the same units as the AFM file.
There are also internal attributes (for use by dviread.py) that
are _not_ used for comparison.
The size is in Adobe points (converted from TeX points).
"""
__slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')
def __init__(self, scale, tfm, texname, vf):
self._scale, self._tfm, self.texname, self._vf = \
scale, tfm, texname, vf
self.size = scale * (72.0 / (72.27 * 2**16))
try:
nchars = max(tfm.width.iterkeys())
except ValueError:
nchars = 0
self.widths = [ (1000*tfm.width.get(char, 0)) >> 20
for char in range(nchars) ]
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.texname == other.texname and self.size == other.size
def __ne__(self, other):
return not self.__eq__(other)
def _width_of(self, char):
"""
Width of char in dvi units. For internal use by dviread.py.
"""
width = self._tfm.width.get(char, None)
if width is not None:
return _mul2012(width, self._scale)
matplotlib.verbose.report(
'No width for char %d in font %s' % (char, self.texname),
'debug')
return 0
class Vf(Dvi):
"""
A virtual font (\*.vf file) containing subroutines for dvi files.
Usage::
vf = Vf(filename)
glyph = vf[code]
glyph.text, glyph.boxes, glyph.width
"""
def __init__(self, filename):
Dvi.__init__(self, filename, 0)
self._first_font = None
self._chars = {}
self._packet_ends = None
self._read()
self.close()
def __getitem__(self, code):
return self._chars[code]
def _dispatch(self, byte):
# If we are in a packet, execute the dvi instructions
if self.state == _dvistate.inpage:
byte_at = self.file.tell()-1
if byte_at == self._packet_ends:
self._finalize_packet()
# fall through
elif byte_at > self._packet_ends:
raise ValueError, "Packet length mismatch in vf file"
else:
if byte in (139, 140) or byte >= 243:
raise ValueError, "Inappropriate opcode %d in vf file" % byte
Dvi._dispatch(self, byte)
return
# We are outside a packet
if byte < 242: # a short packet (length given by byte)
cc, tfm = self._arg(1), self._arg(3)
self._init_packet(byte, cc, tfm)
elif byte == 242: # a long packet
pl, cc, tfm = [ self._arg(x) for x in (4, 4, 4) ]
self._init_packet(pl, cc, tfm)
elif 243 <= byte <= 246:
Dvi._dispatch(self, byte)
elif byte == 247: # preamble
i, k = self._arg(1), self._arg(1)
x = self.file.read(k)
cs, ds = self._arg(4), self._arg(4)
self._pre(i, x, cs, ds)
elif byte == 248: # postamble (just some number of 248s)
self.state = _dvistate.post_post
else:
raise ValueError, "unknown vf opcode %d" % byte
def _init_packet(self, pl, cc, tfm):
if self.state != _dvistate.outer:
raise ValueError, "Misplaced packet in vf file"
self.state = _dvistate.inpage
self._packet_ends = self.file.tell() + pl
self._packet_char = cc
self._packet_width = tfm
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack, self.text, self.boxes = [], [], []
self.f = self._first_font
def _finalize_packet(self):
self._chars[self._packet_char] = mpl_cbook.Bunch(
text=self.text, boxes=self.boxes, width = self._packet_width)
self.state = _dvistate.outer
def _pre(self, i, x, cs, ds):
if self.state != _dvistate.pre:
raise ValueError, "pre command in middle of vf file"
if i != 202:
raise ValueError, "Unknown vf format %d" % i
if len(x):
matplotlib.verbose.report('vf file comment: ' + x, 'debug')
self.state = _dvistate.outer
# cs = checksum, ds = design size
def _fnt_def(self, k, *args):
Dvi._fnt_def(self, k, *args)
if self._first_font is None:
self._first_font = k
def _fix2comp(num):
"""
Convert from two's complement to negative.
"""
assert 0 <= num < 2**32
if num & 2**31:
return num - 2**32
else:
return num
def _mul2012(num1, num2):
"""
Multiply two numbers in 20.12 fixed point format.
"""
# Separated into a function because >> has surprising precedence
return (num1*num2) >> 20
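# Worked example of the fixed-point product above: with 20 fractional bits,
# 1.0 is represented as 1 << 20, so _mul2012(3 << 20, 2 << 20) == 6 << 20,
# i.e. 3.0 * 2.0 == 6.0.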
class Tfm(object):
"""
A TeX Font Metric file. This implementation covers only the bare
minimum needed by the Dvi class.
Attributes:
checksum: for verifying against dvi file
design_size: design size of the font (in what units?)
width[i]: width of character \#i, needs to be scaled
by the factor specified in the dvi file
(this is a dict because indexing may not start from 0)
height[i], depth[i]: height and depth of character \#i
"""
__slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')
def __init__(self, filename):
matplotlib.verbose.report('opening tfm file ' + filename, 'debug')
file = open(filename, 'rb')
try:
header1 = file.read(24)
lh, bc, ec, nw, nh, nd = \
struct.unpack('!6H', header1[2:14])
matplotlib.verbose.report(
'lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d' % (
lh, bc, ec, nw, nh, nd), 'debug')
header2 = file.read(4*lh)
self.checksum, self.design_size = \
struct.unpack('!2I', header2[:8])
# there is also encoding information etc.
char_info = file.read(4*(ec-bc+1))
widths = file.read(4*nw)
heights = file.read(4*nh)
depths = file.read(4*nd)
finally:
file.close()
self.width, self.height, self.depth = {}, {}, {}
widths, heights, depths = \
[ struct.unpack('!%dI' % (len(x)/4), x)
for x in (widths, heights, depths) ]
for i in range(ec-bc):
self.width[bc+i] = _fix2comp(widths[ord(char_info[4*i])])
self.height[bc+i] = _fix2comp(heights[ord(char_info[4*i+1]) >> 4])
self.depth[bc+i] = _fix2comp(depths[ord(char_info[4*i+1]) & 0xf])
class PsfontsMap(object):
"""
A psfonts.map formatted file, mapping TeX fonts to PS fonts.
Usage: map = PsfontsMap('.../psfonts.map'); map['cmr10']
For historical reasons, TeX knows many Type-1 fonts by different
names than the outside world. (For one thing, the names have to
fit in eight characters.) Also, TeX's native fonts are not Type-1
but Metafont, which is nontrivial to convert to PostScript except
as a bitmap. While high-quality conversions to Type-1 format exist
and are shipped with modern TeX distributions, we need to know
which Type-1 fonts are the counterparts of which native fonts. For
these reasons a mapping is needed from internal font names to font
file names.
A texmf tree typically includes mapping files called e.g.
psfonts.map, pdftex.map, dvipdfm.map. psfonts.map is used by
dvips, pdftex.map by pdfTeX, and dvipdfm.map by dvipdfm.
psfonts.map might avoid embedding the 35 PostScript fonts, while
the pdf-related files perhaps only avoid the "Base 14" pdf fonts.
But the user may have configured these files differently.
"""
__slots__ = ('_font',)
def __init__(self, filename):
self._font = {}
file = open(filename, 'rt')
try:
self._parse(file)
finally:
file.close()
def __getitem__(self, texname):
result = self._font[texname]
fn, enc = result.filename, result.encoding
if fn is not None and not fn.startswith('/'):
result.filename = find_tex_file(fn)
if enc is not None and not enc.startswith('/'):
result.encoding = find_tex_file(result.encoding)
return result
def _parse(self, file):
"""Parse each line into words."""
for line in file:
line = line.strip()
if line == '' or line.startswith('%'):
continue
words, pos = [], 0
while pos < len(line):
if line[pos] == '"': # double quoted word
pos += 1
end = line.index('"', pos)
words.append(line[pos:end])
pos = end + 1
else: # ordinary word
end = line.find(' ', pos+1)
if end == -1: end = len(line)
words.append(line[pos:end])
pos = end
while pos < len(line) and line[pos] == ' ':
pos += 1
self._register(words)
def _register(self, words):
"""Register a font described by "words".
The format is, AFAIK: texname fontname [effects and filenames]
Effects are PostScript snippets like ".177 SlantFont",
filenames begin with one or two less-than signs. A filename
ending in enc is an encoding file, other filenames are font
files. This can be overridden with a left bracket: <[foobar
indicates an encoding file named foobar.
There is some difference between <foo.pfb and <<bar.pfb in
subsetting, but I have no example of << in my TeX installation.
"""
texname, psname = words[:2]
effects, encoding, filename = [], None, None
for word in words[2:]:
if not word.startswith('<'):
effects.append(word)
else:
word = word.lstrip('<')
if word.startswith('['):
assert encoding is None
encoding = word[1:]
elif word.endswith('.enc'):
assert encoding is None
encoding = word
else:
assert filename is None
filename = word
self._font[texname] = mpl_cbook.Bunch(
texname=texname, psname=psname, effects=effects,
encoding=encoding, filename=filename)
class Encoding(object):
"""
Parses a \*.enc file referenced from a psfonts.map style file.
The format this class understands is a very limited subset of
PostScript.
Usage (subject to change)::
for name in Encoding(filename):
whatever(name)
"""
__slots__ = ('encoding',)
def __init__(self, filename):
file = open(filename, 'rt')
try:
matplotlib.verbose.report('Parsing TeX encoding ' + filename, 'debug-annoying')
self.encoding = self._parse(file)
matplotlib.verbose.report('Result: ' + `self.encoding`, 'debug-annoying')
finally:
file.close()
def __iter__(self):
for name in self.encoding:
yield name
def _parse(self, file):
result = []
state = 0
for line in file:
comment_start = line.find('%')
if comment_start > -1:
line = line[:comment_start]
line = line.strip()
if state == 0:
# Expecting something like /FooEncoding [
if '[' in line:
state = 1
line = line[line.index('[')+1:].strip()
if state == 1:
if ']' in line: # ] def
line = line[:line.index(']')]
state = 2
words = line.split()
for w in words:
if w.startswith('/'):
# Allow for /abc/def/ghi
subwords = w.split('/')
result.extend(subwords[1:])
else:
raise ValueError, "Broken name in encoding file: " + w
return result
def find_tex_file(filename, format=None):
"""
Call kpsewhich to find a file in the texmf tree.
If format is not None, it is used as the value for the --format option.
See the kpathsea documentation for more information.
Apparently most existing TeX distributions on Unix-like systems
use kpathsea. I hear MikTeX (a popular distribution on Windows)
doesn't use kpathsea, so what do we do? (TODO)
"""
cmd = ['kpsewhich']
if format is not None:
cmd += ['--format=' + format]
cmd += [filename]
matplotlib.verbose.report('find_tex_file(%s): %s' \
% (filename,cmd), 'debug')
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
result = pipe.communicate()[0].rstrip()
matplotlib.verbose.report('find_tex_file result: %s' % result,
'debug')
return result
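# A small illustrative call (assuming a TeX installation with kpathsea on the
# PATH); an empty string comes back when kpsewhich cannot locate the file:
#
#   tfm_path = find_tex_file('cmr10.tfm')
#   pfb_path = find_tex_file('cmr10.pfb', format='type1 fonts')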
def _read_nointr(pipe, bufsize=-1):
while True:
try:
return pipe.read(bufsize)
except OSError, e:
if e.errno == errno.EINTR:
continue
else:
raise
# With multiple text objects per figure (e.g. tick labels) we may end
# up reading the same tfm and vf files many times, so we implement a
# simple cache. TODO: is this worth making persistent?
_tfmcache = {}
_vfcache = {}
def _fontfile(texname, class_, suffix, cache):
try:
return cache[texname]
except KeyError:
pass
filename = find_tex_file(texname + suffix)
if filename:
result = class_(filename)
else:
result = None
cache[texname] = result
return result
def _tfmfile(texname):
return _fontfile(texname, Tfm, '.tfm', _tfmcache)
def _vffile(texname):
return _fontfile(texname, Vf, '.vf', _vfcache)
if __name__ == '__main__':
import sys
matplotlib.verbose.set_level('debug-annoying')
fname = sys.argv[1]
try: dpi = float(sys.argv[2])
except IndexError: dpi = None
dvi = Dvi(fname, dpi)
fontmap = PsfontsMap(find_tex_file('pdftex.map'))
for page in dvi:
print '=== new page ==='
fPrev = None
for x,y,f,c,w in page.text:
if f != fPrev:
print 'font', f.texname, 'scaled', f._scale/pow(2.0,20)
fPrev = f
print x,y,c, 32 <= c < 128 and chr(c) or '.', w
for x,y,w,h in page.boxes:
print x,y,'BOX',w,h
| agpl-3.0 |
avannaldas/QuickView | QuickView/DataFrameVisualizer.py | 1 | 5101 | '''
Title: QuickView
Purpose: Provides a Glance at the dataset with one line of code!
GitHub: http://github.com/avannaldas/QuickView
Author: Abhijit Annaldas (Twitter @avannaldas)
'''
import pandas as _pd
import matplotlib.pyplot as _plt
from matplotlib.pyplot import cm
'''Number of rows in the dataframe'''
row_count = -1
'''Number of columns in the dataframe'''
column_count = -1
''' List of numeric column names'''
numeric_column_names = []
'''List of text column names'''
text_column_names = []
'''List of categorical column names'''
categorical_column_names = []
'''Dict of Column names and distinct categorical values'''
categorical_column_values = dict()
''' Dict of Column names and number of distinct categorical values'''
categorical_column_values_count = dict()
'''Pandas dataframe object containing rows with at least one null/na values'''
rows_with_nulls = _pd.DataFrame()
'''Dict of Column names and number of null/na values'''
columnwise_null_values_count = dict()
'''Pandas dataframe object with min, max, mean and std of all numeric columns'''
min_max_mean_std = _pd.DataFrame()
'''Indicates whether the visualization is complete, values for all the member variables are updated after visualize() method is complete'''
loaded = False
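# A minimal usage sketch (illustrative, not part of the original module): the
# module-level summaries above are meant to be populated by a single call,
# roughly along the lines of
#
#   import pandas as pd
#   from QuickView import DataFrameVisualizer as dfv
#   dfv.visualize(pd.read_csv('data.csv'))   # 'data.csv' is a placeholder
#
# after which dfv.numeric_column_names, dfv.min_max_mean_std, etc. hold the
# computed values.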
#
def visualize(df, print_summaries=True, cat_pthreshold=5, cat_cthreshold=-1):
    global row_count, column_count, numeric_column_names, text_column_names, \
        categorical_column_names, categorical_column_values, \
        categorical_column_values_count, rows_with_nulls, \
        columnwise_null_values_count, min_max_mean_std, loaded
    row_count = df.shape[0]
column_count = df.shape[1]
rows_with_nulls = df[_pd.isnull(df).any(axis=1)]
numeric_types = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64', 'int', 'float', 'long', 'complex']
numeric_column_names = list(df.select_dtypes(include=numeric_types).columns)
__text_column_names = list(df.select_dtypes(exclude=numeric_types).columns)
for c in __text_column_names:
if (((df[c].nunique()/row_count) * 100) < cat_pthreshold) or (df[c].nunique() < cat_cthreshold):
categorical_column_names.append(c)
categorical_column_values[c] = df[c].unique()
categorical_column_values_count[c] = df[c].nunique()
else:
text_column_names.append(c)
for c in list(df.columns):
if df[c].isnull().sum() > 0:
columnwise_null_values_count[c] = df[c].isnull().sum()
minSumm = df.min(axis=0, skipna=True, numeric_only=True)
maxSumm = df.max(axis=0, skipna=True, numeric_only=True)
meanSumm = df.mean(axis=0, skipna=True, numeric_only=True)
stdSumm = df.std(axis=0, skipna=True, numeric_only=True)
min_max_mean_std = _pd.DataFrame(dict(min=minSumm, max=maxSumm, mean=meanSumm, std=stdSumm), index=stdSumm.index)
loaded = True
if print_summaries == True:
print()
print('Here is a summary of the Dataset, aka Quick View :P ...')
print()
print('Rows count: ' + str(row_count))
print('Columns count: ' + str(column_count))
print()
print('Number of rows having null value(s): ' + str(rows_with_nulls.shape[0]))
print()
print('Numeric Columns: ' + (', '.join(numeric_column_names)))
print()
print('Categorical Columns: ' + (', '.join(categorical_column_names)))
print()
print('Text Columns: ' + (', '.join(text_column_names)))
print()
print('Columns with null values...')
for k, v in columnwise_null_values_count.items():
print(k + ' : ' + str(v))
print()
print('Distinct values in categorical columns...')
for col, vals in categorical_column_values.items():
print('{Column Name}: ' + col + ' {Values}: ' + ((', ').join(str(v) for v in vals)))
print()
print('Min, Max, Mean and std...')
print(min_max_mean_std)
print()
# START PLOTTING
# Column-wise plot null count
_plt.bar(range(len(columnwise_null_values_count)), columnwise_null_values_count.values(), align='center')
_plt.xticks(range(len(columnwise_null_values_count)), columnwise_null_values_count.keys(), rotation='vertical')
_plt.xlabel('Column names')
_plt.ylabel('# of Null values')
_plt.title('Column-wise null value counts')
_plt.tight_layout()
_plt.show()
print()
# Column-wise plot unique categorical values count
_plt.bar(range(len(categorical_column_values_count)), categorical_column_values_count.values(), align='center')
_plt.xticks(range(len(categorical_column_values_count)), categorical_column_values_count.keys(), rotation='vertical')
_plt.xlabel('Categorical Column names')
_plt.ylabel('# of Unique Categorical values')
_plt.title('Categorical Columns and their unique categorical value counts')
_plt.tight_layout()
_plt.show()
print()
# Correlation Matrix...
corr = df.corr()
fig, ax = _plt.subplots(figsize=(8, 8))
cb = ax.matshow(corr, interpolation='nearest', cmap=cm.Blues)
fig.colorbar(cb)
_plt.xticks(range(len(corr.columns)), corr.columns, rotation='vertical')
_plt.yticks(range(len(corr.columns)), corr.columns)
_plt.xlabel('Correlation Matrix')
_plt.tight_layout()
_plt.show()
print() | mit |
hsiaoyi0504/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 261 | 2836 | # Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
shusenl/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
smrjan/seldon-server | docker/examples/iris/vw/vw_pipeline.py | 2 | 1126 | import sys, getopt, argparse
import seldon.pipeline.basic_transforms as bt
import seldon.pipeline.auto_transforms as pauto
import seldon.pipeline.util as sutl
from sklearn.pipeline import Pipeline
import seldon.vw as vw
import sys
def run_pipeline(events,models):
tNameId = bt.Feature_id_transform(min_size=0,exclude_missing=True,zero_based=True,input_feature="name",output_feature="nameId")
tAuto = pauto.Auto_transform(max_values_numeric_categorical=2,exclude=["nameId","name"])
vwc = vw.VWClassifier(target="nameId",target_readable="name")
transformers = [("tName",tNameId),("tAuto",tAuto),("vw",vwc)]
p = Pipeline(transformers)
pw = sutl.Pipeline_wrapper()
df = pw.create_dataframe_from_files(events)
df2 = p.fit(df)
pw.save_pipeline(p,models)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='vw_pipeline')
parser.add_argument('--events', help='events folder', required=True)
parser.add_argument('--models', help='output models folder', required=True)
args = parser.parse_args()
opts = vars(args)
run_pipeline([args.events],args.models)
| apache-2.0 |
polyanskiy/refractiveindex.info-scripts | scripts/Rakic 1998 - Cr (BB model).py | 1 | 2587 | # -*- coding: utf-8 -*-
# Author: Mikhail Polyanskiy
# Last modified: 2017-04-02
# Original data: Rakić et al. 1998, https://doi.org/10.1364/AO.37.005271
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import wofz as w
π = np.pi
# Brendel-Bormann (BB) model parameters
ωp = 10.75 #eV
f0 = 0.154
Γ0 = 0.048 #eV
f1 = 0.338
Γ1 = 4.256 #eV
ω1 = 0.281 #eV
σ1 = 0.115 #eV
f2 = 0.261
Γ2 = 3.957 #eV
ω2 = 0.584 #eV
σ2 = 0.252 #eV
f3 = 0.817
Γ3 = 2.218 #eV
ω3 = 1.919 #eV
σ3 = 0.225 #eV
f4 = 0.105
Γ4 = 6.983 #eV
ω4 = 6.997 #eV
σ4 = 4.903 #eV
Ωp = f0**.5 * ωp #eV
def BB(ω): #ω: eV
ε = 1-Ωp**2/(ω*(ω+1j*Γ0))
α = (ω**2+1j*ω*Γ1)**.5
za = (α-ω1)/(2**.5*σ1)
zb = (α+ω1)/(2**.5*σ1)
ε += 1j*π**.5*f1*ωp**2 / (2**1.5*α*σ1) * (w(za)+w(zb)) #χ1
α = (ω**2+1j*ω*Γ2)**.5
za = (α-ω2)/(2**.5*σ2)
zb = (α+ω2)/(2**.5*σ2)
ε += 1j*π**.5*f2*ωp**2 / (2**1.5*α*σ2) * (w(za)+w(zb)) #χ2
α = (ω**2+1j*ω*Γ3)**.5
za = (α-ω3)/(2**.5*σ3)
zb = (α+ω3)/(2**.5*σ3)
ε += 1j*π**.5*f3*ωp**2 / (2**1.5*α*σ3) * (w(za)+w(zb)) #χ3
α = (ω**2+1j*ω*Γ4)**.5
za = (α-ω4)/(2**.5*σ4)
zb = (α+ω4)/(2**.5*σ4)
ε += 1j*π**.5*f4*ωp**2 / (2**1.5*α*σ4) * (w(za)+w(zb)) #χ4
return ε
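# For reference, the BB(ω) function above evaluates the Brendel-Bormann model
#
#   ε(ω) = 1 - Ωp**2/(ω(ω + i·Γ0)) + Σ_j χ_j(ω),
#
# where each bound-electron term χ_j is a Lorentz oscillator convolved with a
# Gaussian of width σ_j, expressed through the Faddeeva function w(z)
# (scipy.special.wofz), following Rakić et al. 1998.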
ev_min=0.02
ev_max=5
npoints=200
eV = np.logspace(np.log10(ev_min), np.log10(ev_max), npoints)
μm = 4.13566733e-1*2.99792458/eV
ε = BB(eV)
n = (ε**.5).real
k = (ε**.5).imag
#============================ DATA OUTPUT =================================
file = open('out.txt', 'w')
for i in range(npoints-1, -1, -1):
file.write('\n {:.4e} {:.4e} {:.4e}'.format(μm[i],n[i],k[i]))
file.close()
#=============================== PLOT =====================================
plt.rc('font', family='Arial', size='14')
plt.figure(1)
plt.plot(eV, -ε.real, label="-ε1")
plt.plot(eV, ε.imag, label="ε2")
plt.xlabel('Photon energy (eV)')
plt.ylabel('ε')
plt.xscale('log')
plt.yscale('log')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot n,k vs eV
plt.figure(2)
plt.plot(eV, n, label="n")
plt.plot(eV, k, label="k")
plt.xlabel('Photon energy (eV)')
plt.ylabel('n, k')
plt.yscale('log')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot n,k vs μm
plt.figure(3)
plt.plot(μm, n, label="n")
plt.plot(μm, k, label="k")
plt.xlabel('Wavelength (μm)')
plt.ylabel('n, k')
plt.xscale('log')
plt.yscale('log')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0) | gpl-3.0 |
NelisVerhoef/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
fourwood/OutflowCone | example/cone_example.py | 1 | 7656 | #!/usr/bin/env python3
# Should be all Python3 compatible, and I'd suggest keeping it that way.
# However, it should *also* be all Python2.7 compatible, as well.
from __future__ import print_function, division
import numpy as np
import OutflowCone as oc
# How many clouds to generate in the cone model?
# 1e4 gives a *really, really* rough result, but probably okay for seeing things.
# 1e5 is pretty fine, but still sees slight variation with different random seeds.
# 1e6 is pretty smooth. Takes a bit longer, though, of course.
n_points = 1e5
inc = 45
PA = 90
theta = 60
r_in = 0.5
r_out = 6.0
# Makes sure the bottom of the cone is at the midplane:
zero_z = True
# Makes the bottom of the cone flat, rather than a sector of a spherical surface:
flatten = True
vel_vec = np.array([30., 0, 225.])
# Rotation curve paramters
C, p = (0.5, 1.35)
accel = vel_vec[0]
# The code implicitly assumes the line-of-sight is directed along -z. A.K.A. anything
# moving _into_ the LOS is moving in the +z direction. So this next variable is just
# a reminder, and you probably shouldn't try to change this without going into
# the model code.
#LOS = np.array([0, 0, -1])
## LOS grid
# ~plate scale in kpc/pix
step = 0.1
# Xs, aka RA
x_min, x_max = (-1, 1)
# This will represent the lower bin edges, essentially.
sky_xs = np.arange(0, x_max-x_min, step)
nxs = len(sky_xs)
sky_xs -= step * (nxs//2)
# Ys, aka dec
y_min, y_max = (-1, 1)
# This will represent the lower bin edges, essentially.
sky_ys = np.arange(0, y_max-y_min, step)
nys = len(sky_ys)
sky_ys -= step * (nys//2)
cone = oc.Cone(inc=inc, PA=PA, theta=theta, r_in=r_in, r_out=r_out)
# 'flatten' means make it so the cone is flat on the bottom at a disk height of r_in;
# otherwise it'll be a spherical surface of radius r_in. If zero_z is true, the minimum
# z-height of the cone will be 0, otherwise it'll be some non-zero value based on r_in.
cone.GenerateClouds(n_points, flatten=flatten, zero_z=zero_z)
# Make a rotation curve for each point.
sph_vels = np.repeat(np.asarray([vel_vec]), cone._n_clouds, axis=0)
R_max0 = r_in * np.sin(np.radians(theta))
R_maxs = R_max0 + cone._local_zs * np.tan(np.radians(theta))
Rs = np.sqrt(cone._local_xs**2+cone._local_ys**2)
R0s = R_max0 * Rs / R_maxs
rot_vels = oc.RotCurve(vel_vec[2], R0s, C=C, p=p)
rot_vels *= R0s / cone._local_rs #Ang. mom. cons.
sph_vels[:,2] = rot_vels
R_0 = 1. # kpc normalization for acceleration
sph_vels[:,0] += 1.0 * accel * (cone._local_rs / R_0)
# This function will convert the spherical-coordinate velocities
# into Cartesian, accounting for inclination.
cone.SetLocalVels(sph_vels)
## LOS is ~hard-coded, so these are equivalent.
#proj_vels = -cone._vzs
proj_vels = cone.LOS_vs
# Now we're gonna bin up velocities in the LOS grid and do some convolutions.
max_vels = np.zeros((nys, nxs))
fwhms = np.zeros_like(max_vels)
radii = np.sqrt(cone._xs**2 + cone._ys**2 + cone._zs**2)
weights = 1/radii**2 # 1/r^2 weighting approximates photoionization
inst_res = 17.0
turb_sigma = 90.0
total_broadening = np.sqrt(inst_res**2 + turb_sigma**2)
for j, y in enumerate(sky_ys):
for i, x in enumerate(sky_xs):
masked_vels = cone.GetLOSCloudVels((x, y), step)
        mask = ~masked_vels.mask  # masked_vels is a masked array; we want the points that *aren't* masked
bin_vels = masked_vels[mask]
bin_weights = weights[mask]
bin_radii = radii[mask]
if len(bin_vels) > 0:
v_min, v_max = (-500., 500.) # Enough to encompass everything...
bin_size = 5. # but probably shouldn't be hard-coded.
            hist_bins = np.linspace(v_min, v_max, int((v_max-v_min)/bin_size)+1)
n, edges = np.histogram(bin_vels, bins=hist_bins, weights=bin_weights)
# Convolve the velocity profile with turb/inst broadening
            gauss_bins = np.linspace(10*v_min,
                                     10*v_max,
                                     int((10*v_max-10*v_min)/bin_size)+1)
gauss = np.exp(-gauss_bins**2/(2*total_broadening**2)) / \
(total_broadening*np.sqrt(2*np.pi)) * bin_size
convol = np.convolve(n, gauss, mode='same')
## Estimate the FWHM.
#g_max_idx = np.where(convol == np.max(convol))[0][0]
#g_max = convol[g_max_idx]
#g_hmax = g_max / 2.
#left_hmax_idx = np.abs(convol[:g_max_idx]-g_hmax).argmin()
#right_hmax_idx = np.abs(convol[g_max_idx:]-g_hmax).argmin() + g_max_idx
#fwhm_est = gauss_bins[right_hmax_idx] - gauss_bins[left_hmax_idx]
#fwhms[j,i] = fwhm_est
max_vels[j,i] = np.average(gauss_bins, weights=convol)
else:
max_vels[j,i] = np.nan
## Print some stats:
#print("Outflow velocity stats:")
#print("Minimum:\t\t{:0.1f}".format(np.min(sph_vels[:,0])))
#print("Maximum:\t\t{:0.1f}".format(np.max(sph_vels[:,0])))
#print("Median:\t\t\t{:0.1f}".format(np.median(sph_vels[:,0])))
#print("Average:\t\t{:0.1f}".format(np.average(sph_vels[:,0])))
#print("1/r^2 weighted-average:\t{:0.1f}".format(np.average(sph_vels[:,0],
# weights=1/radii**2)))
###############
## Plotting! ##
###############
import pylab as plt
import matplotlib as mpl
import matplotlib.ticker as ticker
cmap = plt.cm.RdBu_r
cmap_vals = cmap(np.arange(256))
scale = 0.75
new_cmap_vals = np.array([[scale*val[0],
scale*val[1],
scale*val[2],
scale*val[3]] for val in cmap_vals])
new_cmap = mpl.colors.LinearSegmentedColormap.from_list("new_cmap", new_cmap_vals)
params = {'figure.subplot.left': 0.10,
'figure.subplot.right': 0.93,
'figure.subplot.bottom': 0.12,
'figure.subplot.top': 0.97,
'text.usetex': True,
'font.size': 10}
plt.rcParams.update(params)
figsize = (3.35, 2.75)
fig, ax = plt.subplots(figsize=figsize)
## LOS grid
#for y in sky_ys:
# for x in sky_xs:
# plt.plot([x, x], [y_min, y_max],
# 'k-', linewidth=0.1, zorder=0.1)
# plt.plot([x_min, x_max], [y, y],
# 'k-', linewidth=0.1, zorder=0.1)
v_min, v_max = (-150, 150)
# Main image
n_levels = 41
levels = np.linspace(v_min, v_max, n_levels)
extents = (sky_xs[0], sky_xs[-1]+step,
sky_ys[0], sky_ys[-1]+step)
img = ax.contourf(max_vels, cmap=cmap, vmin=v_min, vmax=v_max,
origin='lower', zorder=0, levels=levels, extend='both',
extent=extents)
# Contour overlay
c_levels = 11
c_levels = np.linspace(v_min, v_max, c_levels)
contours = ax.contour(max_vels, cmap=new_cmap, vmin=v_min, vmax=v_max,
origin='lower', levels=c_levels, extend='both',
extent=extents)
# Crosshairs
ax.plot([x_min, x_max], [0, 0], 'k:', scalex=False, scaley=False)
ax.plot([0, 0], [y_min, y_max], 'k:', scalex=False, scaley=False)
# Colorbar
ticks = np.arange(v_min, v_max+1, 50)
ticks = c_levels
bar = plt.colorbar(img, ticks=ticks, pad=0.025)
bar.set_label(r'Velocity (km s$^{-1}$)', labelpad=3.0)
plt.xlabel('Distance (kpc)', labelpad=0.)
plt.ylabel('Distance (kpc)', labelpad=-5.)
x_maj_ticks = ticker.MaxNLocator(2)
x_min_ticks = ticker.AutoMinorLocator(10)
y_maj_ticks = ticker.MaxNLocator(2)
y_min_ticks = ticker.AutoMinorLocator(10)
ax.xaxis.set_major_locator(x_maj_ticks)
ax.xaxis.set_minor_locator(x_min_ticks)
ax.yaxis.set_major_locator(y_maj_ticks)
ax.yaxis.set_minor_locator(y_min_ticks)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
plt.savefig("cone_example.eps")
plt.close()
#plt.show()
| mit |
LennonLab/simplex | tools/DiversityTools/macroecotools/macroecotools.py | 12 | 16643 | """Module for conducting standard macroecological plots and analyses"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import colorsys
from numpy import log10
import pandas
import numpy
import math
def AIC(k, L):
"""Computes the Akaike Information Criterion.
Keyword arguments:
L -- log likelihood value of given distribution.
k -- number of fitted parameters.
"""
AIC = 2 * k - 2 * L
return AIC
def AICc(k, L, n):
"""Computes the corrected Akaike Information Criterion.
Keyword arguments:
L -- log likelihood value of given distribution.
k -- number of fitted parameters.
n -- number of observations.
"""
AICc = 2 * k - 2 * L + 2 * k * (k + 1) / (n - k - 1)
return AICc
def aic_weight(AICc_list, n, cutoff = 4):
"""Computes Akaike weight for one model relative to others
Based on information from Burnham and Anderson (2002).
Keyword arguments:
n -- number of observations.
cutoff -- minimum number of observations required to generate a weight.
AICc_list -- list of AICc values for each model
"""
if n < cutoff:
AICc_weights = None
else:
AICc_min = min(AICc_list) # Minimum AICc value for the entire list
relative_likelihoods = []
for AICc in AICc_list:
delta_AICc = AICc - AICc_min
relative_likelihood = np.exp(-(delta_AICc)/2)
relative_likelihoods.append(relative_likelihood)
relative_likelihoods = np.array(relative_likelihoods)
AICc_weights = relative_likelihoods / sum(relative_likelihoods)
return(AICc_weights)
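# A short illustrative call (not part of the original module): given AICc
# values for three candidate models fit to n = 50 observations,
#
#   weights = aic_weight([104.2, 106.9, 112.4], 50)
#
# returns an array of relative Akaike weights that sums to 1, with the first
# (lowest-AICc) model receiving the largest weight.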
def get_pred_iterative(cdf_obs, dist, *pars):
"""Function to get predicted abundances (reverse-sorted) for distributions with no analytical ppf."""
cdf_obs = np.sort(cdf_obs)
abundance = list(np.empty([len(cdf_obs)]))
j = 0
cdf_cum = 0
i = 1
while j < len(cdf_obs):
cdf_cum += dist.pmf(i, *pars)
while cdf_cum >= cdf_obs[j]:
abundance[j] = i
j += 1
if j == len(cdf_obs):
abundance.reverse()
return np.array(abundance)
i += 1
def get_rad_from_cdf(dist, S, *args):
"""Return a predicted rank-abundance distribution from a theoretical CDF
Keyword arguments:
dist -- a distribution class
S -- the number of species for which the RAD should be predicted. Should
match the number of species in the community if comparing to empirical data.
args -- arguments for dist
Finds the predicted rank-abundance distribution that results from a
theoretical cumulative distribution function, by rounding the value of the
cdf evaluated at 1 / S * (Rank - 0.5) to the nearest integer
"""
emp_cdf = [(S - i + 0.5) / S for i in range(1, S + 1)]
try: rad = int(np.round(dist.ppf(emp_cdf, *args)))
except: rad = get_pred_iterative(emp_cdf, dist, *args)
return np.array(rad)
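# A hedged usage sketch (assumes scipy is available): predict a 20-species
# rank-abundance distribution from a log-series CDF with parameter p = 0.9,
#
#   import scipy.stats as stats
#   pred_rad = get_rad_from_cdf(stats.logser, 20, 0.9)
#
# pred_rad then holds 20 integer abundances in descending rank order.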
def get_emp_cdf(dat):
"""Compute the empirical cdf given a list or an array"""
dat = np.array(dat)
emp_cdf = []
for point in dat:
point_cdf = len(dat[dat <= point]) / len(dat)
emp_cdf.append(point_cdf)
return np.array(emp_cdf)
def hist_pmf(xs, pmf, bins):
"""Create a histogram based on a probability mass function
Creates a histogram by combining the pmf values inside a series of bins
Args:
xs: Array-like list of x values that the pmf values come from
pmf: Array-like list of pmf values associate with the values of x
bins: Array-like list of bin edges
Returns:
hist: Array of values of the histogram
bin_edges: Array of value of the bin edges
"""
xs = np.array(xs)
pmf = np.array(pmf)
bins = np.array(bins)
hist = []
for lower_edge_index in range(len(bins) - 1):
if lower_edge_index + 1 == len(bins):
hist.append(sum(pmf[(xs >= bins[lower_edge_index]) &
(xs <= bins[lower_edge_index + 1])]))
else:
hist.append(sum(pmf[(xs >= bins[lower_edge_index]) &
(xs < bins[lower_edge_index + 1])]))
hist = np.array(hist)
return (hist, bins)
def plot_rad(Ns):
"""Plot a rank-abundance distribution based on a vector of abundances"""
Ns.sort(reverse=True)
rank = range(1, len(Ns) + 1)
plt.plot(rank, Ns, 'bo-')
plt.xlabel('Rank')
plt.ylabel('Abundance')
def get_rad_data(Ns):
"""Provide ranks and relative abundances for a vector of abundances"""
Ns = np.array(Ns)
Ns_sorted = -1 * np.sort(-1 * Ns)
relab_sorted = Ns_sorted / sum(Ns_sorted)
rank = range(1, len(Ns) + 1)
return (rank, relab_sorted)
def preston_sad(abund_vector, b=None, normalized = 'no'):
"""Plot histogram of species abundances on a log2 scale"""
if b == None:
q = np.exp2(list(range(0, 25)))
b = q [(q <= max(abund_vector)*2)]
if normalized == 'no':
hist_ab = np.histogram(abund_vector, bins = b)
if normalized == 'yes':
hist_ab_norm = np.histogram(abund_vector, bins = b)
hist_ab_norm1 = hist_ab_norm[0]/(b[0:len(hist_ab_norm[0])])
hist_ab_norm2 = hist_ab_norm[1][0:len(hist_ab_norm[0])]
hist_ab = (hist_ab_norm1, hist_ab_norm2)
return hist_ab
def plot_SARs(list_of_A_and_S):
"""Plot multiple SARs on a single plot.
Input: a list of lists, each sublist contains one vector for S and one vector for A.
Output: a graph with SARs plotted on log-log scale, with colors spanning the spectrum.
"""
N = len(list_of_A_and_S)
HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)]
RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)
for i in range(len(list_of_A_and_S)):
sublist = list_of_A_and_S[i]
plt.loglog(sublist[0], sublist[1], color = RGB_tuples[i])
plt.hold(False)
plt.xlabel('Area')
plt.ylabel('Richness')
def points_on_circle(center, radius, n = 100):
"""Generate n equally spaced points on a circle, given its center and radius."""
x0, y0 = center
points = [(math.cos(2 * math.pi / n * x) * radius + x0, math.sin(2 * math.pi / n * x) * radius + y0) for x in xrange(0, n + 1)]
return points
def count_pts_within_radius(x, y, radius, logscale=0):
"""Count the number of points within a fixed radius in 2D space"""
#TODO: see if we can improve performance using KDTree.query_ball_point
#http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query_ball_point.html
#instead of doing the subset based on the circle
unique_points = set([(x[i], y[i]) for i in range(len(x))])
count_data = []
logx, logy, logr = log10(x), log10(y), log10(radius)
for a, b in unique_points:
if logscale == 1:
loga, logb = log10(a), log10(b)
num_neighbors = len(x[((logx - loga) ** 2 +
(logy - logb) ** 2) <= logr ** 2])
else:
num_neighbors = len(x[((x - a) ** 2 + (y - b) ** 2) <= radius ** 2])
count_data.append((a, b, num_neighbors))
return count_data
def plot_color_by_pt_dens(x, y, radius, loglog=0, plot_obj=None):
"""Plot bivariate relationships with large n using color for point density
Inputs:
x & y -- variables to be plotted
radius -- the linear distance within which to count points as neighbors
loglog -- a flag to indicate the use of a loglog plot (loglog = 1)
The color of each point in the plot is determined by the logarithm (base 10)
of the number of points that occur with a given radius of the focal point,
with hotter colors indicating more points. The number of neighboring points
is determined in linear space regardless of whether a loglog plot is
presented.
"""
plot_data = count_pts_within_radius(x, y, radius, loglog)
sorted_plot_data = np.array(sorted(plot_data, key=lambda point: point[2]))
if plot_obj == None:
plot_obj = plt.axes()
if loglog == 1:
plot_obj.set_xscale('log')
plot_obj.set_yscale('log')
plot_obj.scatter(sorted_plot_data[:, 0], sorted_plot_data[:, 1],
c = np.sqrt(sorted_plot_data[:, 2]), edgecolors='none')
plot_obj.set_xlim(min(x) * 0.5, max(x) * 2)
plot_obj.set_ylim(min(y) * 0.5, max(y) * 2)
else:
plot_obj.scatter(sorted_plot_data[:, 0], sorted_plot_data[:, 1],
c = log10(sorted_plot_data[:, 2]), edgecolors='none')
return plot_obj
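# A brief illustrative call (not from the original module): for two long
# paired vectors x and y, shade each point by its local point density within
# a radius of 0.5 and show the result on log-log axes,
#
#   ax = plot_color_by_pt_dens(x, y, 0.5, loglog=1)
#   plt.show()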
def e_var(abundance_data):
"""Calculate Smith and Wilson's (1996; Oikos 76:70-82) evenness index (Evar)
Input:
abundance_data = list of abundance fo all species in a community
"""
S = len(abundance_data)
ln_nj_over_S=[]
for i in range(0, S):
v1 = (np.log(abundance_data[i]))/S
ln_nj_over_S.append(v1)
ln_ni_minus_above=[]
for i in range(0, S):
v2 = ((np.log(abundance_data[i])) - sum(ln_nj_over_S)) ** 2
ln_ni_minus_above.append(v2)
return(1 - ((2 / np.pi) * np.arctan(sum(ln_ni_minus_above) / S)))
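# Worked check (illustrative): a perfectly even community gives Evar = 1,
# e.g. e_var([20, 20, 20, 20]) == 1.0, while increasingly uneven abundance
# vectors push the index towards 0.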
def obs_pred_rsquare(obs, pred):
"""Determines the prop of variability in a data set accounted for by a model
In other words, this determines the proportion of variation explained by
the 1:1 line in an observed-predicted plot.
"""
return 1 - sum((obs - pred) ** 2) / sum((obs - np.mean(obs)) ** 2)
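# Quick illustration (not in the original module): if pred equals obs exactly
# the function returns 1.0; systematic departures from the 1:1 line reduce the
# value, and it can go negative when the model fits worse than the mean of obs.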
def obs_pred_mse(obs, pred):
"""Calculate the mean squared error of the prediction given observation."""
return sum((obs - pred) ** 2) / len(obs)
def comp_ed (spdata1,abdata1,spdata2,abdata2):
"""Calculate the compositional Euclidean Distance between two sites
Ref: Thibault KM, White EP, Ernest SKM. 2004. Temporal dynamics in the
structure and composition of a desert rodent community. Ecology. 85:2649-2655.
"""
abdata1 = (abdata1 * 1.0) / sum(abdata1)
abdata2 = (abdata2 * 1.0) / sum(abdata2)
intersect12 = set(spdata1).intersection(spdata2)
setdiff12 = np.setdiff1d(spdata1,spdata2)
setdiff21 = np.setdiff1d(spdata2,spdata1)
relab1 = np.concatenate(((abdata1[np.setmember1d(spdata1,list(intersect12)) == 1]),
abdata1[np.setmember1d(spdata1,setdiff12)],
np.zeros(len(setdiff21))))
relab2 = np.concatenate((abdata2[np.setmember1d(spdata2,list(intersect12)) == 1],
np.zeros(len(setdiff12)),
abdata2[np.setmember1d(spdata2,setdiff21)]))
return np.sqrt(sum((relab1 - relab2) ** 2))
def calc_comp_eds(ifile, fout):
"""Calculate Euclidean distances in species composition across sites.
Determines the Euclidean distances among all possible pairs of sites and
saves the results to a file
Inputs:
ifile -- ifile = np.genfromtxt(input_filename, dtype = "S15,S15,i8",
names = ['site','species','ab'], delimiter = ",")
fout -- fout = csv.writer(open(output_filename,'ab'))
"""
#TODO - Remove reliance on on names of columns in input
# Possibly move to 3 1-D arrays for input rather than the 2-D with 3 columns
# Return result rather than writing to file
usites = np.sort(list(set(ifile["site"])))
for i in range (0, len(usites)-1):
spdata1 = ifile["species"][ifile["site"] == usites[i]]
abdata1 = ifile["ab"][ifile["site"] == usites[i]]
for a in range (i+1,len(usites)):
spdata2 = ifile["species"][ifile["site"] == usites[a]]
abdata2 = ifile["ab"][ifile["site"] == usites[a]]
ed = comp_ed (spdata1,abdata1,spdata2,abdata2)
results = np.column_stack((usites[i], usites[a], ed))
fout.writerows(results)
def combined_spID(*species_identifiers):
"""Return a single column unique species identifier
Creates a unique species identifier based on one or more columns of a
data frame that represent the unique species ID.
Args:
species_identifiers: A tuple containing one or pieces of a unique
species identifier or lists of these pieces.
Returns:
A single unique species identifier or a list of single identifiers
"""
# Make standard input data types capable of element wise summation
input_type = type(species_identifiers[0])
assert input_type in [list, tuple, str, pandas.core.series.Series, numpy.ndarray]
if input_type is not str:
species_identifiers = [pandas.Series(identifier) for identifier in species_identifiers]
single_identifier = species_identifiers[0]
if len(species_identifiers) > 1:
for identifier in species_identifiers[1:]:
single_identifier += identifier
if input_type == numpy.ndarray:
single_identifier = numpy.array(single_identifier)
else:
single_identifier = input_type(single_identifier)
return single_identifier
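# A small worked example (illustrative): combining genus and species lists
#
#   combined_spID(['Dipodomys', 'Dipodomys'], ['ordii', 'merriami'])
#
# returns ['Dipodomysordii', 'Dipodomysmerriami']; a single string input such
# as combined_spID('Dipodomys ordii') is passed through unchanged.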
def richness_in_group(composition_data, group_cols, spid_cols):
"""Determine the number of species in a grouping (e.g., at each site)
Counts the number of species grouped at one or more levels. For example,
    the number of species occurring at each of a series of sites or in each of
a series of years.
If a combination of grouping variables is not present in the data, then no
values will be returned for that combination. In other words, if these
missing combinations should be treated as zeros, this will need to be
handled elsewhere.
Args:
composition_data: A Pandas data frame with one or more columns with
information on species identity and one or more columns with
information on the groups, e.g., years or sites.
        group_cols: A list of strings of the names of the columns in
composition_data that hold the grouping fields.
spid_cols: A list of strings of the names of the columns in
composition_data that hold the data on species ID. This could be a
single column with a unique ID or two columns containing the latin binomial.
Returns:
A data frame with the grouping fields and the species richness
"""
spid_series = [composition_data[spid_col] for spid_col in spid_cols]
single_spid = combined_spID(*spid_series)
composition_data['_spid'] = single_spid
richness = composition_data.groupby(group_cols)._spid.nunique()
richness = richness.reset_index()
richness.columns = group_cols + ['richness']
del composition_data['_spid']
return richness
def abundance_in_group(composition_data, group_cols, abund_col=None):
"""Determine the number of individuals in a grouping (e.g., at each site)
Counts the number of individuals grouped at one or more levels. For example,
    the number of species occurring at each of a series of sites or in each of
a series of genus-species combinations.
If a combination of grouping variables is not present in the data, then no
values will be returned for that combination. In other words, if these
missing combinations should be treated as zeros, this will need to be
handled elsewhere.
Args:
composition_data: A Pandas data frame with one or more columns with
information on species identity and one or more columns with
information on the groups, e.g., years or sites, and a column
containing the abundance value.
        group_cols: A list of strings of the names of the columns in
composition_data that hold the grouping fields.
abund_col: A column containing abundance data. If this column is not
provided then it is assumed that the abundances are to be obtained
by counting the number of rows in the group (e.g., there is one
sample per individual in many ecological datasets)
Returns:
        A data frame with the grouping fields and the abundance in each group
"""
if abund_col:
abundance = composition_data[group_cols + abund_col].groupby(group_cols).sum()
else:
abundance = composition_data[group_cols].groupby(group_cols).size()
abundance = pandas.DataFrame(abundance)
abundance.columns = ['abundance']
abundance = abundance.reset_index()
return abundance
| mit |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/numpy/lib/npyio.py | 23 | 73862 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter, index as opindex
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
has_nested_fields, flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode, is_pathlib_path
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(file, *args, **kwargs):
"""
Create a ZipFile.
Allows for Zip64, and the `file` argument can accept file, str, or
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
if is_pathlib_path(file):
file = str(file)
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(file, *args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object, string, or pathlib.Path
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif is_pathlib_path(file):
fid = file.open("rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of NumPy arrays is loaded
# in. Pickle does not pass on the encoding information to
# NumPy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file, str, or pathlib.Path
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string or Path, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the NumPy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
elif is_pathlib_path(file):
if not file.name.endswith('.npy'):
file = file.parent / (file.name + '.npy')
fid = file.open("wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
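# Minimal sketch of the pathlib branch above (assumes pathlib is available,
# i.e. Python >= 3.4 or the backport):
# >>> from pathlib import Path
# >>> np.save(Path('/tmp/x'), np.arange(4))   # '.npy' is appended automatically
# >>> np.load('/tmp/x.npy')
# array([0, 1, 2, 3])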
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the file name if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
NumPy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
    file : str
        File name of the output ``.npz`` file.
    args : Arguments, optional
        Arrays to save to the file. Arrays saved this way will be stored
        with names "arr_0", "arr_1", and so on.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with
        the keyword names.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
elif is_pathlib_path(file):
if not file.name.endswith('.npz'):
file = file.parent / (file.name + '.npz')
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
# Since target file might be big enough to exceed capacity of a global
# temporary directory, create temp file side-by-side with the target file.
file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
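# Minimal sketch of the naming rule enforced above: positional arrays are
# stored as 'arr_0', 'arr_1', ..., so passing a keyword that collides with a
# generated name raises ValueError.
# >>> np.savez('/tmp/clash.npz', np.arange(3), arr_0=np.arange(3))
# Traceback (most recent call last):
#     ...
# ValueError: Cannot use un-named variables and keyword arr_0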
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
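# Minimal sketch of the converter selection above (values are illustrative):
# >>> _getconv(np.dtype(np.int32))(b' 3.0 ')   # integers go through float()
# 3
# >>> _getconv(np.dtype(float))(b'0x1p-1')     # hex floats via float.fromhex
# 0.5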
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file, str, or pathlib.Path
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : int or sequence, optional
Which columns to read, with 0 being the first. For example,
usecols = (1,4,5) will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
.. versionadded:: 1.11.0
            Also when a single column has to be read it is possible to use
            an integer instead of a tuple. E.g., ``usecols = 3`` reads the
            fourth column the same way as ``usecols = (3,)`` would.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
# Allow usecols to be a single int or a sequence of ints
try:
usecols_as_list = list(usecols)
except TypeError:
usecols_as_list = [usecols]
for col_idx in usecols_as_list:
try:
opindex(col_idx)
except TypeError as e:
e.args = (
"usecols must be an int or a sequence of ints but "
"it contains at least one element of type %s" %
type(col_idx),
)
raise
# Fall back to existing code
usecols = usecols_as_list
fown = False
try:
if is_pathlib_path(fname):
fname = str(fname)
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
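# Minimal usage sketch for the integer `usecols` and `ndmin` handling above:
# >>> from io import StringIO
# >>> np.loadtxt(StringIO(u"0 1 2\n3 4 5"), usecols=2)
# array([ 2.,  5.])
# >>> np.loadtxt(StringIO(u"7"), ndmin=2)
# array([[ 7.]])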
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
    ``+`` : Forces the result to be preceded by + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if is_pathlib_path(fname):
fname = str(fname)
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
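# Minimal sketch of the complex-number branch above: a single '%' specifier is
# expanded to ' (%s+%sj)' per column, so
# >>> np.savetxt('/tmp/z.txt', np.array([1 + 2j, 3 + 4j]), fmt='%.1e')
# writes rows such as ' (1.0e+00+2.0e+00j)'.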
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] NumPy User Guide, section `I/O with NumPy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if is_pathlib_path(fname):
fname = str(fname)
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
            errmsg.insert(0, "Some errors were detected!")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning, stacklevel=2)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
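# Minimal usage sketch for the missing/filling machinery above:
# >>> from io import BytesIO
# >>> np.genfromtxt(BytesIO(b"1,N/A\n2,5"), delimiter=',',
# ...               missing_values='N/A', filling_values=-1)
# array([[ 1., -1.],
#        [ 2.,  5.]])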
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
| gpl-3.0 |
larrybradley/astropy | astropy/table/__init__.py | 8 | 3387 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import config as _config
from .column import Column, MaskedColumn, StringTruncateWarning, ColumnInfo
__all__ = ['BST', 'Column', 'ColumnGroups', 'ColumnInfo', 'Conf',
'JSViewer', 'MaskedColumn', 'NdarrayMixin', 'QTable', 'Row',
'SCEngine', 'SerializedColumn', 'SortedArray', 'StringTruncateWarning',
'Table', 'TableAttribute', 'TableColumns', 'TableFormatter',
'TableGroups', 'TableMergeError', 'TableReplaceWarning', 'conf',
'connect', 'hstack', 'join', 'registry', 'represent_mixins_as_columns',
'setdiff', 'unique', 'vstack', 'dstack', 'conf', 'join_skycoord',
'join_distance', 'PprintIncludeExclude']
class Conf(_config.ConfigNamespace): # noqa
"""
Configuration parameters for `astropy.table`.
"""
auto_colname = _config.ConfigItem(
'col{0}',
'The template that determines the name of a column if it cannot be '
'determined. Uses new-style (format method) string formatting.',
aliases=['astropy.table.column.auto_colname'])
default_notebook_table_class = _config.ConfigItem(
'table-striped table-bordered table-condensed',
'The table class to be used in Jupyter notebooks when displaying '
        'tables (and not overridden). See <https://getbootstrap.com/css/#tables> '
'for a list of useful bootstrap classes.')
replace_warnings = _config.ConfigItem(
[],
'List of conditions for issuing a warning when replacing a table '
"column using setitem, e.g. t['a'] = value. Allowed options are "
"'always', 'slice', 'refcount', 'attributes'.",
'list')
replace_inplace = _config.ConfigItem(
False,
'Always use in-place update of a table column when using setitem, '
"e.g. t['a'] = value. This overrides the default behavior of "
"replacing the column entirely with the new value when possible. "
"This configuration option will be deprecated and then removed in "
"subsequent major releases.")
conf = Conf() # noqa
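# Minimal usage sketch (interactive; the value shown is the default defined
# above):
# >>> from astropy.table import conf
# >>> conf.auto_colname
# 'col{0}'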
from . import connect # noqa: E402
from .groups import TableGroups, ColumnGroups # noqa: E402
from .table import (Table, QTable, TableColumns, Row, TableFormatter,
NdarrayMixin, TableReplaceWarning, TableAttribute,
PprintIncludeExclude) # noqa: E402
from .operations import (join, setdiff, hstack, dstack, vstack, unique, # noqa: E402
TableMergeError, join_skycoord, join_distance) # noqa: E402
from .bst import BST # noqa: E402
from .sorted_array import SortedArray # noqa: E402
from .soco import SCEngine # noqa: E402
from .serialize import SerializedColumn, represent_mixins_as_columns # noqa: E402
# Finally import the formats for the read and write method but delay building
# the documentation until all are loaded. (#5275)
from astropy.io import registry # noqa: E402
with registry.delay_doc_updates(Table):
# Import routines that connect readers/writers to astropy.table
from .jsviewer import JSViewer
import astropy.io.ascii.connect
import astropy.io.fits.connect
import astropy.io.misc.connect
import astropy.io.votable.connect
import astropy.io.misc.asdf.connect
import astropy.io.misc.pandas.connect # noqa: F401
| bsd-3-clause |
fqhuy/minimind | prototypes/test_image_upsampling.py | 1 | 1261 | import numpy as np
import scipy as sp
from scipy.stats import multivariate_normal
from matplotlib import pyplot as plt
def k(x, a = -0.75):
if abs(x) <= 1:
return (a + 2.) * abs(x)**3 - (a + 3.) * abs(x) ** 2. + 1
elif 1 < abs(x) <= 2:
return a * abs(x) ** 3. - 5. * a * abs(x) **2. + 8. * a * abs(x) - 4. * a
else:
return 0.0
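# A quick sanity check on the kernel above (a Catmull-Rom-style cubic with
# a = -0.75): k(0) == 1 and k(1) == k(2) == 0, so original samples are
# reproduced exactly and each interpolated value mixes at most 4 neighbours
# per axis.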
# x, y = np.mgrid[-1:1:.01, -1:1:.01]
# pos = np.empty(x.shape + (2,))
# pos[:, :, 0] = x; pos[:, :, 1] = y
# rv = multivariate_normal([0.0, 0.0], [[2.0, 0.3], [0.3, 2.0]])
# plt.contourf(x, y, rv.pdf(pos))
A = np.random.rand(5, 5)
B = A.copy()
# A = np.kron(A, np.ones((2, 2), dtype=float))
alpha = 5
C = np.zeros((A.shape[0] * alpha, A.shape[1] * alpha), dtype=float)
for i in range(A.shape[0]):
for j in range(A.shape[1]):
C[i * alpha, j * alpha] = A[i, j]
# K = np.array([k(xx) for xx in [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]])
K = np.array([k(xx) for xx in np.linspace(-2, 2, 11)])
for r in range(C.shape[0]):
C[r, :] = np.convolve(C[r], K, 'same')
for c in range(C.shape[1]):
C[:, c] = np.convolve(C[:, c].flatten(), K, 'same')
plt.subplot(121)
plt.imshow(C, interpolation='bicubic')
plt.subplot(122)
plt.imshow(C, interpolation='nearest')
plt.show() | mit |
ywcui1990/htmresearch | projects/capybara/datasets/SyntheticData/generate_synthetic_data.py | 9 | 5156 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate synthetic sequences using a pool of sequence motifs
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from sklearn import decomposition
import os
plt.figure()
# Generate a set of sequence motifs
def generateSequenceMotifs(numMotif, motifLength, seed=None):
if seed is not None:
np.random.seed(seed)
sequenceMotifs = np.random.randn(motifLength, motifLength)
pca = decomposition.PCA(n_components=numMotif)
pca.fit(sequenceMotifs)
sequenceMotifs = pca.components_
for i in range(numMotif):
sequenceMotifs[i, :] = sequenceMotifs[i, :]-min(sequenceMotifs[i, :])
sequenceMotifs[i, :] = sequenceMotifs[i, :]/max(sequenceMotifs[i, :])
return sequenceMotifs
def generateSequence(sequenceLength, useMotif, currentClass, sequenceMotifs):
motifLength = sequenceMotifs.shape[1]
sequence = np.zeros((sequenceLength + 20,))
motifState = np.zeros((sequenceLength + 20,))
randomLengthList = np.linspace(1, 10, 10).astype('int')
t = 0
while t < sequenceLength:
randomLength = np.random.choice(randomLengthList)
sequence[t:t + randomLength] = np.random.rand(randomLength)
motifState[t:t + randomLength] = -1
t += randomLength
motifIdx = np.random.choice(useMotif[currentClass])
sequence[t:t + motifLength] = sequenceMotifs[motifIdx]
motifState[t:t + motifLength] = motifIdx
t += motifLength
sequence = sequence[:sequenceLength]
motifState = motifState[:sequenceLength]
return sequence, motifState
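# Sketch of the layout produced above: random-length noise segments
# (motifState == -1) alternate with embedded motifs of length motifLength
# (motifState == motif index), and the result is truncated to sequenceLength.
# For example, generateSequence(20, {0: [1]}, 0, sequenceMotifs) returns a
# 20-sample sequence whose motif regions all use motif 1.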
def generateSequences(numSeq, numClass, sequenceLength, useMotif, sequenceMotifs):
trainData = np.zeros((numSeq, sequenceLength+1))
numSeqPerClass = numSeq/numClass
classList = []
for classIdx in range(numClass):
classList += [classIdx] * numSeqPerClass
for seq in range(numSeq):
currentClass = classList[seq]
sequence, motifState = generateSequence(sequenceLength, useMotif,
currentClass, sequenceMotifs)
trainData[seq, 0] = currentClass
trainData[seq, 1:] = sequence
return trainData
numMotif = 5
motifLength = 5
sequenceMotifs = generateSequenceMotifs(numMotif, 5, seed=42)
numTrain = 100
numTest = 100
numClass = 2
motifPerClass = 2
np.random.seed(2)
useMotif = {}
motifList = set(range(numMotif))
for classIdx in range(numClass):
useMotifForClass = []
for _ in range(motifPerClass):
useMotifForClass.append(np.random.choice(list(motifList)))
motifList.remove(useMotifForClass[-1])
useMotif[classIdx] = useMotifForClass
sequenceLength = 100
currentClass = 0
sequence, motifState = generateSequence(sequenceLength, useMotif,
currentClass, sequenceMotifs)
MotifColor = {}
colorList = ['r','g','b','c','m','y']
i = 0
for c in useMotif.keys():
for v in useMotif[c]:
MotifColor[v] = colorList[i]
i += 1
fig, ax = plt.subplots(nrows=4, ncols=1, figsize=(20, 3 * 4))
for plti in xrange(4):
  currentClass = 0 if plti < 2 else 1
sequence, motifState = generateSequence(sequenceLength, useMotif,
currentClass, sequenceMotifs)
ax[plti].plot(sequence, 'k-')
startPatch = False
for t in range(len(motifState)):
if motifState[t] >= 0 and startPatch is False:
startPatchAt = t
startPatch = True
currentMotif = motifState[t]
if startPatch and (motifState[t] < 0):
endPatchAt = t-1
ax[plti].add_patch(
patches.Rectangle(
(startPatchAt, 0),
endPatchAt-startPatchAt, 1, alpha=0.5,
color=MotifColor[currentMotif]
)
)
startPatch = False
ax[plti].set_xlim([0, 100])
ax[plti].set_ylabel('class {}'.format(currentClass))
outputDir = 'Test1'
if not os.path.exists(outputDir):
os.makedirs(outputDir)
trainData = generateSequences(
numTrain, numClass, sequenceLength, useMotif, sequenceMotifs)
testData = generateSequences(
numTest, numClass, sequenceLength, useMotif, sequenceMotifs)
np.savetxt('Test1/Test1_TRAIN', trainData, delimiter=',', fmt='%.10f')
np.savetxt('Test1/Test1_TEST', testData, delimiter=',', fmt='%.10f')
plt.savefig('Test1/Test1.png')
| agpl-3.0 |
spxtr/contrib | mungegithub/issue-labeler/simple_app.py | 20 | 4662 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import simplejson
import logging
from logging.handlers import RotatingFileHandler
import numpy as np
from flask import Flask, request
from sklearn.feature_extraction import FeatureHasher
from sklearn.externals import joblib
from sklearn.linear_model import SGDClassifier
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
app = Flask(__name__)
#Parameters
team_fn= "./models/trained_teams_model.pkl"
component_fn= "./models/trained_components_model.pkl"
logFile = "/tmp/issue-labeler.log"
logSize = 1024*1024*100
numFeatures = 262144
myLoss = 'hinge'
myAlpha = .1
myPenalty = 'l2'
myHasher = FeatureHasher(input_type="string", n_features=numFeatures, non_negative=True)
myStemmer = PorterStemmer()
tokenizer = RegexpTokenizer(r'\w+')
try:
    stop_fn = "./stopwords.txt"
    with open(stop_fn, 'r') as f:
        stopwords = set([word.strip() for word in f])
except:
    # don't remove any stopwords
    stopwords = set()
@app.errorhandler(500)
def internal_error(exception):
return str(exception), 500
@app.route("/", methods = ["POST"])
def get_labels():
"""
The request should contain 2 form-urlencoded parameters
1) title : title of the issue
2) body: body of the issue
It returns a team/<label> and a component/<label>
"""
title = request.form.get('title', "")
body = request.form.get('body', "")
tokens = tokenize_stem_stop(" ".join([title, body]))
team_mod = joblib.load(team_fn)
comp_mod = joblib.load(component_fn)
vec = myHasher.transform([tokens])
tlabel = team_mod.predict(vec)[0]
clabel = comp_mod.predict(vec)[0]
return ",".join([tlabel, clabel])
def tokenize_stem_stop(inputString):
inputString = inputString.encode('utf-8')
curTitleBody = tokenizer.tokenize(inputString.decode('utf-8').lower())
return map(myStemmer.stem, filter(lambda x: x not in stopwords, curTitleBody))
@app.route("/update_models", methods = ["PUT"])
def update_model():
"""
data should contain three fields
titles: list of titles
bodies: list of bodies
labels: list of list of labels
"""
data = request.json
titles = data.get('titles')
bodies = data.get('bodies')
labels = data.get('labels')
tTokens = []
cTokens = []
team_labels = []
component_labels = []
for (title, body, label_list) in zip(titles, bodies, labels):
tLabel = filter(lambda x: x.startswith('team'), label_list)
cLabel = filter(lambda x: x.startswith('component'), label_list)
tokens = tokenize_stem_stop(" ".join([title, body]))
if tLabel:
team_labels += tLabel
tTokens += [tokens]
if cLabel:
component_labels += cLabel
cTokens += [tokens]
tVec = myHasher.transform(tTokens)
cVec = myHasher.transform(cTokens)
if team_labels:
if os.path.isfile(team_fn):
team_model = joblib.load(team_fn)
team_model.partial_fit(tVec, np.array(team_labels))
else:
#no team model stored so build a new one
team_model = SGDClassifier(loss=myLoss, penalty=myPenalty, alpha=myAlpha)
team_model.fit(tVec, np.array(team_labels))
if component_labels:
if os.path.isfile(component_fn):
component_model = joblib.load(component_fn)
component_model.partial_fit(cVec, np.array(component_labels))
else:
#no comp model stored so build a new one
component_model = SGDClassifier(loss=myLoss, penalty=myPenalty, alpha=myAlpha)
component_model.fit(cVec, np.array(component_labels))
    if team_labels:
        joblib.dump(team_model, team_fn)
    if component_labels:
        joblib.dump(component_model, component_fn)
return ""
def configure_logger():
FORMAT = '%(asctime)-20s %(levelname)-10s %(message)s'
file_handler = RotatingFileHandler(logFile, maxBytes=logSize, backupCount=3)
formatter = logging.Formatter(FORMAT)
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
if __name__ == "__main__":
configure_logger()
app.run(host="0.0.0.0")
| apache-2.0 |
jlorieau/mollib | analysis/datasets/ramachandran.py | 1 | 8178 | """Analysis of the Ramachandran datasets.
"""
import tarfile
import json
from itertools import izip_longest as zip_longest
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy import ndimage
# Plot parameters
figsize = (7, 1.5) # size of each subplot in inches
title_fontsize = 9 # size of the font for subplottitles
title_font = 'Arial'
axes_fontsize = 9 # font size of the axes
axes_font = 'Arial'
label_fontsize = 8 # font size of the tick labels
annotation_fontsize = 7
matplotlib.rc('text', usetex=False)
matplotlib.rc('font', family='sans-serif')
matplotlib.rc('axes', linewidth=0.5)
for i in ('xtick', 'ytick'):
matplotlib.rcParams[i + '.major.size'] = 2.0
matplotlib.rcParams[i + '.major.width'] = 0.75
matplotlib.rcParams[i + '.minor.size'] = 1.0
matplotlib.rcParams[i + '.minor.width'] = 0.5
# import the dataset
filename = '../../mollib/data/ramachandranstatistics/measurements.tar'
tfile = tarfile.open(name=filename, mode='r')
measurement_dict = {}
with tfile:
# Extract the files
for member in tfile.getmembers():
f= tfile.extractfile(member)
try:
            # remove only the '.json' extension (str.strip('.json') would drop characters)
            identifier = member.name[:-5] if member.name.endswith('.json') else member.name
string = f.read().decode()
return_dict = json.loads(string)
measurement_dict[identifier] = return_dict
except KeyError:
continue
finally:
f.close()
# Import the phi/psi angles
results = {}
for identifier, return_dict in measurement_dict.items():
for classification, phi_psi_list in return_dict.items():
l = results.setdefault(classification, list())
phi_psi_list = [(i,j) for i,j in phi_psi_list if isinstance(i, float)
and isinstance(j, float)]
l.extend(phi_psi_list)
# Create and Overall dataset
phi_list = []
psi_list = []
for classification, phi_psi in results.items():
phi, psi = zip(*phi_psi)
phi_list += phi
psi_list += psi
results['Overall'] = zip(phi_list, psi_list)
# Prepare the plots
# Define the plots. These are organized by keys in results and the
# corresponding color map.
# The first item is the dataset name, the second item is the color map, and the
# third item is the position of the text label for the number of items used to
# calculate the density map
labels = (('Overall', plt.cm.plasma, (0.95, 0.01)),
('alpha-helix', plt.cm.Greens_r, (0.95, 0.01)),
('alpha-helix__N-term', plt.cm.Greens_r, (0.95, 0.01)),
('alpha-helix__C-term', plt.cm.Greens_r, (0.95, 0.01)),
('310-helix', plt.cm.Greens_r, (0.95, 0.01)),
('pi-helix', plt.cm.Greens_r, (0.95, 0.01)),
('sheet', plt.cm.Blues_r, (0.95, 0.01)),
('sheet__N-term', plt.cm.Blues_r, (0.95, 0.01)),
('sheet__C-term', plt.cm.Blues_r, (0.95, 0.01)),
('type I turn', plt.cm.Reds_r, (0.95, 0.01)),
('type II turn', plt.cm.Reds_r, (0.95, 0.01)),
("type I' turn", plt.cm.Reds_r, (0.95, 0.01)),
("type II' turn", plt.cm.Reds_r, (0.95, 0.89)),
('Gly', plt.cm.Greys_r, (0.95, 0.70)),
('No classification', plt.cm.Greys_r, (0.95, 0.01)),
)
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
subfigure_groups = list(grouper(4, labels))
# Make rows of plots
for row, subfigure_labels in enumerate(subfigure_groups, 1):
f, axarr = plt.subplots(nrows=1, ncols=len(subfigure_labels), sharey=True,
figsize=figsize)
# Set the shared y-axis label
axarr[0].set_ylabel(r'$\psi$ (deg)', fontsize=axes_fontsize,
fontname=axes_font)
for count, values in enumerate(subfigure_labels):
# Skip empty plots
if values is None:
axarr[count].axis('off')
# axarr[count].set_visible(False)
continue
label, cmap, annot_xy = values
# Prepare the data and create 2d histograms
phi_psi = results[label]
x, y = zip(*phi_psi)
x = np.array(x)
y = np.array(y)
N = len(x)
hist2d, x, y = np.histogram2d(y, x, bins=36, range=np.array(
[(-180., 190.), (-180., 190.)]))
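        # convert bin counts to a negative-log (energy-like) scale; the +0.1
        # offset below keeps empty bins finite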
hist2d = -1. * np.log(hist2d + 0.1)
minimum = np.min(hist2d)
hist2d -= minimum
levels = np.arange(0.0, 5.1, 1.0)
# Set the title and x-axis label
title = (label.replace('__', ' ')
.replace('alpha', "$\\alpha$")
.replace('pi', "$\\pi$")
.replace("\\'", "'")
.replace('No', 'no'))
axarr[count].set_title(title,
size=title_fontsize,
fontname=title_font)
# Set the x-axis label
axarr[count].set_xlabel(r'$\phi$ (deg)', fontsize=axes_fontsize,
fontname=axes_font)
# Set the axis tick spacing
axarr[count].xaxis.set_ticks(np.arange(-180, 181, 90))
axarr[count].set_xlim(-180, 180)
axarr[count].yaxis.set_ticks(np.arange(-180, 181, 90))
axarr[count].set_ylim(-180, 180)
# Set the axis tick label size
axarr[count].tick_params(labelsize=label_fontsize)
# Set the axis tick label font
labels = axarr[count].get_xticklabels() + axarr[count].get_yticklabels()
for label in labels:
label.set_fontname(axes_font)
# Annotate the number of measurements on the plot
if annot_xy is not None:
axarr[count].text(annot_xy[0], annot_xy[1],
"N={:,.0f}".format(N),
verticalalignment='bottom',
horizontalalignment='right',
transform=axarr[count].transAxes,
fontsize=annotation_fontsize)
# Create the 2d contour plot
axarr[count].contourf(x[:-1], y[:-1], hist2d, levels, cmap=cmap)
# Save the figures
plt.savefig('ramachandran/ramachandran_countour_{}.png'.format(row),
format='PNG', dpi=1200, bbox_inches='tight', pad_inches=0.05)
plt.savefig('ramachandran/ramachandran_countour_{}_lowres.png'.format(row),
format='PNG', dpi=220, bbox_inches='tight', pad_inches=0.02)
plt.savefig('ramachandran/ramachandran_countour_{}.svg'.format(row),
format='SVG', bbox_inches='tight', pad_inches=0.05)
# Prepare an overall contour plot with lines
phi_psi = results['Overall']
x, y = zip(*phi_psi)
x = np.array(x)
y = np.array(y)
N = len(x)
# Convert to a histogram
hist2d, x, y = np.histogram2d(y, x, bins=36, range=np.array(
[(-180., 185.), (-180., 185.)]))
hist2d = -1. * np.log(hist2d + 0.1)
minimum = np.min(hist2d)
hist2d -= minimum
# Optionally smooth the data
# hist2d = ndimage.gaussian_filter(hist2d, sigma=0.25, order=0)
# Contour levels at 98% (4.0) and 99.8%
levels = np.array([4.1, 6.1])
f, axarr = plt.subplots(nrows=1, ncols=1, sharey=True,
figsize=(4,4))
# Set the x-axis and y-axis labels
axarr.set_xlabel(r'$\phi$ (deg)', fontsize=axes_fontsize,
fontname=axes_font)
axarr.set_ylabel(r'$\psi$ (deg)', fontsize=axes_fontsize,
fontname=axes_font)
# Set the axis tick spacing
axarr.xaxis.set_ticks(np.arange(-180, 181, 90))
axarr.set_xlim(-180, 180)
axarr.yaxis.set_ticks(np.arange(-180, 181, 90))
axarr.set_ylim(-180, 180)
# Set the axis tick label size
axarr.tick_params(labelsize=label_fontsize)
# Outer ticks
axarr.get_yaxis().set_tick_params(direction='out')
axarr.get_xaxis().set_tick_params(direction='out')
# Create the 2d contour plot
axarr.contour(x[:-1], y[:-1], hist2d, levels, cmap=plt.cm.Blues_r)
plt.savefig('ramachandran/ramachandran_line_countour_overall_lowres.png',
format='PNG', dpi=220, bbox_inches='tight', pad_inches=0.02)
plt.savefig('ramachandran/ramachandran_line_countour_overall.svg',
format='SVG', bbox_inches='tight', pad_inches=0.05)
| gpl-3.0 |
aflaxman/mpld3 | examples/linked_brush.py | 21 | 1136 | """
Linked Brushing Example
=======================
This example uses the standard Iris dataset and plots it with a linked brushing
tool for dynamically exploring the data. The paintbrush button at the bottom
left can be used to enable and disable the behavior.
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
import mpld3
from mpld3 import plugins, utils
data = load_iris()
X = data.data
y = data.target
# dither the data for clearer plotting
X += 0.1 * np.random.random(X.shape)
fig, ax = plt.subplots(4, 4, sharex="col", sharey="row", figsize=(8, 8))
fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95,
hspace=0.1, wspace=0.1)
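# scatter every pairwise combination of the four iris features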
for i in range(4):
for j in range(4):
points = ax[3 - i, j].scatter(X[:, j], X[:, i],
c=y, s=40, alpha=0.6)
# remove tick labels
for axi in ax.flat:
for axis in [axi.xaxis, axi.yaxis]:
axis.set_major_formatter(plt.NullFormatter())
# Here we connect the linked brush plugin
plugins.connect(fig, plugins.LinkedBrush(points))
mpld3.show()
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/datasets/samples_generator.py | 20 | 56502 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
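# Illustrative use of make_classification (commentary added here, not part of
# the original module; parameter values are arbitrary):
#   >>> X, y = make_classification(n_samples=200, n_features=10,
#   ...                            n_informative=4, n_classes=3,
#   ...                            n_clusters_per_class=1, random_state=0)
#   >>> X.shape, y.shape
#   ((200, 10), (200,))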
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
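# Illustrative use of make_multilabel_classification (commentary added here,
# not part of the original module):
#   >>> X, Y = make_multilabel_classification(n_samples=20, n_features=10,
#   ...                                       n_classes=4, random_state=0)
#   >>> X.shape, Y.shape
#   ((20, 10), (20, 4))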
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
    # by sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
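# Illustrative use of make_regression (commentary added here, not part of the
# original module):
#   >>> X, y, w = make_regression(n_samples=50, n_features=5, n_informative=2,
#   ...                           coef=True, random_state=0)
#   >>> X.shape, y.shape, w.shape
#   ((50, 5), (50,), (5,))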
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
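# Illustrative use of make_moons (commentary added here, not part of the
# original module):
#   >>> X, y = make_moons(n_samples=100, noise=0.05, random_state=0)
#   >>> X.shape, set(y)
#   ((100, 2), {0, 1})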
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profiles is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
    # Random (orthonormal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such as D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
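# Illustrative use of make_sparse_coded_signal (commentary added here, not
# part of the original module):
#   >>> Y, D, X = make_sparse_coded_signal(n_samples=10, n_components=20,
#   ...                                    n_features=15, n_nonzero_coefs=3,
#   ...                                    random_state=0)
#   >>> Y.shape, D.shape, X.shape
#   ((15, 10), (15, 20), (20, 10))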
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al [1]. as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
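# Illustrative use of make_swiss_roll (commentary added here, not part of the
# original module):
#   >>> X, t = make_swiss_roll(n_samples=500, noise=0.1, random_state=0)
#   >>> X.shape, t.shape
#   ((500, 3), (500,))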
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
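# Illustrative use of make_gaussian_quantiles (commentary added here, not part
# of the original module):
#   >>> X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
#   ...                                random_state=0)
#   >>> X.shape, np.bincount(y)
#   ((90, 2), array([30, 30, 30]))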
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
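# Illustrative use of make_biclusters (commentary added here, not part of the
# original module):
#   >>> data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
#   ...                                    random_state=0)
#   >>> data.shape, rows.shape, cols.shape
#   ((30, 20), (3, 30), (3, 20))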
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
| mit |
kashif/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 25 | 2004 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
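# One possible way to complete the TASKs above (illustrative sketch only; the
# exercise expects you to write your own version):
#
#     vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1, 3))
#     clf = Pipeline([
#         ('vec', vectorizer),
#         ('clf', Perceptron()),
#     ])
#     clf.fit(docs_train, y_train)
#     y_predicted = clf.predict(docs_test)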
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
trenton3983/Doing_Math_with_Python | chapter6/solutions/sierpinski.py | 2 | 1545 | '''
sierpinski.py
Draw Sierpinski Triangle
'''
import random
import matplotlib.pyplot as plt
def transformation_1(p):
x = p[0]
y = p[1]
x1 = 0.5*x
y1 = 0.5*y
return x1, y1
def transformation_2(p):
x = p[0]
y = p[1]
x1 = 0.5*x + 0.5
y1 = 0.5*y + 0.5
return x1, y1
def transformation_3(p):
x = p[0]
y = p[1]
x1 = 0.5*x + 1
y1 = 0.5*y
return x1, y1
def get_index(probability):
r = random.random()
c_probability = 0
sum_probability = []
for p in probability:
c_probability += p
sum_probability.append(c_probability)
for item, sp in enumerate(sum_probability):
if r <= sp:
return item
return len(probability)-1
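# Example (sketch): with probability=[0.2, 0.8] the cumulative sums are
# [0.2, 1.0], so get_index returns 0 for roughly 20% of draws and 1 otherwise.
#
#     random.seed(0)
#     counts = [0, 0]
#     for _ in range(1000):
#         counts[get_index([0.2, 0.8])] += 1
#     # counts ends up approximately [200, 800]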
def transform(p):
# list of transformation functions
transformations = [transformation_1, transformation_2, transformation_3]
probability = [1/3, 1/3, 1/3]
# pick a random transformation function and call it
tindex = get_index(probability)
t = transformations[tindex]
x, y = t(p)
return x, y
def draw_sierpinski(n):
# We start with (0, 0)
x = [0]
y = [0]
x1, y1 = 0, 0
for i in range(n):
x1, y1 = transform((x1, y1))
x.append(x1)
y.append(y1)
return x, y
if __name__ == '__main__':
n = int(input('Enter the desired number of points'
'in the Sierpinski Triangle: '))
x, y = draw_sierpinski(n)
# Plot the points
plt.plot(x, y, 'o')
plt.title('Sierpinski with {0} points'.format(n))
plt.show()
| mit |
0asa/scikit-learn | sklearn/utils/validation.py | 2 | 17489 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..base import BaseEstimator
from inspect import getargspec
class DataConversionWarning(UserWarning):
"A warning on implicit data conversions happening in the code"
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"A warning on implicit dispatch to numpy.dot"
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
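# Usage sketch (illustrative): integer arrays are converted to a float copy,
# while float arrays can be passed through untouched when copy=False.
#
#     X_int = np.array([[1, 2], [3, 4]], dtype=np.int32)
#     as_float_array(X_int).dtype                # float32 (int32 maps to float32)
#     X_f = np.array([[1., 2.]], dtype=np.float64)
#     as_float_array(X_f, copy=False) is X_f     # True, no copy made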
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %r" % x)
return x.shape[0] if hasattr(x, 'shape') else len(x)
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: %s"
% str(uniques))
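# Usage sketch (illustrative):
#
#     check_consistent_length([0, 1, 2], np.zeros(3))   # passes silently
#     check_consistent_length([0, 1, 2], np.zeros(2))   # raises ValueError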
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-interable objects to arrays.
Parameters
----------
iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, order, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse is None:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
sparse_type = spmatrix.format
if dtype is None:
dtype = spmatrix.dtype
if sparse_type in accept_sparse:
# correct type
if dtype == spmatrix.dtype:
# correct dtype
if copy:
spmatrix = spmatrix.copy()
else:
# convert dtype
spmatrix = spmatrix.astype(dtype)
else:
# create new
spmatrix = spmatrix.asformat(accept_sparse[0]).astype(dtype)
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
if hasattr(spmatrix, "data"):
spmatrix.data = np.array(spmatrix.data, copy=False, order=order)
return spmatrix
def check_array(array, accept_sparse=None, dtype=None, order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2nd numpy array.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, order,
copy, force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
array = np.array(array, dtype=dtype, order=order, copy=copy)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
return array
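# Usage sketch (illustrative): nested lists become 2d ndarrays, and sparse
# input is only accepted when an allowed format is listed.
#
#     check_array([[1, 2], [3, 4]]).shape                  # (2, 2)
#     check_array(sp.eye(3), accept_sparse='csr').format   # 'csr'
#     # check_array(sp.eye(3)) raises TypeError: dense data is required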
def check_X_y(X, y, accept_sparse=None, dtype=None, order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
    set multi_output=True to allow 2d and sparse y.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
check_consistent_length(X, y)
return X, y
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def warn_if_not_float(X, estimator='This algorithm'):
"""Warning utility function to check that data type is floating point.
Returns True if a warning was raised (i.e. the input is not float) and
False otherwise, for easier input validation.
"""
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
if X.dtype.kind != 'f':
warnings.warn("%s assumes floating point values as input, "
"got %s" % (estimator, X.dtype))
return True
return False
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
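# Usage sketch (illustrative):
#
#     rng = check_random_state(42)        # a seeded RandomState instance
#     check_random_state(rng) is rng      # True, instances pass through
#     check_random_state(None)            # the global np.random singleton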
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Example
-------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise ValueError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name' : type(estimator).__name__})
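# Usage sketch (illustrative; LinearRegression is only an example estimator):
#
#     from sklearn.linear_model import LinearRegression
#     est = LinearRegression()
#     # check_is_fitted(est, 'coef_')   -> raises NotFittedError before fit
#     est.fit([[0.], [1.]], [0., 1.])
#     check_is_fitted(est, 'coef_')     # passes silently after fit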
| bsd-3-clause |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/matplotlib/tests/test_arrow_patches.py | 7 | 1799 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
import matplotlib
def draw_arrow(ax, t, r):
ax.annotate('', xy=(0.5, 0.5 + r), xytext=(0.5, 0.5), size=30,
arrowprops=dict(arrowstyle=t,
fc="b", ec='k'))
@image_comparison(baseline_images=['fancyarrow_test_image'])
def test_fancyarrow():
# Added 0 to test division by zero error described in issue 3930
r = [0.4, 0.3, 0.2, 0.1, 0]
t = ["fancy", "simple", matplotlib.patches.ArrowStyle.Fancy()]
fig, axes = plt.subplots(len(t), len(r), squeeze=False,
subplot_kw=dict(aspect=True),
figsize=(8, 4.5))
for i_r, r1 in enumerate(r):
for i_t, t1 in enumerate(t):
ax = axes[i_t, i_r]
draw_arrow(ax, t1, r1)
ax.tick_params(labelleft=False, labelbottom=False)
@image_comparison(baseline_images=['boxarrow_test_image'], extensions=['png'])
def test_boxarrow():
styles = matplotlib.patches.BoxStyle.get_styles()
n = len(styles)
spacing = 1.2
figheight = (n * spacing + .5)
fig1 = plt.figure(1, figsize=(4 / 1.5, figheight / 1.5))
fontsize = 0.3 * 72
for i, stylename in enumerate(sorted(styles.keys())):
fig1.text(0.5, ((n - i) * spacing - 0.5)/figheight, stylename,
ha="center",
size=fontsize,
transform=fig1.transFigure,
bbox=dict(boxstyle=stylename, fc="w", ec="k"))
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
isse-augsburg/ssharp | Models/Small Models/DegradedMode/createGraphFigure.py | 2 | 3466 | #!/usr/local/bin/python3
# The MIT License (MIT)
#
# Copyright (c) 2014-2017, Institute for Software & Systems Engineering
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Run under Windows using Python 3.6
# c:\Python36\Scripts\pip.exe install numpy
# c:\Python36\Scripts\pip.exe install matplotlib
# c:\Python36\python.exe createGraphFigure.py
import csv
import numpy
import matplotlib.pyplot as pyplot
from matplotlib.ticker import FuncFormatter
fileToRead = 'graph.csv'
csvDelimiter = ','
floatDelimiter = '.'
csvRawDataFile = open(fileToRead,'r')
csvRawData = csv.reader(csvRawDataFile, delimiter=csvDelimiter)
csvRows = list(csvRawData)
# read fromValues
fromValues = []
for entry in (csvRows[1])[1:]:
newValue = float(entry.replace(floatDelimiter, '.'))
fromValues.append(newValue)
# see http://matplotlib.org/api/matplotlib_configuration_api.html#matplotlib.rc
titleFont = {'fontname':'Garamond','fontsize':16}
labelFont = {'fontname':'Garamond','fontsize':14}
#standardFont = {'family':'serif','serif':['Garamond'],'sans-serif':['Garamond']}
standardFont = {'family':'serif','serif':['Garamond']}
standardFigure = {'figsize': (10,5)}
#pyplot.rcParams['figure.figsize'] = 10,5
pyplot.rc('font',**standardFont)
pyplot.rc('figure',**standardFigure)
def createCustomFormatter(scaleY):
# https://matplotlib.org/2.0.0/examples/pylab_examples/custom_ticker1.html
# http://matplotlib.org/api/ticker_api.html#tick-formatting
# http://matplotlib.org/api/ticker_api.html#matplotlib.ticker.FormatStrFormatter
#
factor = 10.0 ** scaleY
print(factor)
def formatMoreBeautiful(value, tickPos):
return '$ %.2f \\times\ 10^{-%s}$' % (value*factor, str(scaleY))
return FuncFormatter(formatMoreBeautiful)
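# Example (sketch): with scaleY=3 the factor is 10**3, so a tick value of
# 0.0045 is rendered as roughly "4.50 x 10^-3" in TeX form on the y axis.
#
#     formatter = createCustomFormatter(3)
#     label = formatter(0.0045, 0)   # '$ 4.50 \times\ 10^{-3}$' (TeX markup)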
def printRow(rowToRead,yLabelName,fileName,scaleY):
# read resultValues
resultValues = []
row = csvRows[rowToRead]
rowName = row[0]
rowData = row[1:]
print(len(rowData))
for entry in rowData:
newValue = float(entry.replace(floatDelimiter, '.'))
resultValues.append(newValue)
pyplot.cla()
pyplot.clf()
fig, ax = pyplot.subplots()
if scaleY != 1:
ax.yaxis.set_major_formatter(createCustomFormatter(scaleY))
pyplot.plot(fromValues,resultValues, 'o-')
pyplot.title('',**titleFont)
pyplot.xlabel('Pr(F1)',**labelFont)
pyplot.xticks([0.0, 1.0])
pyplot.ylabel(yLabelName,**labelFont)
pyplot.savefig(fileName, format="svg")
#pyplot.show()
printRow(2,"Pr(Hazard)","graph.svg",1)
| mit |
Fougere87/unsec | test_art.py | 1 | 2473 | import os
import glob
from unsec import Email
from unsec import Tools
from unsec import Collection
from unsec import Clustering
from sklearn import cluster
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
# Tools.vectorize("data/bioinfo_2014-01")
# coll = Collection.Email_Collection("data/bioinfo_2014-01/*")
articles = glob.glob("dataset_test/*txt")
collec = []
art_list = []
for art in articles:
    art_list.append(art)
    # use a context manager so the file handle is closed after reading
    with open(art, "r") as art_file:
        collec.append(art_file.readlines())
# for e in Tools.words_in_collection(coll.all_cleaned_bodies) :
# print(e)
# input("plop")
# print(len(Tools.words_in_collection(coll.all_cleaned_body)))
# print(Tools.term_freq(coll[0]))
# print(Tools.invert_doc_freq(coll.all_cleaned_subjects))
# matrix_sub = Tools.vectorize_tf_idf(coll.all_cleaned_subjects) # create data matrix
# matrix_bod = Tools.vectorize_tf_idf(coll.all_cleaned_bodies)
coll2 =[]
for e in collec:
print(str(e))
coll2.append(Tools.clean(str(e)))
matrix_art = Tools.vectorize_tf_idf(coll2)
# matrixTest = [[4,2,3], [5,3,2], [12,42,54], [4,1,2], [91,87,7], [41,21,31], [51,13,67], [12,2,4], [4,1,2], [31,31,14]]
k_means = cluster.KMeans(n_clusters=2) #create k-mean objet with n clusters as param
print("K-mean fitting...")
k_means.fit(matrix_art)
print(k_means.labels_)
print("list des documents : ",Clustering.get_clustered_docs(k_means.labels_, art_list))
mat_dist_sub = k_means.fit_transform(matrix_art)
print(mat_dist_sub)
print(k_means.score(matrix_art))
# k_means.fit(matrix_bod)
# print(k_means.labels_)
pca = decomposition.PCA(n_components=3)
reduced_mat_bod = pca.fit(matrix_art).transform(matrix_art)
print(pca.components_)
Tools.matrix_to_csv(matrix_art, Tools.words_in_collection(coll2), "tfidf_art.csv")
# Tools.vectorize_to_csv(coll, "data.csv")
# clusters = Clustering.get_clustered_docs(k_means.labels_,coll.all_cleaned_subjects)
# [print(e) for e in clusters]
# clusters_files = Clustering.get_clustered_docs(k_means.labels_,coll.files_list)
# [print(e) for e in clusters_files]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x_data = [e[0] for e in reduced_mat_bod]
y_data = [e[1] for e in reduced_mat_bod]
z_data = [e[2] for e in reduced_mat_bod]
ax.scatter(x_data, y_data, z_data, depthshade=True)
plt.show()
# Clustering.kmeans(matrix, 3)
# print(Tools.vectorize_tf_idf(coll)[1])
#print(e.clean_body())
| unlicense |
mjabri/holoviews | holoviews/plotting/mpl/annotation.py | 1 | 4406 | import matplotlib
from matplotlib import patches as patches
from ...core.util import match_spec
from .element import ElementPlot
class AnnotationPlot(ElementPlot):
"""
AnnotationPlot handles the display of all annotation elements.
"""
def __init__(self, annotation, **params):
self._annotation = annotation
super(AnnotationPlot, self).__init__(annotation, **params)
self.handles['annotations'] = []
def initialize_plot(self, ranges=None):
annotation = self.hmap.last
key = self.keys[-1]
ranges = self.compute_ranges(self.hmap, key, ranges)
ranges = match_spec(annotation, ranges)
axis = self.handles['axis']
opts = self.style[self.cyclic_index]
handles = self.draw_annotation(axis, annotation.data, opts)
self.handles['annotations'] = handles
return self._finalize_axis(key, ranges=ranges)
def update_handles(self, axis, annotation, key, ranges=None):
# Clear all existing annotations
for element in self.handles['annotations']:
element.remove()
        self.handles['annotations'] = []
opts = self.style[self.cyclic_index]
self.handles['annotations'] = self.draw_annotation(axis, annotation.data, opts)
class VLinePlot(AnnotationPlot):
"Draw a vertical line on the axis"
style_opts = ['alpha', 'color', 'linewidth', 'linestyle', 'visible']
def __init__(self, annotation, **params):
super(VLinePlot, self).__init__(annotation, **params)
def draw_annotation(self, axis, position, opts):
return [axis.axvline(position, **opts)]
class HLinePlot(AnnotationPlot):
"Draw a horizontal line on the axis"
style_opts = ['alpha', 'color', 'linewidth', 'linestyle', 'visible']
def __init__(self, annotation, **params):
super(HLinePlot, self).__init__(annotation, **params)
def draw_annotation(self, axis, position, opts):
"Draw a horizontal line on the axis"
return [axis.axhline(position, **opts)]
class TextPlot(AnnotationPlot):
"Draw the Text annotation object"
style_opts = ['alpha', 'color', 'family', 'weight', 'rotation', 'fontsize', 'visible']
def __init__(self, annotation, **params):
super(TextPlot, self).__init__(annotation, **params)
def draw_annotation(self, axis, data, opts):
(x,y, text, fontsize,
horizontalalignment, verticalalignment, rotation) = data
opts['fontsize'] = fontsize
return [axis.text(x,y, text,
horizontalalignment = horizontalalignment,
verticalalignment = verticalalignment,
rotation=rotation, **opts)]
class ArrowPlot(AnnotationPlot):
"Draw an arrow using the information supplied to the Arrow annotation"
_arrow_style_opts = ['alpha', 'color', 'lw', 'linewidth', 'visible']
_text_style_opts = TextPlot.style_opts
style_opts = sorted(set(_arrow_style_opts + _text_style_opts))
def __init__(self, annotation, **params):
super(ArrowPlot, self).__init__(annotation, **params)
def draw_annotation(self, axis, data, opts):
direction, text, xy, points, arrowstyle = data
arrowprops = dict({'arrowstyle':arrowstyle},
**{k: opts[k] for k in self._arrow_style_opts if k in opts})
textopts = {k: opts[k] for k in self._text_style_opts if k in opts}
if direction in ['v', '^']:
xytext = (0, points if direction=='v' else -points)
elif direction in ['>', '<']:
xytext = (points if direction=='<' else -points, 0)
return [axis.annotate(text, xy=xy, textcoords='offset points',
xytext=xytext, ha="center", va="center",
arrowprops=arrowprops, **textopts)]
class SplinePlot(AnnotationPlot):
"Draw the supplied Spline annotation (see Spline docstring)"
style_opts = ['alpha', 'edgecolor', 'linewidth', 'linestyle', 'visible']
def __init__(self, annotation, **params):
super(SplinePlot, self).__init__(annotation, **params)
def draw_annotation(self, axis, data, opts):
verts, codes = data
patch = patches.PathPatch(matplotlib.path.Path(verts, codes),
facecolor='none', **opts)
axis.add_patch(patch)
return [patch]
| bsd-3-clause |
thientu/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
        -----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
        # Create the graph of shortest distances from X to self.training_data_
        # via the nearest neighbors of X.
        # This can be done as a single array operation, but it potentially
        # takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
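# Usage sketch (illustrative; the data below is random and only meant for
# checking shapes):
#
#     rng = np.random.RandomState(0)
#     X = rng.rand(100, 10)
#     iso = Isomap(n_neighbors=5, n_components=2)
#     X_2d = iso.fit_transform(X)        # shape (100, 2)
#     err = iso.reconstruction_error()   # scalar embedding error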
| bsd-3-clause |
georglind/babusca | tests/test2.py | 1 | 5725 | from __future__ import division, print_function
import numpy as np
import sys
sys.path.append('/Users/kim/Science/Software/bosehubbard/bosehubbard')
# import bosehubbard
# import scattering as scat
# import utilities as util
# import scipy.linalg as linalg
# import scipy.sparse as sparse
import smatrix
import matplotlib.pyplot as plt
# $$$$$$\
# $$ __$$\
# $$$$$$\ \__/ $$ |
# $$ __$$\ $$$$$$ |
# $$ / $$ |$$ ____/
# $$ | $$ |$$ |
# \$$$$$$$ |$$$$$$$$\
# \____$$ |\________|
# $$\ $$ |
# \$$$$$$ |
# \______/
def g2_coherent_s(s, chlsi, chlso, E, qs=None):
"""
g2 for a coherent state (or partially coherent)
Parameters
----------
s : Setup
Scattering setup
chlsi : list
List of incoming channel indices
chlso : list
List of outgoing channel indices
E : float
Energy of the two-particle state
"""
qs, D2, S2, _ = smatrix.two_particle(s, chlsi=chlsi, chlso=chlso, E=E, dE=0, qs=qs)
# Single particle scattering
Ee = np.array([.5 * E])
_, S10 = smatrix.one_particle(s, chlsi[0], chlso[0], Ee)
_, S11 = smatrix.one_particle(s, chlsi[1], chlso[1], Ee)
dFS2 = np.abs(S10[0]) ** 2 * np.abs(S11[0]) ** 2
# Fourier transform
FS2 = np.fft.fft(S2) * (qs[1] - qs[0])
# times
times = np.fft.fftfreq(len(qs), d=qs[1] - qs[0])
# add delta function contribution
FS2 += np.exp(2 * np.pi * 1j * E / 2 * times) * np.sum(D2)
return np.abs(FS2[0]) ** 2, dFS2
def g2(s, chlsi, chlso, E, dE):
"""
Calculate the intensity-intensity correlation function as a function of total energy,
E, and energy discrepancy, dE.
Parameters
----------
s : Setup
Scattering setup object.
chlsi : list
Input state channel indices.
chlso : list
Output state channel indices.
E : float
Total energy of input photonic state.
dE : float
Energy difference between the two photons in the input state.
"""
qs, D2, S2, S2in = smatrix.two_particle(s, chlsi=chlsi, chlso=chlso, E=E, dE=dE)
Ee = np.array([.5 * (E - dE), .5 * (E + dE)])
_, S10 = smatrix.one_particle(s, chlsi[0], chlso[0], Ee)
_, S11 = smatrix.one_particle(s, chlsi[1], chlso[1], Ee)
# denominator
dFS2 = np.sum(np.abs(S10) ** 2 * np.abs(S11) ** 2)
# Fourier transform
FS2 = np.fft.fft(S2) * (qs[1] - qs[0])
# frequencies
freqs = np.fft.fftfreq(len(qs), d=qs[1] - qs[0])
# add delta function contribution
FS2 += np.exp(np.pi * 1j * E * freqs) * D2[0] + np.exp(np.pi * 1j * E * freqs) * D2[1]
return FS2, dFS2
def FT(E, dE, qs, D2, S2):
FS2 = np.fft.fft(S2) * (qs[1] - qs[0])
times = np.fft.fftfreq(len(qs), d=qs[1] - qs[0])
DFS2 = np.exp(np.pi * 1j * (E - dE) * times) * D2[0]
DFS2 += np.exp(np.pi * 1j * (E + dE) * times) * D2[1]
return times, FS2, DFS2
# $$\ $$\ $$\
# $$$\ $$$ | \__|
# $$$$\ $$$$ | $$$$$$\ $$\ $$$$$$$\
# $$\$$\$$ $$ | \____$$\ $$ |$$ __$$\
# $$ \$$$ $$ | $$$$$$$ |$$ |$$ | $$ |
# $$ |\$ /$$ |$$ __$$ |$$ |$$ | $$ |
# $$ | \_/ $$ |\$$$$$$$ |$$ |$$ | $$ |
# \__| \__| \_______|\__|\__| \__|
if __name__ == "__main__":
# N = 6
# m = bosehubbard.Model(
# omegas=[0]*N,
# links=[[i, (i+1) % N, 1] for i in xrange(N)],
# U=2)
# # The Model
U = 10
model = Model(
omegas=[0] * 2,
links=[[0, 1, 1]],
U=2 * U)
# The couplings
couplings = []
couplings.append(Coupling(channel=0, site=0, strength=.2))
couplings.append(Coupling(channel=1, site=1, strength=.2))
# The scattering setup
se = Setup(model, couplings)
# Let us calculate the t1matrix
Es = Tmatrix.energies(0, 0)
_, S100 = Smatrix.one_particle(se, 0, 0, Es)
_, S101 = Smatrix.one_particle(se, 0, 1, Es)
# Es = Tmatrix.energies(0, 0, WE=16, N=2048)
E = 0
dE = 0
chlsi = [0, 0]
chlso = [1, 1]
qs = Tmatrix.energies(0, 0, 2048, 16)
qs, D2, S2, S2in = Smatrix.two_particle(se, chlsi=chlsi, chlso=chlso, E=E, dE=dE, qs=qs)
cplsi = [se.channel_coupling(c) for c in chlsi]
cplso = [se.channel_coupling(c) for c in chlso]
# # Fourier transform
ts, FS2, DFS2 = FT(E, dE, qs, D2, S2)
FS2 += DFS2
FS2 /= (S101[512]) ** 2
dt = ts[1] - ts[0]
# # All the plots
# # S1_{1,1}
fig, ax = plt.subplots(nrows=2, ncols=2)
ax1 = plt.subplot(2, 2, 1)
ax1.plot(Es, np.abs(S100) ** 2)
ax1.set_ylabel(r'$S_{{{0},{1}}}^{{(1)}}$'.format(se.sites[0], se.sites[0]))
# # S1_{0,1}
ax2 = plt.subplot(2, 2, 2)
ax2.plot(Es, np.abs(S101) ** 2)
ax2.set_ylabel(r'$S_{{{0},{1}}}^{{(1)}}$'.format(se.sites[1], se.sites[0]))
# # S2
ax3 = plt.subplot(2, 2, 3)
ax3.plot(Es, np.abs(S2), label=r"$S^{(2)}$")
ax3.plot(Es, np.abs(S2in), label=r"$S^{(2)}_{in}$")
ax3.plot(Es, np.abs(S2 - S2in), label=r"$S^{(2)}_{ni}$")
ax3.set_ylabel(r'$S^{{(2)}}_{{{0},{1};{2},{3}}}$'.format(*([c.site for c in cplso + cplsi])))
plt.legend()
# # Fourier(S2)
ax4 = plt.subplot(2, 2, 4)
ax4.bar(ts - dt / 2, np.abs(FS2) ** 2, dt)
ax4.set_ylabel(r'$|\mathcal{{F}} S^{{(2)}}_{{{0},{1};{2},{3}}}|^2$'.format(*([c.site for c in cplso + cplsi])))
    # ax4.set_xlim([-4, 4])
plt.suptitle(r'$\Gamma_{{{0}}} = {1}$, $\Gamma_{{{2}}} = {3}$'.format(se.sites[0], se.gs[0], se.sites[1], se.gs[1]))
fig.set_tight_layout(True)
plt.show()
| mit |
epeios-q37/epeios | tools/xdhq/examples/PYH/MatPlotLib/main.py | 1 | 11970 | import os, sys
os.chdir(os.path.dirname(os.path.realpath(__file__)))
sys.path.append("../../atlastk")
import atlastk
import matplotlib.pyplot as plt
from io import StringIO
from mpl_toolkits.mplot3d import axes3d
import numpy as np
from cycler import cycler
import matplotlib as mpl
import matplotlib.tri as tri
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import collections, colors, transforms
# plt.xkcd() # XKCD-like ploting.
BODY = """
<iframe style="border: none;" src="FaaSDesc.php?text=R3JhcGhpY3MgZHJhd24gd2l0aCBbKm1hdHBsb3RsaWIqXShodHRwczovL21hdHBsb3RsaWIub3JnLykgY2FuIGJlIHVzZWQgd2l0aCB0aGUgKkF0bGFzKiAqVG9vbGtpdCou"></iframe>
<fieldset id="Plots"></fieldset>
<fieldset id="Buttons"></fieldset>
<fieldset>
<span>Due to a limitation of <a href="https://en.wikipedia.org/wiki/Matplotlib" target="_blank" style="font-style: oblique;">matplotlib</a>, opening </span>
<br/>
<span>more then one session with this application</span>
<br/>
<span>may probably lead to a crash.</span>
</fieldset>
"""
HEAD = """
<title>MatPlotLib with the Atlas Toolkit</title>
<style>
fieldset {margin: auto; text-align: center;}
#Buttons {display: none;}
</style>
<style id="Ready">
.hide {display: none;}
#Buttons {display: block;}
</style>
"""
AMOUNT = 10
def getSVG(plt):
figfile = StringIO()
plt.savefig(figfile, format='svg')
svg = figfile.getvalue();
plt.close()
return svg
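# getSVG serializes the figure currently held by matplotlib into an SVG string
# and closes it, so each example function below builds a plot and returns its
# markup, which ac_connect injects into the "Plots" fieldset of the page.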
def run(example):
return eval(f"example{example}()")
def example1():
labels = ['G1', 'G2', 'G3', 'G4', 'G5']
men_means = [20, 34, 30, 35, 27]
women_means = [25, 32, 34, 20, 25]
x = np.arange(len(labels)) # the label locations
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width/2, men_means, width, label='Men')
rects2 = ax.bar(x + width/2, women_means, width, label='Women')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
fig.tight_layout()
return getSVG(plt)
def example2():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Grab some test data.
X, Y, Z = axes3d.get_test_data(0.05)
# Plot a basic wireframe.
ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
return getSVG(plt)
def example3():
# prepare some coordinates
x, y, z = np.indices((8, 8, 8))
# draw cuboids in the top left and bottom right corners, and a link between them
cube1 = (x < 3) & (y < 3) & (z < 3)
cube2 = (x >= 5) & (y >= 5) & (z >= 5)
link = abs(x - y) + abs(y - z) + abs(z - x) <= 2
# combine the objects into a single boolean array
voxels = cube1 | cube2 | link
# set the colors of each object
colors = np.empty(voxels.shape, dtype=object)
colors[link] = 'red'
colors[cube1] = 'blue'
colors[cube2] = 'green'
# and plot everything
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.voxels(voxels, facecolors=colors, edgecolor='k')
return getSVG(plt)
def example4():
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y = np.mgrid[0:6*np.pi:0.25, 0:4*np.pi:0.25]
Z = np.sqrt(np.abs(np.cos(X) + np.cos(Y)))
ax.plot_surface(X + 1e5, Y + 1e5, Z, cmap='autumn', cstride=2, rstride=2)
ax.set_xlabel("X label")
ax.set_ylabel("Y label")
ax.set_zlabel("Z label")
ax.set_zlim(0, 2)
return getSVG(plt)
def example5():
category_names = ['Strongly disagree', 'Disagree',
'Neither agree nor disagree', 'Agree', 'Strongly agree']
results = {
'Question 1': [10, 15, 17, 32, 26],
'Question 2': [26, 22, 29, 10, 13],
'Question 3': [35, 37, 7, 2, 19],
'Question 4': [32, 11, 9, 15, 33],
'Question 5': [21, 29, 5, 5, 40],
'Question 6': [8, 19, 5, 30, 38]
}
def survey(results, category_names):
"""
Parameters
----------
results : dict
A mapping from question labels to a list of answers per category.
It is assumed all lists contain the same number of entries and that
it matches the length of *category_names*.
category_names : list of str
The category labels.
"""
labels = list(results.keys())
data = np.array(list(results.values()))
data_cum = data.cumsum(axis=1)
category_colors = plt.get_cmap('RdYlGn')(
np.linspace(0.15, 0.85, data.shape[1]))
fig, ax = plt.subplots(figsize=(9.2, 5))
ax.invert_yaxis()
ax.xaxis.set_visible(False)
ax.set_xlim(0, np.sum(data, axis=1).max())
for i, (colname, color) in enumerate(zip(category_names, category_colors)):
widths = data[:, i]
starts = data_cum[:, i] - widths
ax.barh(labels, widths, left=starts, height=0.5,
label=colname, color=color)
xcenters = starts + widths / 2
r, g, b, _ = color
text_color = 'white' if r * g * b < 0.5 else 'darkgrey'
for y, (x, c) in enumerate(zip(xcenters, widths)):
ax.text(x, y, str(int(c)), ha='center', va='center',
color=text_color)
ax.legend(ncol=len(category_names), bbox_to_anchor=(0, 1),
loc='lower left', fontsize='small')
return fig, ax
survey(results, category_names)
return getSVG(plt)
def example6():
# Define a list of markevery cases and color cases to plot
cases = [None,
8,
(30, 8),
[16, 24, 30],
[0, -1],
slice(100, 200, 3),
0.1,
0.3,
1.5,
(0.0, 0.1),
(0.45, 0.1)]
colors = ['#1f77b4',
'#ff7f0e',
'#2ca02c',
'#d62728',
'#9467bd',
'#8c564b',
'#e377c2',
'#7f7f7f',
'#bcbd22',
'#17becf',
'#1a55FF']
# Configure rcParams axes.prop_cycle to simultaneously cycle cases and colors.
mpl.rcParams['axes.prop_cycle'] = cycler(markevery=cases, color=colors)
# Create data points and offsets
x = np.linspace(0, 2 * np.pi)
offsets = np.linspace(0, 2 * np.pi, 11, endpoint=False)
yy = np.transpose([np.sin(x + phi) for phi in offsets])
# Set the plot curve with markers and a title
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.6, 0.75])
for i in range(len(cases)):
ax.plot(yy[:, i], marker='o', label=str(cases[i]))
ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
plt.title('Support for axes.prop_cycle cycler with markevery')
return getSVG(plt)
def example7():
#-----------------------------------------------------------------------------
# Analytical test function
#-----------------------------------------------------------------------------
def function_z(x, y):
r1 = np.sqrt((0.5 - x)**2 + (0.5 - y)**2)
theta1 = np.arctan2(0.5 - x, 0.5 - y)
r2 = np.sqrt((-x - 0.2)**2 + (-y - 0.2)**2)
theta2 = np.arctan2(-x - 0.2, -y - 0.2)
z = -(2 * (np.exp((r1 / 10)**2) - 1) * 30. * np.cos(7. * theta1) +
(np.exp((r2 / 10)**2) - 1) * 30. * np.cos(11. * theta2) +
0.7 * (x**2 + y**2))
return (np.max(z) - z) / (np.max(z) - np.min(z))
#-----------------------------------------------------------------------------
# Creating a Triangulation
#-----------------------------------------------------------------------------
# First create the x and y coordinates of the points.
n_angles = 20
n_radii = 10
min_radius = 0.15
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0, 2 * np.pi, n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += np.pi / n_angles
x = (radii * np.cos(angles)).flatten()
y = (radii * np.sin(angles)).flatten()
z = function_z(x, y)
# Now create the Triangulation.
# (Creating a Triangulation without specifying the triangles results in the
# Delaunay triangulation of the points.)
triang = tri.Triangulation(x, y)
# Mask off unwanted triangles.
triang.set_mask(np.hypot(x[triang.triangles].mean(axis=1),
y[triang.triangles].mean(axis=1))
< min_radius)
#-----------------------------------------------------------------------------
# Refine data
#-----------------------------------------------------------------------------
refiner = tri.UniformTriRefiner(triang)
tri_refi, z_test_refi = refiner.refine_field(z, subdiv=3)
#-----------------------------------------------------------------------------
# Plot the triangulation and the high-res iso-contours
#-----------------------------------------------------------------------------
fig, ax = plt.subplots()
ax.set_aspect('equal')
ax.triplot(triang, lw=0.5, color='white')
levels = np.arange(0., 1., 0.025)
cmap = cm.get_cmap(name='terrain', lut=None)
ax.tricontourf(tri_refi, z_test_refi, levels=levels, cmap=cmap)
ax.tricontour(tri_refi, z_test_refi, levels=levels,
colors=['0.25', '0.5', '0.5', '0.5', '0.5'],
linewidths=[1.0, 0.5, 0.5, 0.5, 0.5])
ax.set_title("High-resolution tricontouring")
return getSVG(plt)
def example8():
fig, axs = plt.subplots(2, 2)
cm = ['RdBu_r', 'viridis']
for col in range(2):
for row in range(2):
ax = axs[row, col]
pcm = ax.pcolormesh(np.random.random((20, 20)) * (col + 1),
cmap=cm[col])
fig.colorbar(pcm, ax=ax)
return getSVG(plt)
def example9():
# Fixing random state for reproducibility
np.random.seed(19680801)
n = 100000
x = np.random.standard_normal(n)
y = 2.0 + 3.0 * x + 4.0 * np.random.standard_normal(n)
xmin = x.min()
xmax = x.max()
ymin = y.min()
ymax = y.max()
fig, axs = plt.subplots(ncols=2, sharey=True, figsize=(7, 4))
fig.subplots_adjust(hspace=0.5, left=0.07, right=0.93)
ax = axs[0]
hb = ax.hexbin(x, y, gridsize=50, cmap='inferno')
ax.set(xlim=(xmin, xmax), ylim=(ymin, ymax))
ax.set_title("Hexagon binning")
cb = fig.colorbar(hb, ax=ax)
cb.set_label('counts')
ax = axs[1]
hb = ax.hexbin(x, y, gridsize=50, bins='log', cmap='inferno')
ax.set(xlim=(xmin, xmax), ylim=(ymin, ymax))
ax.set_title("With a log color scale")
cb = fig.colorbar(hb, ax=ax)
cb.set_label('log10(N)')
return getSVG(plt)
def example10():
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 5, 0.25)
X, Y = np.meshgrid(X, Y)
R = np.sqrt(X**2 + Y**2)
Z = np.sin(R)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.viridis)
return getSVG(plt)
def ac_connect(dom):
dom.inner("", BODY)
dom.disable_element("Ready")
for example in range(1,AMOUNT+1):
dom.end("Plots", f'<fieldset class="hide" id="example{example}">Please wait ({example}/{AMOUNT})…</fieldset>')
dom.inner(f"example{example}", f"<legend>Example {example}</legend>{run(example)}")
dom.flush()
dom.scroll_to(f'example{example}')
dom.end("Buttons",f'<button id="{example}" data-xdh-onevent="Display">{example}</button>')
dom.enable_element("Ready")
dom.remove_class(f"example1", "hide")
def ac_display(dom,id):
dom.add_classes(_hiding)
dom.remove_class(f"example{id}","hide")
_hiding = {}
for i in range(1, AMOUNT+1):
_hiding[f"example{i}"] = "hide"
atlastk.launch({"": ac_connect, "Display": ac_display},None,HEAD) | agpl-3.0 |
dnjohnstone/hyperspy | hyperspy/_signals/signal2d.py | 1 | 35008 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import numpy.ma as ma
import dask.array as da
import logging
import warnings
from scipy import ndimage
try:
# For scikit-image >= 0.17.0
from skimage.registration._phase_cross_correlation import _upsampled_dft
except ModuleNotFoundError:
from skimage.feature.register_translation import _upsampled_dft
from hyperspy.defaults_parser import preferences
from hyperspy.external.progressbar import progressbar
from hyperspy.misc.math_tools import symmetrize, antisymmetrize, optimal_fft_size
from hyperspy.signal import BaseSignal
from hyperspy._signals.lazy import LazySignal
from hyperspy._signals.common_signal2d import CommonSignal2D
from hyperspy.signal_tools import PeaksFinder2D
from hyperspy.docstrings.plot import (
BASE_PLOT_DOCSTRING, PLOT2D_DOCSTRING, KWARGS_DOCSTRING)
from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG
from hyperspy.ui_registry import DISPLAY_DT, TOOLKIT_DT
from hyperspy.utils.peakfinders2D import (
find_local_max, find_peaks_max, find_peaks_minmax, find_peaks_zaefferer,
find_peaks_stat, find_peaks_log, find_peaks_dog, find_peaks_xc)
_logger = logging.getLogger(__name__)
def shift_image(im, shift=0, interpolation_order=1, fill_value=np.nan):
if np.any(shift):
fractional, integral = np.modf(shift)
if fractional.any():
order = interpolation_order
else:
# Disable interpolation
order = 0
return ndimage.shift(im, shift, cval=fill_value, order=order)
else:
return im
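# Example (sketch): integer shifts are applied without interpolation (order 0),
# while fractional shifts use interpolation of `interpolation_order`.
#
#     im = np.arange(16.).reshape(4, 4)
#     shift_image(im, shift=(1, 0), fill_value=0)   # rows moved by one, zero padded
#     shift_image(im, shift=(0.5, 0))               # interpolated, NaN padded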
def triu_indices_minus_diag(n):
"""Returns the indices for the upper-triangle of an (n, n) array
excluding its diagonal
Parameters
----------
n : int
The length of the square array
"""
ti = np.triu_indices(n)
isnotdiag = ti[0] != ti[1]
return ti[0][isnotdiag], ti[1][isnotdiag]
def hanning2d(M, N):
"""
A 2D hanning window created by outer product.
"""
return np.outer(np.hanning(M), np.hanning(N))
def sobel_filter(im):
sx = ndimage.sobel(im, axis=0, mode='constant')
sy = ndimage.sobel(im, axis=1, mode='constant')
sob = np.hypot(sx, sy)
return sob
def fft_correlation(in1, in2, normalize=False, real_only=False):
"""Correlation of two N-dimensional arrays using FFT.
Adapted from scipy's fftconvolve.
Parameters
----------
in1, in2 : array
Input arrays to convolve.
normalize: bool, default False
If True performs phase correlation.
real_only : bool, default False
If True, and in1 and in2 are real-valued inputs, uses
rfft instead of fft for approx. 2x speed-up.
"""
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
size = s1 + s2 - 1
# Calculate optimal FFT size
complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c')
fsize = [optimal_fft_size(a, not complex_result) for a in size]
# For real-valued inputs, rfftn is ~2x faster than fftn
if not complex_result and real_only:
fft_f, ifft_f = np.fft.rfftn, np.fft.irfftn
else:
fft_f, ifft_f = np.fft.fftn, np.fft.ifftn
fprod = fft_f(in1, fsize)
fprod *= fft_f(in2, fsize).conjugate()
if normalize is True:
fprod = np.nan_to_num(fprod / np.absolute(fprod))
ret = ifft_f(fprod).real.copy()
return ret, fprod
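# Example (sketch): correlating an image with a shifted copy of itself gives a
# correlation surface whose maximum encodes the relative displacement; this is
# what estimate_image_shift below relies on.
#
#     rng = np.random.RandomState(0)
#     a = rng.rand(64, 64)
#     b = np.roll(a, (3, 5), axis=(0, 1))
#     corr, fprod = fft_correlation(a, b, real_only=True)
#     peak = np.unravel_index(corr.argmax(), corr.shape)   # encodes the shift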
def estimate_image_shift(ref, image, roi=None, sobel=True,
medfilter=True, hanning=True, plot=False,
dtype='float', normalize_corr=False,
sub_pixel_factor=1,
return_maxval=True):
"""Estimate the shift in a image using phase correlation
This method can only estimate the shift by comparing
bidimensional features that should not change the position
in the given axis. To decrease the memory usage, the time of
computation and the accuracy of the results it is convenient
to select a region of interest by setting the roi keyword.
Parameters
----------
ref : 2D numpy.ndarray
Reference image
image : 2D numpy.ndarray
Image to register
roi : tuple of ints (top, bottom, left, right)
Define the region of interest
sobel : bool
apply a sobel filter for edge enhancement
medfilter : bool
apply a median filter for noise reduction
hanning : bool
Apply a 2d hanning filter
plot : bool or matplotlib.Figure
If True, plots the images after applying the filters and the phase
correlation. If a figure instance, the images will be plotted to the
given figure.
reference : 'current' or 'cascade'
If 'current' (default) the image at the current
coordinates is taken as reference. If 'cascade' each image
is aligned with the previous one.
dtype : str or dtype
Typecode or data-type in which the calculations must be
performed.
normalize_corr : bool
If True use phase correlation instead of standard correlation
sub_pixel_factor : float
Estimate shifts with a sub-pixel accuracy of 1/sub_pixel_factor parts
of a pixel. Default is 1, i.e. no sub-pixel accuracy.
Returns
-------
shifts: np.array
containing the estimate shifts
max_value : float
The maximum value of the correlation
Notes
-----
The statistical analysis approach to the translation estimation
when using reference='stat' roughly follows [*]_ . If you use
it please cite their article.
References
----------
.. [*] Bernhard Schaffer, Werner Grogger and Gerald Kothleitner.
“Automated Spatial Drift Correction for EFTEM Image Series.”
Ultramicroscopy 102, no. 1 (December 2004): 27–36.
"""
ref, image = da.compute(ref, image)
# Make a copy of the images to avoid modifying them
ref = ref.copy().astype(dtype)
image = image.copy().astype(dtype)
if roi is not None:
top, bottom, left, right = roi
else:
top, bottom, left, right = [None, ] * 4
# Select region of interest
ref = ref[top:bottom, left:right]
image = image[top:bottom, left:right]
# Apply filters
for im in (ref, image):
if hanning is True:
im *= hanning2d(*im.shape)
if medfilter is True:
# This is faster than sp.signal.med_filt,
# which was the previous implementation.
# The size is fixed at 3 to be consistent
# with the previous implementation.
im[:] = sp.ndimage.median_filter(im, size=3)
if sobel is True:
im[:] = sobel_filter(im)
# If sub-pixel alignment not being done, use faster real-valued fft
real_only = (sub_pixel_factor == 1)
phase_correlation, image_product = fft_correlation(
ref, image, normalize=normalize_corr, real_only=real_only)
# Estimate the shift by getting the coordinates of the maximum
argmax = np.unravel_index(np.argmax(phase_correlation),
phase_correlation.shape)
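    # Peaks located beyond half of the correlation size correspond to negative
    # shifts (FFT wrap-around), so map them back below.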
threshold = (phase_correlation.shape[0] / 2 - 1,
phase_correlation.shape[1] / 2 - 1)
shift0 = argmax[0] if argmax[0] < threshold[0] else \
argmax[0] - phase_correlation.shape[0]
shift1 = argmax[1] if argmax[1] < threshold[1] else \
argmax[1] - phase_correlation.shape[1]
max_val = phase_correlation.real.max()
shifts = np.array((shift0, shift1))
# The following code is more or less copied from
# skimage.feature.register_feature, to gain access to the maximum value:
if sub_pixel_factor != 1:
# Initial shift estimate in upsampled grid
shifts = np.round(shifts * sub_pixel_factor) / sub_pixel_factor
upsampled_region_size = np.ceil(sub_pixel_factor * 1.5)
# Center of output array at dftshift + 1
dftshift = np.fix(upsampled_region_size / 2.0)
sub_pixel_factor = np.array(sub_pixel_factor, dtype=np.float64)
normalization = (image_product.size * sub_pixel_factor ** 2)
# Matrix multiply DFT around the current shift estimate
sample_region_offset = dftshift - shifts * sub_pixel_factor
correlation = _upsampled_dft(image_product.conj(),
upsampled_region_size,
sub_pixel_factor,
sample_region_offset).conj()
correlation /= normalization
# Locate maximum and map back to original pixel grid
maxima = np.array(np.unravel_index(
np.argmax(np.abs(correlation)),
correlation.shape),
dtype=np.float64)
maxima -= dftshift
shifts = shifts + maxima / sub_pixel_factor
max_val = correlation.real.max()
# Plot on demand
if plot is True or isinstance(plot, plt.Figure):
if isinstance(plot, plt.Figure):
fig = plot
axarr = plot.axes
if len(axarr) < 3:
for i in range(3):
fig.add_subplot(1, 3, i + 1)
axarr = fig.axes
else:
fig, axarr = plt.subplots(1, 3)
full_plot = len(axarr[0].images) == 0
if full_plot:
axarr[0].set_title('Reference')
axarr[1].set_title('Image')
axarr[2].set_title('Phase correlation')
axarr[0].imshow(ref)
axarr[1].imshow(image)
d = (np.array(phase_correlation.shape) - 1) // 2
extent = [-d[1], d[1], -d[0], d[0]]
axarr[2].imshow(np.fft.fftshift(phase_correlation),
extent=extent)
plt.show()
else:
axarr[0].images[0].set_data(ref)
axarr[1].images[0].set_data(image)
axarr[2].images[0].set_data(np.fft.fftshift(phase_correlation))
# TODO: Renormalize images
fig.canvas.draw_idle()
    # Free the memory. This is especially necessary when the input is a
    # memory map
del ref
del image
if return_maxval:
return -shifts, max_val
else:
return -shifts
class Signal2D(BaseSignal, CommonSignal2D):
"""
"""
_signal_dimension = 2
_lazy = False
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
if self.axes_manager.signal_dimension != 2:
self.axes_manager.set_signal_dimension(2)
def plot(self,
colorbar=True,
scalebar=True,
scalebar_color="white",
axes_ticks=None,
axes_off=False,
saturated_pixels=None,
vmin=None,
vmax=None,
gamma=1.0,
no_nans=False,
centre_colormap="auto",
min_aspect=0.1,
**kwargs
):
"""%s
%s
%s
"""
super(Signal2D, self).plot(
colorbar=colorbar,
scalebar=scalebar,
scalebar_color=scalebar_color,
axes_ticks=axes_ticks,
axes_off=axes_off,
saturated_pixels=saturated_pixels,
vmin=vmin,
vmax=vmax,
gamma=gamma,
no_nans=no_nans,
centre_colormap=centre_colormap,
min_aspect=min_aspect,
**kwargs
)
plot.__doc__ %= (BASE_PLOT_DOCSTRING, PLOT2D_DOCSTRING, KWARGS_DOCSTRING)
def create_model(self, dictionary=None):
"""Create a model for the current signal
Parameters
----------
dictionary : {None, dict}, optional
A dictionary to be used to recreate a model. Usually generated
using :meth:`hyperspy.model.as_dictionary`
Returns
-------
A Model class
"""
from hyperspy.models.model2d import Model2D
return Model2D(self, dictionary=dictionary)
def estimate_shift2D(self,
reference='current',
correlation_threshold=None,
chunk_size=30,
roi=None,
normalize_corr=False,
sobel=True,
medfilter=True,
hanning=True,
plot=False,
dtype='float',
show_progressbar=None,
sub_pixel_factor=1):
"""Estimate the shifts in an image using phase correlation.
This method can only estimate the shift by comparing
bi-dimensional features that should not change position
        between frames. To decrease the memory usage and the time of
        computation, and to improve the accuracy of the results, it is
        convenient to select a region of interest by setting the ``roi`` argument.
Parameters
----------
reference : {'current', 'cascade' ,'stat'}
If 'current' (default) the image at the current
coordinates is taken as reference. If 'cascade' each image
is aligned with the previous one. If 'stat' the translation
of every image with all the rest is estimated and by
performing statistical analysis on the result the
translation is estimated.
correlation_threshold : {None, 'auto', float}
This parameter is only relevant when reference='stat'.
If float, the shift estimations with a maximum correlation
value lower than the given value are not used to compute
the estimated shifts. If 'auto' the threshold is calculated
automatically as the minimum maximum correlation value
of the automatically selected reference image.
chunk_size : {None, int}
If int and reference='stat' the number of images used
as reference are limited to the given value.
roi : tuple of ints or floats (left, right, top, bottom)
            Define the region of interest. If int (float), the position
            is given as an axis index (axis value). Note that ROIs can be used
            in place of a tuple.
normalize_corr : bool, default False
If True, use phase correlation to align the images, otherwise
use cross correlation.
sobel : bool, default True
Apply a Sobel filter for edge enhancement
medfilter : bool, default True
Apply a median filter for noise reduction
hanning : bool, default True
Apply a 2D hanning filter
plot : bool or 'reuse'
If True plots the images after applying the filters and
the phase correlation. If 'reuse', it will also plot the images,
but it will only use one figure, and continuously update the images
in that figure as it progresses through the stack.
dtype : str or dtype
Typecode or data-type in which the calculations must be
performed.
%s
sub_pixel_factor : float
Estimate shifts with a sub-pixel accuracy of 1/sub_pixel_factor
parts of a pixel. Default is 1, i.e. no sub-pixel accuracy.
Returns
-------
shifts : list of array
List of estimated shifts
Notes
-----
The statistical analysis approach to the translation estimation
when using ``reference='stat'`` roughly follows [Schaffer2004]_.
If you use it please cite their article.
References
----------
.. [Schaffer2004] Schaffer, Bernhard, Werner Grogger, and Gerald Kothleitner.
“Automated Spatial Drift Correction for EFTEM Image Series.”
Ultramicroscopy 102, no. 1 (December 2004): 27–36.
See Also
--------
* :py:meth:`~._signals.signal2d.Signal2D.align2D`
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_two()
if roi is not None:
# Get the indices of the roi
yaxis = self.axes_manager.signal_axes[1]
xaxis = self.axes_manager.signal_axes[0]
roi = tuple([xaxis._get_index(i) for i in roi[2:]] +
[yaxis._get_index(i) for i in roi[:2]])
ref = None if reference == 'cascade' else \
self.__call__().copy()
shifts = []
nrows = None
images_number = self.axes_manager._max_index + 1
if plot == 'reuse':
# Reuse figure for plots
plot = plt.figure()
if reference == 'stat':
nrows = images_number if chunk_size is None else \
min(images_number, chunk_size)
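            # pcarray stores, for every image pair (i1, i2), the maximum
            # correlation value and the estimated (y, x) shift.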
pcarray = ma.zeros((nrows, self.axes_manager._max_index + 1,
),
                               dtype=np.dtype([('max_value', np.float64),
('shift', np.int32,
(2,))]))
nshift, max_value = estimate_image_shift(
self(),
self(),
roi=roi,
sobel=sobel,
medfilter=medfilter,
hanning=hanning,
normalize_corr=normalize_corr,
plot=plot,
dtype=dtype,
sub_pixel_factor=sub_pixel_factor)
np.fill_diagonal(pcarray['max_value'], max_value)
pbar_max = nrows * images_number
else:
pbar_max = images_number
# Main iteration loop. Fills the rows of pcarray when reference
# is stat
with progressbar(total=pbar_max,
disable=not show_progressbar,
leave=True) as pbar:
for i1, im in enumerate(self._iterate_signal()):
if reference in ['current', 'cascade']:
if ref is None:
ref = im.copy()
shift = np.array([0, 0])
nshift, max_val = estimate_image_shift(
ref, im, roi=roi, sobel=sobel, medfilter=medfilter,
hanning=hanning, plot=plot,
normalize_corr=normalize_corr, dtype=dtype,
sub_pixel_factor=sub_pixel_factor)
if reference == 'cascade':
shift += nshift
ref = im.copy()
else:
shift = nshift
shifts.append(shift.copy())
pbar.update(1)
elif reference == 'stat':
if i1 == nrows:
break
# Iterate to fill the columns of pcarray
for i2, im2 in enumerate(
self._iterate_signal()):
if i2 > i1:
nshift, max_value = estimate_image_shift(
im,
im2,
roi=roi,
sobel=sobel,
medfilter=medfilter,
hanning=hanning,
normalize_corr=normalize_corr,
plot=plot,
dtype=dtype,
sub_pixel_factor=sub_pixel_factor)
pcarray[i1, i2] = max_value, nshift
del im2
pbar.update(1)
del im
if reference == 'stat':
# Select the reference image as the one that has the
# higher max_value in the row
sqpcarr = pcarray[:, :nrows]
sqpcarr['max_value'][:] = symmetrize(sqpcarr['max_value'])
sqpcarr['shift'][:] = antisymmetrize(sqpcarr['shift'])
ref_index = np.argmax(pcarray['max_value'].min(1))
self.ref_index = ref_index
shifts = (pcarray['shift'] +
pcarray['shift'][ref_index, :nrows][:, np.newaxis])
if correlation_threshold is not None:
if correlation_threshold == 'auto':
correlation_threshold = \
(pcarray['max_value'].min(0)).max()
_logger.info("Correlation threshold = %1.2f",
correlation_threshold)
shifts[pcarray['max_value'] <
correlation_threshold] = ma.masked
shifts.mask[ref_index, :] = False
shifts = shifts.mean(0)
else:
shifts = np.array(shifts)
del ref
return shifts
estimate_shift2D.__doc__ %= SHOW_PROGRESSBAR_ARG
def align2D(
self,
crop=True,
fill_value=np.nan,
shifts=None,
expand=False,
interpolation_order=1,
show_progressbar=None,
parallel=None,
max_workers=None,
**kwargs,
):
"""Align the images in-place using :py:func:`scipy.ndimage.shift`.
The images can be aligned using either user-provided shifts or
by first estimating the shifts.
See :py:meth:`~._signals.signal2d.Signal2D.estimate_shift2D`
for more details on estimating image shifts.
Parameters
----------
crop : bool
If True, the data will be cropped not to include regions
with missing data
fill_value : int, float, nan
The areas with missing data are filled with the given value.
Default is nan.
shifts : None or list of tuples
If None the shifts are estimated using
:py:meth:`~._signals.signal2D.estimate_shift2D`.
expand : bool
If True, the data will be expanded to fit all data after alignment.
Overrides `crop`.
interpolation_order: int, default 1.
The order of the spline interpolation. Default is 1, linear
interpolation.
%s
%s
%s
**kwargs :
Keyword arguments passed to :py:meth:`~._signals.signal2d.Signal2D.estimate_shift2D`
Returns
-------
shifts : np.array
The estimated shifts are returned only if ``shifts`` is None
See Also
--------
* :py:meth:`~._signals.signal2d.Signal2D.estimate_shift2D`
"""
self._check_signal_dimension_equals_two()
return_shifts = False
if shifts is None:
shifts = self.estimate_shift2D(**kwargs)
return_shifts = True
if not np.any(shifts):
warnings.warn(
"The estimated shifts are all zero, suggesting "
"the images are already aligned",
UserWarning,
)
return shifts
elif not np.any(shifts):
warnings.warn(
"The provided shifts are all zero, no alignment done",
UserWarning,
)
return None
if expand:
# Expand to fit all valid data
left, right = (
int(np.floor(shifts[:, 1].min())) if shifts[:, 1].min() < 0 else 0,
int(np.ceil(shifts[:, 1].max())) if shifts[:, 1].max() > 0 else 0,
)
top, bottom = (
int(np.floor(shifts[:, 0].min())) if shifts[:, 0].min() < 0 else 0,
int(np.ceil(shifts[:, 0].max())) if shifts[:, 0].max() > 0 else 0,
)
xaxis = self.axes_manager.signal_axes[0]
yaxis = self.axes_manager.signal_axes[1]
padding = []
for i in range(self.data.ndim):
if i == xaxis.index_in_array:
padding.append((right, -left))
elif i == yaxis.index_in_array:
padding.append((bottom, -top))
else:
padding.append((0, 0))
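            # left/top are <= 0 and right/bottom are >= 0, so the pad widths
            # computed above are always non-negative.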
self.data = np.pad(
self.data, padding, mode="constant", constant_values=(fill_value,)
)
if left < 0:
xaxis.offset += left * xaxis.scale
if np.any((left < 0, right > 0)):
xaxis.size += right - left
if top < 0:
yaxis.offset += top * yaxis.scale
if np.any((top < 0, bottom > 0)):
yaxis.size += bottom - top
        # Translate, with sub-pixel precision if necessary,
# note that we operate in-place here
self._map_iterate(
shift_image,
iterating_kwargs=(("shift", -shifts),),
show_progressbar=show_progressbar,
parallel=parallel,
max_workers=max_workers,
ragged=False,
inplace=True,
fill_value=fill_value,
interpolation_order=interpolation_order,
)
if crop and not expand:
max_shift = np.max(shifts, axis=0) - np.min(shifts, axis=0)
if np.any(max_shift >= np.array(self.axes_manager.signal_shape)):
raise ValueError("Shift outside range of signal axes. Cannot crop signal.")
# Crop the image to the valid size
shifts = -shifts
bottom, top = (
int(np.floor(shifts[:, 0].min())) if shifts[:, 0].min() < 0 else None,
int(np.ceil(shifts[:, 0].max())) if shifts[:, 0].max() > 0 else 0,
)
right, left = (
int(np.floor(shifts[:, 1].min())) if shifts[:, 1].min() < 0 else None,
int(np.ceil(shifts[:, 1].max())) if shifts[:, 1].max() > 0 else 0,
)
self.crop_image(top, bottom, left, right)
shifts = -shifts
self.events.data_changed.trigger(obj=self)
if return_shifts:
return shifts
align2D.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
def crop_image(self, top=None, bottom=None,
left=None, right=None, convert_units=False):
"""Crops an image in place.
Parameters
----------
top, bottom, left, right : {int | float}
If int the values are taken as indices. If float the values are
converted to indices.
convert_units : bool
Default is False
If True, convert the signal units using the 'convert_to_units'
method of the `axes_manager`. If False, does nothing.
See also
--------
crop
"""
self._check_signal_dimension_equals_two()
self.crop(self.axes_manager.signal_axes[1].index_in_axes_manager,
top,
bottom)
self.crop(self.axes_manager.signal_axes[0].index_in_axes_manager,
left,
right)
if convert_units:
self.axes_manager.convert_units('signal')
def add_ramp(self, ramp_x, ramp_y, offset=0):
"""Add a linear ramp to the signal.
Parameters
----------
ramp_x: float
Slope of the ramp in x-direction.
ramp_y: float
Slope of the ramp in y-direction.
offset: float, optional
Offset of the ramp at the signal fulcrum.
Notes
-----
The fulcrum of the linear ramp is at the origin and the slopes are
given in units of the axis with the according scale taken into
account. Both are available via the `axes_manager` of the signal.
"""
yy, xx = np.indices(self.axes_manager._signal_shape_in_array)
if self._lazy:
import dask.array as da
ramp = offset * da.ones(self.data.shape, dtype=self.data.dtype,
chunks=self.data.chunks)
else:
ramp = offset * np.ones(self.data.shape, dtype=self.data.dtype)
ramp += ramp_x * xx
ramp += ramp_y * yy
self.data += ramp
def find_peaks(self, method='local_max', interactive=True,
current_index=False, show_progressbar=None,
parallel=None, max_workers=None, display=True, toolkit=None,
**kwargs):
"""Find peaks in a 2D signal.
Function to locate the positive peaks in an image using various, user
specified, methods. Returns a structured array containing the peak
positions.
Parameters
----------
method : str
Select peak finding algorithm to implement. Available methods
are:
* 'local_max' - simple local maximum search using the
:py:func:`skimage.feature.peak_local_max` function
* 'max' - simple local maximum search using the
:py:func:`~hyperspy.utils.peakfinders2D.find_peaks_max`.
* 'minmax' - finds peaks by comparing maximum filter results
with minimum filter, calculates centers of mass. See the
:py:func:`~hyperspy.utils.peakfinders2D.find_peaks_minmax`
function.
* 'zaefferer' - based on gradient thresholding and refinement
by local region of interest optimisation. See the
:py:func:`~hyperspy.utils.peakfinders2D.find_peaks_zaefferer`
function.
* 'stat' - based on statistical refinement and difference with
respect to mean intensity. See the
:py:func:`~hyperspy.utils.peakfinders2D.find_peaks_stat`
function.
* 'laplacian_of_gaussian' - a blob finder using the laplacian of
Gaussian matrices approach. See the
:py:func:`~hyperspy.utils.peakfinders2D.find_peaks_log`
function.
* 'difference_of_gaussian' - a blob finder using the difference
of Gaussian matrices approach. See the
              :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_dog`
function.
* 'template_matching' - A cross correlation peakfinder. This
method requires providing a template with the ``template``
parameter, which is used as reference pattern to perform the
template matching to the signal. It uses the
:py:func:`skimage.feature.match_template` function and the peaks
position are obtained by using `minmax` method on the
template matching result.
interactive : bool
If True, the method parameter can be adjusted interactively.
If False, the results will be returned.
current_index : bool
if True, the computation will be performed for the current index.
%s
%s
%s
%s
%s
**kwargs : dict
Keywords parameters associated with above methods, see the
documentation of each method for more details.
Notes
-----
        As a convenience, the 'local_max' method accepts the 'distance' and
        'threshold' arguments, which will be mapped to the 'min_distance' and
        'threshold_abs' arguments of the :py:func:`skimage.feature.peak_local_max`
function.
Returns
-------
peaks : :py:class:`~hyperspy.signal.BaseSignal` or numpy.ndarray if current_index=True
Array of shape `_navigation_shape_in_array` in which each cell
contains an array with dimensions (npeaks, 2) that contains
the `x, y` pixel coordinates of peaks found in each image sorted
first along `y` and then along `x`.
"""
method_dict = {
'local_max': find_local_max,
'max': find_peaks_max,
'minmax': find_peaks_minmax,
'zaefferer': find_peaks_zaefferer,
'stat': find_peaks_stat,
'laplacian_of_gaussian': find_peaks_log,
'difference_of_gaussian': find_peaks_dog,
'template_matching' : find_peaks_xc,
}
# As a convenience, we map 'distance' to 'min_distance' and
# 'threshold' to 'threshold_abs' when using the 'local_max' method to
# match with the arguments of skimage.feature.peak_local_max.
if method == 'local_max':
if 'distance' in kwargs.keys():
kwargs['min_distance'] = kwargs.pop('distance')
if 'threshold' in kwargs.keys():
kwargs['threshold_abs'] = kwargs.pop('threshold')
if method in method_dict.keys():
method_func = method_dict[method]
else:
raise NotImplementedError(f"The method `{method}` is not "
"implemented. See documentation for "
"available implementations.")
if interactive:
# Create a peaks signal with the same navigation shape as a
# placeholder for the output
axes_dict = self.axes_manager._get_axes_dicts(
self.axes_manager.navigation_axes)
peaks = BaseSignal(np.empty(self.axes_manager.navigation_shape),
axes=axes_dict)
pf2D = PeaksFinder2D(self, method=method, peaks=peaks, **kwargs)
pf2D.gui(display=display, toolkit=toolkit)
elif current_index:
peaks = method_func(self.__call__(), **kwargs)
else:
peaks = self.map(method_func, show_progressbar=show_progressbar,
parallel=parallel, inplace=False, ragged=True,
max_workers=max_workers, **kwargs)
return peaks
find_peaks.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG,
DISPLAY_DT, TOOLKIT_DT)
class LazySignal2D(LazySignal, Signal2D):
_lazy = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
| gpl-3.0 |
telefar/stockEye | coursera-compinvest1-master/coursera-compinvest1-master/homework/Homework4.py | 1 | 3966 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
'''
import pandas as pd
import numpy as np
import math
import copy
import QSTK.qstkutil.qsdateutil as du
import datetime as dt
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.tsutil as tsu
import csv
import QSTK.qstkstudy.EventProfiler as ep
"""
Accepts a list of symbols along with start and end date
Returns the Event Matrix which is a pandas Datamatrix
Event matrix has the following structure :
|IBM |GOOG|XOM |MSFT| GS | JP |
(d1)|nan |nan | 1 |nan |nan | 1 |
(d2)|nan | 1 |nan |nan |nan |nan |
(d3)| 1 |nan | 1 |nan | 1 |nan |
(d4)|nan | 1 |nan | 1 |nan |nan |
...................................
...................................
Also, d1 = start date
nan = no information about any event.
1 = status bit (positively confirms the event occurrence)
"""
def find_events(ls_symbols, d_data):
''' Finding the event dataframe '''
df_close = d_data['actual_close']
ts_market = df_close['SPY']
print "Finding Events"
# Creating an empty dataframe
df_events = copy.deepcopy(df_close)
df_events = df_events * np.NAN
# Time stamps for the event range
ldt_timestamps = df_close.index
writer = csv.writer(open('orders.csv', 'wb'), delimiter=',')
for s_sym in ls_symbols:
for i in range(1, len(ldt_timestamps)):
# Calculating the returns for this timestamp
f_symprice_today = df_close[s_sym].ix[ldt_timestamps[i]]
f_symprice_yest = df_close[s_sym].ix[ldt_timestamps[i - 1]]
f_marketprice_today = ts_market.ix[ldt_timestamps[i]]
f_marketprice_yest = ts_market.ix[ldt_timestamps[i - 1]]
f_symreturn_today = (f_symprice_today / f_symprice_yest) - 1
f_marketreturn_today = (f_marketprice_today / f_marketprice_yest) - 1
i_shares = 100
            # Event is found if the symbol is down more than 3% while the
            # market is up more than 2%
# if f_symreturn_today <= -0.03 and f_marketreturn_today >= 0.02:
# df_events[s_sym].ix[ldt_timestamps[i]] = 1
f_cutoff = 10.0
if f_symprice_today < f_cutoff and f_symprice_yest >= f_cutoff:
df_events[s_sym].ix[ldt_timestamps[i]] = 1
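                    # Write a buy order on the event day and a matching sell
                    # order five trading days later (or on the last timestamp).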
row_to_enter = [str(ldt_timestamps[i].year), str(ldt_timestamps[i].month), \
str(ldt_timestamps[i].day), s_sym, 'Buy', i_shares]
writer.writerow(row_to_enter)
try:
time_n = ldt_timestamps[i + 5]
except:
time_n = ldt_timestamps[-1]
row_to_enter = [str(time_n.year), str(time_n.month), \
str(time_n.day), s_sym, 'Sell', i_shares]
writer.writerow(row_to_enter)
return df_events
if __name__ == '__main__':
dt_start = dt.datetime(2008, 1, 1)
dt_end = dt.datetime(2009, 12, 31)
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
dataobj = da.DataAccess('Yahoo')
ls_symbols = dataobj.get_symbols_from_list('sp5002012')
ls_symbols.append('SPY')
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method = 'ffill')
d_data[s_key] = d_data[s_key].fillna(method = 'bfill')
d_data[s_key] = d_data[s_key].fillna(1.0)
df_events = find_events(ls_symbols, d_data)
# print "Creating Study"
# ep.eventprofiler(df_events, d_data, i_lookback=20, i_lookforward=20,
# s_filename='MyEventStudy.pdf', b_market_neutral=True, b_errorbars=True,
# s_market_sym='SPY')
| bsd-3-clause |
gpfreitas/bokeh | bokeh/_legacy_charts/builder/scatter_builder.py | 43 | 7792 | """This is the Bokeh charts interface. It gives you a high level API
to build complex plots in a simple way.
This is the Scatter class which lets you build your Scatter charts
by just passing the arguments to the Chart class and calling the proper
functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
try:
import pandas as pd
except:
pd = None
from collections import OrderedDict
from ..utils import chunk, cycle_colors, make_scatter
from .._builder import create_and_build, Builder
from .._data_adapter import DataAdapter
from ...models import ColumnDataSource, Range1d
from ...properties import String
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Scatter(values, **kws):
""" Create a scatter chart using :class:`ScatterBuilder <bokeh.charts.builder.scatter_builder.ScatterBuilder>`
to render the geometry from values.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
In addition the the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
from bokeh.charts import Scatter, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames of (x, y) tuples are valid inputs)
xyvalues = OrderedDict()
xyvalues['python'] = [(1, 2), (3, 3), (4, 7), (5, 5), (8, 26)]
xyvalues['pypy'] = [(1, 12), (2, 23), (4, 47), (5, 15), (8, 46)]
xyvalues['jython'] = [(1, 22), (2, 43), (4, 10), (6, 25), (8, 26)]
scatter = Scatter(xyvalues, title="Scatter", legend="top_left", ylabel='Languages')
output_file('scatter.html')
show(scatter)
"""
return create_and_build(ScatterBuilder, values, **kws)
class ScatterBuilder(Builder):
"""This is the Scatter class and it is in charge of plotting
Scatter charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges. And finally add
the needed glyphs (markers) taking the references from the source.
"""
# TODO: (bev) should be an enumeration
marker = String("circle", help="""
The marker type to use (default: ``circle``).
""")
def _process_data(self):
"""Take the scatter.values data to calculate the chart properties
accordingly. Then build a dict containing references to all the
calculated points to be used by the marker glyph inside the
``_yield_renderers`` method.
"""
self._data = dict()
# list to save all the attributes we are going to create
self._attr = []
# list to save all the groups available in the incoming input
self._groups.extend(self._values.keys())
# Grouping
self.parse_data()
@property
def parse_data(self):
"""Parse data received from self._values and create correct x, y
series values checking if input is a pandas DataFrameGroupBy
        object or one of the standard supported types (that can be
converted to a DataAdapter)
"""
if pd is not None and \
isinstance(self._values, pd.core.groupby.DataFrameGroupBy):
return self._parse_groupped_data
else:
return self._parse_data
def _parse_groupped_data(self):
"""Parse data in self._values in case it's a pandas
DataFrameGroupBy and create the data 'x_...' and 'y_...' values
for all data series
"""
for i, val in enumerate(self._values.keys()):
xy = self._values[val]
self._set_and_get("x_", val, xy[:, 0])
self._set_and_get("y_", val, xy[:, 1])
def _parse_data(self):
"""Parse data in self._values in case it's an iterable (not a pandas
DataFrameGroupBy) and create the data 'x_...' and 'y_...' values
for all data series
"""
for i, val in enumerate(self._values.keys()):
x_, y_ = [], []
xy = self._values[val]
for value in self._values.index:
x_.append(xy[value][0])
y_.append(xy[value][1])
self.set_and_get("x_", val, x_)
self.set_and_get("y_", val, y_)
def _set_sources(self):
"""Push the Scatter data into the ColumnDataSource and
calculate the proper ranges."""
self._source = ColumnDataSource(self._data)
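        # self._attr alternates x_* and y_* keys, so even indices hold the x
        # series names and odd indices the y series names.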
x_names, y_names = self._attr[::2], self._attr[1::2]
endx = max(max(self._data[i]) for i in x_names)
startx = min(min(self._data[i]) for i in x_names)
self.x_range = Range1d(
start=startx - 0.1 * (endx - startx),
end=endx + 0.1 * (endx - startx)
)
endy = max(max(self._data[i]) for i in y_names)
starty = min(min(self._data[i]) for i in y_names)
self.y_range = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
def _yield_renderers(self):
"""Use the marker glyphs to display the points.
Takes reference points from data loaded at the ColumnDataSource.
"""
duplets = list(chunk(self._attr, 2))
colors = cycle_colors(duplets, self.palette)
for i, duplet in enumerate(duplets, start=1):
renderer = make_scatter(
self._source, duplet[0], duplet[1], self.marker, colors[i - 1]
)
self._legends.append((self._groups[i-1], [renderer]))
yield renderer
def _adapt_values(self):
"""Prepare context before main show method is invoked.
Customize show preliminary actions by handling DataFrameGroupBy
values in order to create the series values and labels."""
# check if pandas is installed
if pd:
            # if it is, we try to take advantage of its data structures,
            # assuming we get a groupby object
if isinstance(self._values, pd.core.groupby.DataFrameGroupBy):
pdict = OrderedDict()
for i in self._values.groups.keys():
self._labels = self._values.get_group(i).columns
xname = self._values.get_group(i).columns[0]
yname = self._values.get_group(i).columns[1]
x = getattr(self._values.get_group(i), xname)
y = getattr(self._values.get_group(i), yname)
pdict[i] = np.array([x.values, y.values]).T
self._values = DataAdapter(pdict)
self._labels = self._values.keys()
else:
self._values = DataAdapter(self._values)
self._labels = self._values.keys()
else:
self._values = DataAdapter(self._values)
self._labels = self._values.keys()
| bsd-3-clause |
microsoft/EconML | econml/tests/test_shap.py | 1 | 10941 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import unittest
import shap
from shap.plots import scatter, heatmap, bar, beeswarm, waterfall, force
from econml.dml import *
from econml.orf import *
from econml.dr import *
from econml.metalearners import *
from sklearn.linear_model import LinearRegression, LogisticRegression, Lasso
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.preprocessing import PolynomialFeatures
class TestShap(unittest.TestCase):
def test_continuous_t(self):
n = 100
d_x = 3
d_w = 2
X = np.random.normal(size=(n, d_x))
W = np.random.normal(size=(n, d_w))
for d_t in [2, 1]:
for d_y in [3, 1]:
Y = np.random.normal(size=(n, d_y))
Y = Y.flatten() if d_y == 1 else Y
T = np.random.normal(size=(n, d_t))
T = T.flatten() if d_t == 1 else T
for featurizer in [None, PolynomialFeatures(degree=2, include_bias=False)]:
est_list = [LinearDML(model_y=LinearRegression(),
model_t=LinearRegression(), featurizer=featurizer),
CausalForestDML(model_y=LinearRegression(), model_t=LinearRegression())]
if d_t == 1:
est_list += [
NonParamDML(model_y=LinearRegression(
), model_t=LinearRegression(), model_final=RandomForestRegressor(), featurizer=featurizer),
]
for est in est_list:
with self.subTest(est=est, featurizer=featurizer, d_y=d_y, d_t=d_t):
est.fit(Y, T, X, W)
shap_values = est.shap_values(X[:10], feature_names=["a", "b", "c"],
background_samples=None)
for i, output in enumerate(est.cate_output_names()):
for j, treat in enumerate(est.cate_treatment_names()):
# test base values equals to mean of constant marginal effect
if not isinstance(est, (CausalForestDML, DMLOrthoForest)):
mean_cate = est.const_marginal_effect(X[:10]).mean(axis=0)
mean_cate = np.array(mean_cate).reshape((d_y, d_t))[i, j]
self.assertAlmostEqual(
shap_values[output][treat].base_values[0], mean_cate, delta=1e-2)
# test shape of shap values output is as expected
self.assertEqual(len(shap_values[output]), d_t)
self.assertEqual(len(shap_values), d_y)
# test shape of attribute of explanation object is as expected
self.assertEqual(shap_values[output][treat].values.shape[0], 10)
self.assertEqual(shap_values[output][treat].data.shape[0], 10)
self.assertEqual(shap_values[output][treat].base_values.shape, (10,))
# test length of feature names equals to shap values shape
self.assertEqual(
len(shap_values[output][treat].feature_names),
shap_values[output][treat].values.shape[1])
def test_discrete_t(self):
n = 100
d_x = 3
d_w = 2
X = np.random.normal(size=(n, d_x))
W = np.random.normal(size=(n, d_w))
for d_t in [3, 2]:
for d_y in [3, 1]:
Y = np.random.normal(size=(n, d_y))
Y = Y.flatten() if d_y == 1 else Y
T = np.random.choice(range(d_t), size=(n,))
for featurizer in [None, PolynomialFeatures(degree=2, include_bias=False)]:
est_list = [LinearDML(featurizer=featurizer, discrete_treatment=True),
TLearner(models=RandomForestRegressor()),
SLearner(overall_model=RandomForestRegressor()),
XLearner(models=RandomForestRegressor()),
DomainAdaptationLearner(models=RandomForestRegressor(),
final_models=RandomForestRegressor()),
CausalForestDML(model_y=LinearRegression(), model_t=LogisticRegression(),
discrete_treatment=True)
]
if d_t == 2:
est_list += [
NonParamDML(model_y=LinearRegression(
), model_t=LogisticRegression(), model_final=RandomForestRegressor(),
featurizer=featurizer, discrete_treatment=True)]
if d_y == 1:
est_list += [DRLearner(multitask_model_final=True, featurizer=featurizer),
DRLearner(multitask_model_final=False, featurizer=featurizer),
ForestDRLearner()]
for est in est_list:
with self.subTest(est=est, featurizer=featurizer, d_y=d_y, d_t=d_t):
if isinstance(est, (TLearner, SLearner, XLearner, DomainAdaptationLearner)):
est.fit(Y, T, X)
else:
est.fit(Y, T, X, W)
shap_values = est.shap_values(X[:10], feature_names=["a", "b", "c"],
background_samples=None)
for i, output in enumerate(est.cate_output_names()):
for j, treat in enumerate(est.cate_treatment_names()):
# test base values equals to mean of constant marginal effect
if not isinstance(est, (CausalForestDML, ForestDRLearner, DROrthoForest)):
mean_cate = est.const_marginal_effect(X[:10]).mean(axis=0)
mean_cate = np.array(mean_cate).reshape((d_y, d_t - 1))[i, j]
self.assertAlmostEqual(
shap_values[output][treat].base_values[0], mean_cate, delta=1e-2)
# test shape of shap values output is as expected
self.assertEqual(len(shap_values[output]), d_t - 1)
self.assertEqual(len(shap_values), d_y)
# test shape of attribute of explanation object is as expected
self.assertEqual(shap_values[output][treat].values.shape[0], 10)
self.assertEqual(shap_values[output][treat].data.shape[0], 10)
self.assertEqual(shap_values[output][treat].base_values.shape, (10,))
# test length of feature names equals to shap values shape
self.assertEqual(
len(shap_values[output][treat].feature_names),
shap_values[output][treat].values.shape[1])
def test_identical_output(self):
# Treatment effect function
def exp_te(x):
return np.exp(2 * x[0])
n = 500
n_w = 10
support_size = 5
n_x = 2
# Outcome support
support_Y = np.random.choice(range(n_w), size=support_size, replace=False)
coefs_Y = np.random.uniform(0, 1, size=(support_size,))
def epsilon_sample(n):
return np.random.uniform(-1, 1, size=(n,))
# Treatment support
support_T = support_Y
coefs_T = np.random.uniform(0, 1, size=support_size)
def eta_sample(n):
return np.random.uniform(-1, 1, size=n)
# Generate controls, covariates, treatments and outcomes
W = np.random.normal(0, 1, size=(n, n_w))
X = np.random.uniform(0, 1, size=(n, n_x))
# Heterogeneous treatment effects
TE = np.array([np.exp(2 * x_i[0]) for x_i in X]).flatten()
T = np.dot(W[:, support_T], coefs_T) + eta_sample(n)
Y = (TE * T) + np.dot(W[:, support_Y], coefs_Y) + epsilon_sample(n)
Y = np.tile(Y.reshape(-1, 1), (1, 2))
est = LinearDML(model_y=Lasso(),
model_t=Lasso(),
random_state=123,
fit_cate_intercept=True,
featurizer=PolynomialFeatures(degree=2, include_bias=False))
est.fit(Y, T, X=X, W=W)
shap_values1 = est.shap_values(X[:10], feature_names=["A", "B"], treatment_names=["orange"],
background_samples=None)
est = LinearDML(model_y=Lasso(),
model_t=Lasso(),
random_state=123,
fit_cate_intercept=True,
featurizer=PolynomialFeatures(degree=2, include_bias=False))
est.fit(Y[:, 0], T, X=X, W=W)
shap_values2 = est.shap_values(X[:10], feature_names=["A", "B"], treatment_names=["orange"],
background_samples=None)
np.testing.assert_allclose(shap_values1["Y0"]["orange"].data,
shap_values2["Y0"]["orange"].data)
np.testing.assert_allclose(shap_values1["Y0"]["orange"].values,
shap_values2["Y0"]["orange"].values)
# TODO There is a matrix dimension mismatch between multiple outcome and single outcome, should solve that
# through shap package.
np.testing.assert_allclose(shap_values1["Y0"]["orange"].main_effects,
shap_values2["Y0"]["orange"].main_effects)
np.testing.assert_allclose(shap_values1["Y0"]["orange"].base_values,
shap_values2["Y0"]["orange"].base_values)
# test shap could generate the plot from the shap_values
heatmap(shap_values1["Y0"]["orange"], show=False)
waterfall(shap_values1["Y0"]["orange"][6], show=False)
scatter(shap_values1["Y0"]["orange"][:, "A"], show=False)
bar(shap_values1["Y0"]["orange"], show=False)
beeswarm(shap_values1["Y0"]["orange"], show=False)
| mit |
tarthy6/dozer-thesis | examples/old/concrete/uniax-post.py | 5 | 1677 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# demonstration of the woo.post2d module (see its documentation for details)
#
from woo import post2d
import pylab # the matlab-like interface of matplotlib
loadFile='/tmp/uniax-tension.woo.gz'
if not os.path.exists(loadFile): raise RuntimeError("Run uniax.py first so that %s is created"%loadFile)
O.load(loadFile)
# flattener that project to the xz plane
flattener=post2d.AxisFlatten(useRef=False,axis=1)
# return scalar given a Body instance
extractDmg=lambda b: b.state.normDmg
# will call flattener.planar implicitly
# the same as: extractVelocity=lambda b: flattener.planar(b,b.state['vel'])
extractVelocity=lambda b: b.state.vel
# create new figure
pylab.figure()
# plot raw damage
post2d.plot(post2d.data(extractDmg,flattener))
pylab.suptitle('damage')
# plot smooth damage into new figure
pylab.figure(); ax,map=post2d.plot(post2d.data(extractDmg,flattener,stDev=2e-3))
pylab.suptitle('smooth damage')
# show color scale
pylab.colorbar(map,orientation='horizontal')
# shear stress
pylab.figure()
post2d.plot(post2d.data(lambda b: b.state.sigma,flattener))
pylab.suptitle('sigma')
pylab.figure()
post2d.plot(post2d.data(lambda b: b.state.tau,flattener,stDev=2e-3))
pylab.suptitle('smooth tau (in grid)')
# raw velocity (vector field) plot
pylab.figure(); post2d.plot(post2d.data(extractVelocity,flattener))
pylab.suptitle('velocity')
# smooth velocity plot; data are sampled at regular grid
pylab.figure(); ax,map=post2d.plot(post2d.data(extractVelocity,flattener,stDev=1e-3))
pylab.suptitle('smooth velocity')
# save last (current) figure to file
pylab.gcf().savefig('/tmp/foo.png')
# show the figures
pylab.show()
| gpl-2.0 |
adexin/Python-Machine-Learning-Samples | Other_samples/Regularization/reg_utils.py | 1 | 10602 | import numpy as np
import matplotlib.pyplot as plt
import h5py
import sklearn
import sklearn.datasets
import sklearn.linear_model
import scipy.io
def sigmoid(x):
"""
Compute the sigmoid of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- sigmoid(x)
"""
s = 1 / (1 + np.exp(-x))
return s
def relu(x):
"""
Compute the relu of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- relu(x)
"""
s = np.maximum(0, x)
return s
def load_planar_dataset(seed):
np.random.seed(seed)
m = 400 # number of examples
N = int(m / 2) # number of points per class
D = 2 # dimensionality
X = np.zeros((m, D)) # data matrix where each row is a single example
Y = np.zeros((m, 1), dtype='uint8') # labels vector (0 for red, 1 for blue)
a = 4 # maximum ray of the flower
for j in range(2):
ix = range(N * j, N * (j + 1))
t = np.linspace(j * 3.12, (j + 1) * 3.12, N) + np.random.randn(N) * 0.2 # theta
r = a * np.sin(4 * t) + np.random.randn(N) * 0.2 # radius
X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
Y[ix] = j
X = X.T
Y = Y.T
return X, Y
def initialize_parameters(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
b1 -- bias vector of shape (layer_dims[l], 1)
                    Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
                    bl -- bias vector of shape (layer_dims[l], 1)
Tips:
- For example: the layer_dims for the "Planar Data classification model" would have been [2,2,1].
This means W1's shape was (2,2), b1 was (1,2), W2 was (2,1) and b2 was (1,1). Now you have to generalize it!
- In the for loop, use parameters['W' + str(l)] to access Wl, where l is the iterative integer.
"""
np.random.seed(3)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) / np.sqrt(layer_dims[l - 1])
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
        assert parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l - 1])
        assert parameters['b' + str(l)].shape == (layer_dims[l], 1)
return parameters
def forward_propagation(X, parameters):
"""
Implements the forward propagation (and computes the loss) presented in Figure 2.
Arguments:
X -- input dataset, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
W1 -- weight matrix of shape ()
b1 -- bias vector of shape ()
W2 -- weight matrix of shape ()
b2 -- bias vector of shape ()
W3 -- weight matrix of shape ()
b3 -- bias vector of shape ()
Returns:
loss -- the loss function (vanilla logistic loss)
"""
# retrieve parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
Z1 = np.dot(W1, X) + b1
A1 = relu(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = relu(Z2)
Z3 = np.dot(W3, A2) + b3
A3 = sigmoid(Z3)
cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
return A3, cache
def backward_propagation(X, Y, cache):
"""
Implement the backward propagation presented in figure 2.
Arguments:
X -- input dataset, of shape (input size, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat)
cache -- cache output from forward_propagation()
Returns:
gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
"""
m = X.shape[1]
(Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
dW3 = 1. / m * np.dot(dZ3, A2.T)
db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)
dA2 = np.dot(W3.T, dZ3)
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
dW2 = 1. / m * np.dot(dZ2, A1.T)
db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)
dA1 = np.dot(W2.T, dZ2)
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
dW1 = 1. / m * np.dot(dZ1, X.T)
db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
"dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
"dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
def update_parameters(parameters, grads, learning_rate):
"""
Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(i)] = Wi
parameters['b' + str(i)] = bi
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(i)] = dWi
grads['db' + str(i)] = dbi
learning_rate -- the learning rate, scalar.
Returns:
parameters -- python dictionary containing your updated parameters
"""
n = len(parameters) // 2 # number of layers in the neural networks
# Update rule for each parameter
for k in range(n):
parameters["W" + str(k + 1)] = parameters["W" + str(k + 1)] - learning_rate * grads["dW" + str(k + 1)]
parameters["b" + str(k + 1)] = parameters["b" + str(k + 1)] - learning_rate * grads["db" + str(k + 1)]
return parameters
def predict(X, y, parameters):
"""
This function is used to predict the results of a n-layer neural network.
Arguments:
X -- data set of examples you would like to label
parameters -- parameters of the trained model
Returns:
p -- predictions for the given dataset X
"""
m = X.shape[1]
    p = np.zeros((1, m), dtype=int)
# Forward propagation
a3, caches = forward_propagation(X, parameters)
# convert probas to 0/1 predictions
for i in range(0, a3.shape[1]):
if a3[0, i] > 0.5:
p[0, i] = 1
else:
p[0, i] = 0
# print results
# print ("predictions: " + str(p[0,:]))
# print ("true labels: " + str(y[0,:]))
print("Accuracy: " + str(np.mean((p[0, :] == y[0, :]))))
return p
def compute_cost(a3, Y):
"""
Implement the cost function
Arguments:
a3 -- post-activation, output of forward propagation
Y -- "true" labels vector, same shape as a3
Returns:
cost - value of the cost function
"""
m = Y.shape[1]
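    # Element-wise cross-entropy: -[y*log(a) + (1-y)*log(1-a)]; np.nansum
    # ignores NaNs that arise when a3 is exactly 0 or 1, averaged over m examples.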
logprobs = np.multiply(-np.log(a3), Y) + np.multiply(-np.log(1 - a3), 1 - Y)
cost = 1. / m * np.nansum(logprobs)
return cost
def load_dataset():
train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
train_set_x_orig = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_orig = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
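    # Scale the 8-bit pixel values to the [0, 1] range.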
train_set_x = train_set_x_orig / 255
test_set_x = test_set_x_orig / 255
return train_set_x, train_set_y, test_set_x, test_set_y, classes
def predict_dec(parameters, X):
"""
Used for plotting decision boundary.
Arguments:
parameters -- python dictionary containing your parameters
X -- input data of size (m, K)
Returns
predictions -- vector of predictions of our model (red: 0 / blue: 1)
"""
# Predict using forward propagation and a classification threshold of 0.5
a3, cache = forward_propagation(X, parameters)
predictions = (a3 > 0.5)
return predictions
def load_planar_dataset(randomness, seed):
np.random.seed(seed)
m = 50
N = int(m / 2) # number of points per class
D = 2 # dimensionality
X = np.zeros((m, D)) # data matrix where each row is a single example
Y = np.zeros((m, 1), dtype='uint8') # labels vector (0 for red, 1 for blue)
a = 2 # maximum ray of the flower
for j in range(2):
ix = range(N * j, N * (j + 1))
if j == 0:
t = np.linspace(j, 4 * 3.1415 * (j + 1), N) # + np.random.randn(N)*randomness # theta
r = 0.3 * np.square(t) + np.random.randn(N) * randomness # radius
if j == 1:
t = np.linspace(j, 2 * 3.1415 * (j + 1), N) # + np.random.randn(N)*randomness # theta
r = 0.2 * np.square(t) + np.random.randn(N) * randomness # radius
X[ix] = np.c_[r * np.cos(t), r * np.sin(t)]
Y[ix] = j
X = X.T
Y = Y.T
return X, Y
def plot_decision_boundary(model, X, y):
# Set min and max values and give it some padding
x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
Z = model(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.ylabel('x2')
plt.xlabel('x1')
plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)
plt.show()
def load_2D_dataset():
data = scipy.io.loadmat('datasets/data.mat')
train_X = data['X'].T
train_Y = data['y'].T
test_X = data['Xval'].T
test_Y = data['yval'].T
plt.scatter(train_X[0, :], train_X[1, :], c=train_Y, s=40, cmap=plt.cm.Spectral)
return train_X, train_Y, test_X, test_Y | mit |
mehdidc/scikit-learn | sklearn/datasets/tests/test_base.py | 39 | 5607 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
| bsd-3-clause |
ufaks/addons-yelizariev | import_framework/import_base.py | 16 | 14556 | # -*- coding: utf-8 -*-
import mapper
try:
from pandas import DataFrame
except ImportError:
pass
import logging
_logger = logging.getLogger(__name__)
class create_childs(object):
def __init__(self, childs):
# extend childs to same set of fields
# collect fields
fields = set()
for c in childs:
for f in c:
fields.add(f)
# extend childs
for c in childs:
for f in fields:
if f not in c:
c[f] = mapper.const('')
self.childs = childs
def get_childs(self):
return self.childs
class import_base(object):
def __init__(self, pool, cr, uid,
instance_name,
module_name,
email_to_notify=False,
import_dir = '/tmp/', # path to save *.csv files for debug or manual upload
run_import = True,
context=None):
#Thread.__init__(self)
self.import_options = {'quoting':'"', 'separator':',', 'headers':True}
self.external_id_field = 'id'
self.pool = pool
self.cr = cr
self.uid = uid
self.instance_name = instance_name
self.module_name = module_name
self.context = context or {}
self.email = email_to_notify
self.table_list = []
#self.logger = logging.getLogger(module_name)
self.cache = {}
self.import_dir = import_dir
self.run_import = run_import
self.import_num = 1
self.initialize()
def initialize(self):
"""
init before import,
usually used for the login
"""
pass
def finalize(self):
"""
clean up after import
"""
pass
def init_run(self):
"""
called after initialize(); runs in the thread, not in the main process.
To be used for long initialization operations
"""
pass
def get_data(self, table):
"""
@return: a list of dictionaries
each dictionary contains pairs of external_field_name : value
"""
return [{}]
def get_link(self, from_table, ids, to_table):
"""
@return: a dictionary that contains the association between the id (from_table)
and the list of linked ids (to_table)
"""
return {}
def get_external_id(self, data):
"""
@return the external id
the default implementation returns data[self.external_id_field] ('id' by default);
if the name of the id field is different, you can override this method or change the value
of self.external_id_field
"""
return data[self.external_id_field]
def get_mapping(self):
"""
@return: { TABLE_NAME : {
'model' : 'openerp.model.name',
#if true import the table if not just resolve dependencies, use for meta package, by default => True
#Not required
'import' : True or False,
#Not required
'dependencies' : [TABLE_1, TABLE_2],
#Not required
'hook' : self.function_name, #get the val dict of the object, return the same val dict or False
'map' : { @see mapper
'openerp_field_name' : 'external_field_name', or val('external_field_name')
'openerp_field_id/id' : ref(TABLE_1, 'external_id_field'), #make the mapping between the external id and the xml on the right
'openerp_field2_id/id_parent' : ref(TABLE_1,'external_id_field') #indicate a self dependencies on openerp_field2_id
'state' : map_val('state_equivalent_field', mapping), # use get_state_map to make the mapping between the value of the field and the value of the state
'text_field' : concat('field_1', 'field_2', .., delimiter=':'), #concat the value of the list of field in one
'description_field' : ppconcat('field_1', 'field_2', .., delimiter='\n\t'), #same as above but with a prettier formatting
'field' : call(callable, arg1, arg2, ..), #call the function with all the value, the function should send the value : self.callable
'field' : callable
'field' : call(method, val('external_field') interface of method is self, val where val is the value of the field
'field' : const(value) #always set this field to value
+ any custom mapper that you will define
}
},
}
"""
return {}
def default_hook(self, val):
"""
this hook will be applied to each table that doesn't define its own hook;
here we define the identity hook
"""
return val
def hook_ignore_all(self, *args):
# for debug
return None
def get_hook_ignore_empty(self, *args):
def f(external_values):
ignore = True
for key in args:
v = (external_values.get(key) or '').strip()
if v:
ignore = False
break
if ignore:
return None
else:
return external_values
return f
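# Usage sketch (illustrative, not part of the original module): a model entry
# in get_mapping() can skip rows whose key columns are all empty by using the
# factory above as its hook, e.g.
#
#     'hook': self.get_hook_ignore_empty('first_name', 'last_name'),
#
# The column names 'first_name'/'last_name' are hypothetical.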
def prepare_mapping(self, mapping):
res = {}
for m in mapping:
res[m['name']] = m
return res
def run(self):
self.mapped = set()
self.mapping = self.prepare_mapping(self.get_mapping())
self.resolve_dependencies([k for k in self.mapping])
_logger.info('finalize...')
self.finalize()
_logger.info('finalize done')
def _fix_size_limit(self):
import sys
import csv
maxInt = sys.maxsize
decrement = True
while decrement:
# decrease the maxInt value by factor 10
# as long as the OverflowError occurs.
decrement = False
try:
csv.field_size_limit(maxInt)
except OverflowError:
maxInt = int(maxInt/10)
decrement = True
def do_import(self, import_list, context):
self._fix_size_limit()
# import
import_obj = self.pool['base_import.import']
for imp in import_list:
try:
messages = import_obj.do(self.cr, self.uid,
imp.get('id'), imp.get('fields'),
self.import_options, context=context)
_logger.info('import_result:\n%s'%messages)
except Exception as e:
import traceback
import StringIO
sh = StringIO.StringIO()
traceback.print_exc(file=sh)
error = sh.getvalue()
error = "Error during import\n%s\n%s" % (imp, error)
_logger.error(error)
raise Exception(error)
self.cr.commit()
def resolve_dependencies(self, deps):
import_list = []
for dname in deps:
if dname in self.mapped:
continue
self.mapped.add(dname)
mtable = self.mapping.get(dname)
if not mtable:
_logger.error('no mapping found for %s' % dname)
continue
self.resolve_dependencies(mtable.get('dependencies', []))
self.map_and_import(mtable)
def map_and_import(self, mtable):
_logger.info('read table %s' % mtable.get('name'))
records = mtable.get('table')()
for mmodel in mtable.get('models'):
split = mmodel.get('split')
if not split:
_logger.info('map and import: import-%s' % self.import_num)
self.map_and_import_batch(mmodel, records)
else:
i=0
while True:
_logger.info('importing batch # %s (import-%s)' % (i,self.import_num))
rr = records[i*split:(i+1)*split]
if len(rr):
self.map_and_import_batch(mmodel, rr)
i += 1
else:
break
finalize = mmodel.get('finalize')
if finalize:
_logger.info('finalize model...')
finalize()
_logger.info('finalize model done')
def map_and_import_batch(self, mmodel, records):
import_list = self.do_mapping(records, mmodel)
context = mmodel.get('context')
if context:
context = context()
self.do_import(import_list, context)
def do_mapping(self, records, mmodel):
hook = mmodel.get('hook', self.default_hook)
res = []
mfields = self._preprocess_mapping(mmodel.get('fields'))
_logger.info('mapping records to %s: %s' %( mmodel.get('model'), len(records)))
for key, r in records.iterrows():
hooked = hook(dict(r))
if not isinstance(hooked, list):
hooked = [hooked]
for dict_sugar in hooked:
if dict_sugar:
fields, values_list = self._fields_mapp(dict_sugar, mfields)
res.extend(values_list)
if not res:
_logger.info("no records to import")
return []
res = DataFrame(res)
data_binary = res.to_csv(sep=self.import_options.get('separator'),
quotechar=self.import_options.get('quoting'),
index=False,
header = fields,
encoding='utf-8'
)
if self.import_dir:
file_name = '%s/import-%03d-%s.csv' % (
self.import_dir,
self.import_num,
mmodel.get('model'),
)
with open(file_name, 'w') as f:
f.write(data_binary)
self.import_num += 1
if not self.run_import:
return []
id = self.pool['base_import.import'].create(self.cr, self.uid,
{'res_model':mmodel.get('model'),
'file': data_binary,
'file_name': mmodel.get('model'),
})
return [{'id':id, 'fields':fields}]
def _preprocess_mapping(self, mapping):
"""
Preprocess the mapping:
after the preprocessing, every value
in the dictionary is callable.
This is used to allow syntactic sugar like 'field': 'external_field'
instead of 'field' : value('external_field')
"""
#m = dict(mapping)
m = mapping
for key, value in m.items():
if isinstance(value, basestring):
m[key] = mapper.value(value)
#set parent for instance of dbmapper
elif isinstance(value, mapper.dbmapper):
value.set_parent(self)
elif isinstance(value, create_childs):
# {'child_ids':[{'id':id1, 'name':name1}, {'id':id2, 'name':name2}]}
# ->
# {'child_ids/id':[id1, id2], 'child_ids/name': [name1, name2]}
for c in value.get_childs():
self._preprocess_mapping(c)
for ckey, cvalue in c.items():
new_key = '%s/%s' % (key, ckey)
if new_key not in m:
m[new_key] = []
m[new_key].append(cvalue)
del m[key] # delete 'child_ids'
return m
def _fields_mapp(self,dict_sugar, openerp_dict):
"""
{'name': name0, 'child_ids/id':[id1, id2], 'child_ids/name': [name1, name2]} ->
fields =
['name', 'child_ids/id', 'child_ids/name']
res = [
[name0, '',''], # i=-1
['', id1, name1] # i=0
['', id2, name2] # i=1
]
"""
res = []
i = -1
while True:
fields=[]
data_lst = []
for key,val in openerp_dict.items():
if key not in fields:
fields.append(key)
if isinstance(val, list) and len(val)>i and i>=0:
value = val[i](dict_sugar)
elif not isinstance(val, list) and i==-1:
value = val(dict_sugar)
else:
value = ''
data_lst.append(value)
if any(data_lst):
add = True
if i>=0:
print '_fields_mapp', zip(fields, data_lst)
add = False
# ignore empty lines
for pos, val in enumerate(data_lst):
if fields[pos].endswith('/id'):
continue
if val:
add = True
break
if add:
res.append(data_lst)
i += 1
else:
break
return fields, res
def xml_id_exist(self, table, external_id):
"""
Check if the external id exists in the openerp database.
In order to check whether the id exists, the table it comes from
should be provided.
@return the generated xml_id if the external_id exists in the database, or False
"""
if not external_id:
return False
xml_id = self._generate_xml_id(external_id, table)
id = self.pool.get('ir.model.data').search(self.cr, self.uid, [('name', '=', xml_id), ('module', '=', self.module_name)])
return id and xml_id or False
def _generate_xml_id(self, name, table):
"""
@param name: name of the object, has to be unique for a given table
@param table : table the record we want to generate comes from
@return: a unique xml id for the record; the xml_id will be the same given the same table and same name.
To be used to avoid duplication of data that doesn't have ids
"""
sugar_instance = self.instance_name
name = name.replace('.', '_').replace(',', '_')
return sugar_instance + "_" + table + "_" + name
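# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal subclass
# showing the shape that run()/map_and_import()/do_mapping() above actually
# consume -- a list of table dicts, each with a callable 'table' returning a
# pandas DataFrame and a list of 'models' with a 'fields' mapping. Every
# table, model and column name below is hypothetical.
#
# class my_import(import_base):
#
#     def table_partner(self):
#         from pandas import DataFrame
#         return DataFrame([
#             {'id': 'p1', 'first_name': 'Ada', 'mail': '[email protected]'},
#         ])
#
#     def get_mapping(self):
#         return [{
#             'name': 'partner',
#             'table': self.table_partner,
#             'dependencies': [],
#             'models': [{
#                 'model': 'res.partner',
#                 'fields': {
#                     'id': 'id',
#                     'name': 'first_name',
#                     'email': 'mail',
#                     'comment': mapper.const('imported'),
#                 },
#             }],
#         }]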
| lgpl-3.0 |
harmsm/pytc-gui | pytc_gui/main_window.py | 1 | 9560 | __description__ = \
"""
pytc GUI using PyQt5.
"""
__author__ = "Hiranmyai Duvvuri"
__date__ = "2017-01-06"
from . import dialogs, widgets
from .fit_container import FitContainer
from pytc.global_fit import GlobalFit
from PyQt5.QtCore import pyqtSignal
from PyQt5 import QtWidgets as QW
from matplotlib.backends.backend_pdf import PdfPages
import sys, pkg_resources, pickle
import os
class MainWindow(QW.QMainWindow):
"""
Main fitting window.
"""
def __init__(self,app):
super().__init__()
self._app = app
self._fit = FitContainer()
self._fitter_list = {}
self._version = pkg_resources.require("pytc-gui")[0].version
self._accept_exit_program = False
self.layout()
def layout(self):
"""
Create the menu bar.
"""
menu = self.menuBar()
menu.setNativeMenuBar(False)
file_menu = menu.addMenu("File")
fitting_commands = menu.addMenu("Fitting")
help_menu = menu.addMenu("Help")
# ------------- Help Menu ----------------------
prog_info = QW.QAction("About", self)
prog_info.triggered.connect(self.about_dialog)
help_menu.addAction(prog_info)
doc_info = QW.QAction("Documentation", self)
doc_info.triggered.connect(self.docs_dialog)
help_menu.addAction(doc_info)
# ------------- Fitting Menu -------------------
do_fit = QW.QAction("Do fit", self)
do_fit.setShortcut("Ctrl+F")
do_fit.triggered.connect(self.do_fit_callback)
fitting_commands.addAction(do_fit)
fitting_commands.addSeparator()
aic_test = QW.QAction("AIC Test", self)
aic_test.triggered.connect(self.aic_dialog)
fitting_commands.addAction(aic_test)
fitting_commands.addSeparator()
fitting_options = QW.QAction("Fit Options", self)
fitting_options.triggered.connect(self.fit_options_dialog)
fitting_commands.addAction(fitting_options)
# ------------------ File Menu ---------------------------
add_exp = QW.QAction("Add Experiment", self)
add_exp.setShortcut("Ctrl+Shift+N")
add_exp.triggered.connect(self.add_exp_dialog)
file_menu.addAction(add_exp)
export_results = QW.QAction("Export Results", self)
export_results.setShortcut("Ctrl+S")
export_results.triggered.connect(self.export_results_dialog)
file_menu.addAction(export_results)
file_menu.addSeparator()
#save_fitter = QW.QAction("Save Fitter", self)
#save_fitter.setShortcut("Ctrl+Shift+S")
#save_fitter.triggered.connect(self.save_fitter_dialog)
#file_menu.addAction(save_fitter)
#open_fitter = QW.QAction("Open Fitter", self)
#open_fitter.setShortcut("Ctrl+O")
#open_fitter.triggered.connect(self.open_fitter_dialog)
#file_menu.addAction(open_fitter)
file_menu.addSeparator()
new_session = QW.QAction("New Session", self)
new_session.setShortcut("Ctrl+N")
new_session.triggered.connect(self.new_session_callback)
file_menu.addAction(new_session)
close_window = QW.QAction("Exit", self)
close_window.setShortcut("Ctrl+W")
close_window.triggered.connect(self.close_program_callback)
file_menu.addAction(close_window)
# add shortcut actions to main window, for qt5 bug
self.addAction(add_exp)
self.addAction(do_fit)
self.addAction(export_results)
self.addAction(new_session)
self.addAction(close_window)
#self.addAction(save_fitter)
#self.addAction(open_fitter)
# Set up central widget
self._main_widgets = widgets.MainWidgets(self,self._fit)
self.setCentralWidget(self._main_widgets)
self.resize(1000, 600)
self.move(QW.QApplication.desktop().screen().rect().center()-self.rect().center())
self.setWindowTitle('pytc')
self.show()
def docs_dialog(self):
"""
Open a transient documentation dialog.
"""
self._tmp = dialogs.Documentation()
self._tmp.show()
def about_dialog(self):
"""
Open a transient about dialog.
"""
self._tmp = dialogs.About()
self._tmp.show()
def add_exp_dialog(self):
"""
Open a transient dialog for adding a new experiment.
"""
self._tmp = dialogs.AddExperiment(self._fit)
self._tmp.show()
def aic_dialog(self):
"""
Load persistent dialog box for doing AIC calculation.
"""
try:
self._aic_dialog.show()
except AttributeError:
self._aic_dialog = dialogs.AICTest(self,self._fit)
self._aic_dialog.show()
self._aic_dialog.raise_()
def fit_options_dialog(self):
"""
Load persistent dialog for setting fit options.
"""
# Try to show the window -- if it's not created already, make it
try:
self._fit_options_dialog.show()
except AttributeError:
self._fit_options_dialog = dialogs.FitOptions(self, self._fit)
self._fit_options_dialog.show()
self._fit_options_dialog.raise_()
def save_fitter_dialog(self):
"""
Open a transient dialog for saving a fit object.
"""
file_name, _ = QW.QFileDialog.getSaveFileName(self, "Save Global Fit", "", "Pickle Files (*.pkl);;")
pickle.dump([self._fit,self._version], open(file_name, "wb"))
def open_fitter_dialog(self):
"""
Open a transient dialog for opening a saved fit object.
"""
file_name, _ = QW.QFileDialog.getOpenFileName(self, "Open Global Fit", "", "Pickle Files (*.pkl);;")
opened_fitter, version = pickle.load(open(file_name, "rb"))
if self._version == version:
self._fit = opened_fitter
self._fit.emit_changed()
else:
err = "Could not load fit. Current version is {}, but file version is {}.".format(self._version,version)
error_message = QW.QMessageBox.warning(self, "Error", err, QW.QMessageBox.Ok)
def export_results_dialog(self):
"""
Bring up transient dialog for exporting results.
"""
out_dir, _ = QW.QFileDialog.getSaveFileName(self, "Export Experiment Output", "", "*")
try:
os.mkdir(out_dir)
data_file = open(os.path.join(out_dir,"fit_param.csv"), "w")
data_file.write(self._fit.fitter.fit_as_csv)
data_file.close()
plot_save = PdfPages(os.path.join(out_dir,"main_plot.pdf"))
fig, ax = self._fit.fitter.plot()
plot_save.savefig(fig)
plot_save.close()
plot_save = PdfPages(os.path.join(out_dir,"corner_plot.pdf"))
fig = self._fit.fitter.corner_plot()
plot_save.savefig(fig)
plot_save.close()
log_save = open(os.path.join(out_dir,"session.log"),"w")
spew = self._main_widgets.message_box.toPlainText()
log_save.write(spew)
log_save.close()
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
err = template.format(type(ex).__name__,ex.args)
error_message = QW.QMessageBox.warning(self, "Error", err, QW.QMessageBox.Ok)
def do_fit_callback(self):
"""
Do the fit.
"""
self._main_widgets.do_fit_callback()
def new_session_callback(self):
"""
Start a completely new session.
"""
warning = "Are you sure you want to start a new session?"
warning_message = QW.QMessageBox.warning(self, "warning!",warning,
QW.QMessageBox.Yes | QW.QMessageBox.No)
if warning_message == QW.QMessageBox.Yes:
self._fit.clear()
self._fit.fitter_list = {}
self._main_widgets.delete()
self.__init__(self._app)
def close_program_callback(self):
"""
Close the program out.
"""
ret = QW.QMessageBox.Ok
if not self._accept_exit_program:
if len(self._fit.experiments) > 0:
m = QW.QMessageBox()
m.setText("Are you sure you want to exit?")
m.setIcon(QW.QMessageBox.Warning)
m.setStandardButtons(QW.QMessageBox.Ok | QW.QMessageBox.Cancel)
m.setDefaultButton(QW.QMessageBox.Cancel)
ret = m.exec_()
if ret == QW.QMessageBox.Ok:
self._accept_exit_program = True
self._main_widgets.clear()
self._app.instance().closeAllWindows()
else:
self._accept_exit_program = False
def closeEvent(self,event):
"""
Override closeEvent so all windows close and clean up.
"""
self.close_program_callback()
if self._accept_exit_program:
event.accept()
else:
event.ignore()
def main():
"""
Main function, staring GUI.
"""
version = pkg_resources.require("pytc-gui")[0].version
try:
app = QW.QApplication(sys.argv)
app.setApplicationName("pytc")
app.setApplicationVersion(version)
pytc_run = MainWindow(app)
sys.exit(app.exec_())
except KeyboardInterrupt:
sys.exit()
if __name__ == '__main__':
main()
| unlicense |
ishank08/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
marcass/tank_lora | python/archive/queue_listen.py | 1 | 4136 | from multiprocessing import Queue as Q
from multiprocessing import Process as P
import time
import sys
import numpy as np
import paho.mqtt.publish as publish
import serial
import smtplib
import requests
import matplotlib.pyplot as plt
import datetime
import sqlite3
import creds
import tanks
s_port = '/dev/LORA'
#initialise global port
port = None
#thingspeak
water_APIKey = creds.water_APIKey #channel api key
batt_APIKey = creds.batt_APIKey
thingURL = "https://api.thingspeak.com/update"
#mqtt
broker = creds.mosq_auth['broker']
auth = creds.mosq_auth
#db
def setup_db():
# Create table
conn, c = tanks.get_db()
c.execute('''CREATE TABLE IF NOT EXISTS measurements
(timestamp TIMESTAMP, tank_id INTEGER, water_volume REAL, voltage REAL)''')
conn.commit() # Save (commit) the changes
def add_measurement(tank_id,water_volume,voltage):
# Insert a row of data
conn, c = tanks.get_db()
c.execute("INSERT INTO measurements VALUES (?,?,?,?)", (datetime.datetime.utcnow(),tank_id,water_volume,voltage) )
conn.commit() # Save (commit) the changes
def readlineCR(port):
rv = ''
while True:
ch = port.read()
rv += ch
if ch=='\n':# or ch=='':
if 'PY' in rv: #arduino formats message as PY;<nodeID>;<waterlevel>;<batteryvoltage>\r\n
print rv
rec_split = rv.split(';') #make a list like ['PY', nodeID, waterlevel, batteryvoltage]
print rec_split
q.put(rec_split[1:4]) #put data in queue for processing at rate
rv = ''
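# Worked example (hypothetical frame): a radio line 'PY;2;1234;3.71\r\n'
# splits into ['PY', '2', '1234', '3.71\r\n'], so rec_split[1:4] puts
# ['2', '1234', '3.71\r\n'] -- i.e. [nodeID, distance, battery] -- on the
# queue for pub_msg() below to convert, store and publish.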
#format mqtt message
def pub_msg():
while True:
while (q.empty() == False):
data = q.get()
in_node = data[0]
if tanks.tanks_by_nodeID.has_key(in_node):
tank = tanks.tanks_by_nodeID[in_node]
else:
break
print data
#check to see if it's a relay (and insert null water value if it is)
dist = data[1]
batt = data[2]
try:
dist = int(dist)
#check to see if in acceptable value range
if (dist < tank.invalid_min) or (dist > tank.max_payload):
vol = None
else:
vol = tank.volume(dist)
except:
vol = None
try:
batt = float(batt)
if batt > 5.5:
batt = None
except:
batt = None
#add to db
add_measurement(in_node,vol,batt)
#publish to thingspeak
#r = requests.post(thingURL, data = {'api_key':water_APIKey, 'field' +tank.nodeID: vol})
publish.single(tank.waterTop, vol , auth=auth, hostname=broker, retain=True)
print('Published ' +str(vol) +' for nodeID ' + str(tank.nodeID) + ' to ' +tank.waterTop)
#publish to thingspeak
#time.sleep(15)
#r = requests.post(thingURL, data = {'api_key':batt_APIKey, 'field' +tank.nodeID: batt})
publish.single(tank.batTop, batt , auth=auth, hostname=broker, retain=True)
print('Published ' +str(batt) +' for nodeID ' + str(tank.nodeID) + ' to ' +tank.batTop)
#time.sleep(15)
#Serial port opening function
count = 0
def port_check(in_port):
global port
try:
port = serial.Serial(in_port, baudrate=9600, timeout=3.0)
print s_port+' found'
count = 0
return port
except:
port = None
return port
#handle exceptions for absent port (and keep retrying for a while)
while (port_check(s_port) is None) and (count < 100):
count = count + 1
print s_port+' not found '+str(count)+' times'
time.sleep(10)
if count == 100:
print 'Exited because serial port not found'
sys.exit()
#instantiate queue
q = Q()
#setup database
setup_db()
fetch_process = P(target=readlineCR, args=(port,))
broadcast_process = P(target=pub_msg, args=())
broadcast_process.start()
fetch_process.start()
| gpl-3.0 |
Clyde-fare/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
except LinAlgError:
svd_restarts += 1
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
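# Illustrative sketch (not part of the library code): discretize() is normally
# reached via spectral_clustering(..., assign_labels='discretize') below, but
# it can also be applied directly to a spectral embedding, e.g.:
#
#     maps = spectral_embedding(affinity, n_components=2, drop_first=False)
#     labels = discretize(maps, random_state=0)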
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
The graph should contain only one connected component; otherwise
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
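# Illustrative sketch (not part of the library code): building an affinity
# matrix by hand and passing it to the function above. The toy data, kernel
# and gamma value are arbitrary choices for the example.
#
#     from sklearn.datasets import make_moons
#     from sklearn.metrics.pairwise import rbf_kernel
#     X, _ = make_moons(n_samples=200, noise=0.05, random_state=0)
#     affinity = rbf_kernel(X, gamma=20)
#     labels = spectral_clustering(affinity, n_clusters=2, random_state=0)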
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
a kernel function such as the Gaussian (aka RBF) kernel of the euclidean
distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
Affinity matrix used for clustering. Available only if after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
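# Illustrative sketch (not part of the library code): typical estimator usage
# mirroring the ``fit`` path above, here with the nearest-neighbors affinity.
# X stands for any array of shape (n_samples, n_features); the parameter
# values are arbitrary.
#
#     model = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
#                                n_neighbors=10, random_state=0)
#     labels = model.fit(X).labels_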
| bsd-3-clause |
anntzer/scikit-learn | sklearn/datasets/_species_distributions.py | 11 | 8726 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
`"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_ S. J. Phillips,
R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006.
Notes
-----
For an example of using this dataset, see
:ref:`examples/applications/plot_species_distribution_modeling.py
<sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs, remove
from os.path import exists
import logging
import numpy as np
import joblib
from . import get_data_home
from ._base import _fetch_remote
from ._base import RemoteFileMetadata
from ..utils import Bunch
from ..utils.validation import _deprecate_positional_args
from ._base import _pkl_filepath
# The original data can be found at:
# https://biodiversityinformatics.amnh.org/open_source/maxent/samples.zip
SAMPLES = RemoteFileMetadata(
filename='samples.zip',
url='https://ndownloader.figshare.com/files/5976075',
checksum=('abb07ad284ac50d9e6d20f1c4211e0fd'
'3c098f7f85955e89d321ee8efe37ac28'))
# The original data can be found at:
# https://biodiversityinformatics.amnh.org/open_source/maxent/coverages.zip
COVERAGES = RemoteFileMetadata(
filename='coverages.zip',
url='https://ndownloader.figshare.com/files/5976078',
checksum=('4d862674d72e79d6cee77e63b98651ec'
'7926043ba7d39dcb31329cf3f6073807'))
DATA_ARCHIVE_NAME = "species_coverage.pkz"
logger = logging.getLogger(__name__)
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for _ in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = int(header[b'NODATA_value'])
if nodata != -9999:
# replace the file's no-data sentinel with the -9999 value used elsewhere
M[M == nodata] = -9999
return M
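# Example header of such a coverage file (the standard ESRI ASCII grid keys;
# the numeric values shown are the ones used for this dataset elsewhere in
# this module and are only illustrative here):
#
#     ncols         1212
#     nrows         1592
#     xllcorner     -94.8
#     yllcorner     -56.05
#     cellsize      0.05
#     NODATA_value  -9999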
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
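# Worked example (illustrative): with the dataset defaults used further down
# (x_left_lower_corner=-94.8, grid_size=0.05, Nx=1212), the first x grid
# value is xmin = -94.8 + 0.05 = -94.75 and xgrid then runs in 0.05 degree
# steps up to (but not including) xmax = -94.75 + 1212 * 0.05; ygrid is
# built the same way from the y parameters.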
@_deprecate_positional_args
def fetch_species_distributions(*, data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured
at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1624,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (620,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
- For an example of using this dataset with scikit-learn, see
:ref:`examples/applications/plot_species_distribution_modeling.py
<sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
if not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
logger.info('Downloading species data from %s to %s' % (
SAMPLES.url, data_home))
samples_path = _fetch_remote(SAMPLES, dirname=data_home)
with np.load(samples_path) as X: # samples.zip is a valid npz
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
remove(samples_path)
logger.info('Downloading coverage data from %s to %s' % (
COVERAGES.url, data_home))
coverages_path = _fetch_remote(COVERAGES, dirname=data_home)
with np.load(coverages_path) as X: # coverages.zip is a valid npz
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
logger.debug(' - converting {}'.format(f))
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
remove(coverages_path)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
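# Illustrative sketch (not part of the library code): typical use of the
# loader together with construct_grids() defined above.
#
#     data = fetch_species_distributions()
#     xgrid, ygrid = construct_grids(data)
#     # data.coverages has shape (14, 1592, 1212); data.train / data.test are
#     # record arrays with fields 'species', 'dd long' and 'dd lat'.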
| bsd-3-clause |
devanshdalal/scikit-learn | examples/svm/plot_svm_margin.py | 88 | 2540 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors (margin away from hyperplane in direction
# perpendicular to hyperplane). This is sqrt(1+a^2) away vertically in
# 2-d.
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy - np.sqrt(1 + a ** 2) * margin
yy_up = yy + np.sqrt(1 + a ** 2) * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10, edgecolors='k')
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired,
edgecolors='k')
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
nvoron23/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 47 | 8566 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.base import clone
random_state = np.random.mtrand.RandomState(0)
def test_initialize_nn_output():
# Test that initialization does not return negative values
data = np.abs(random_state.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
@ignore_warnings
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nmf.NMF(solver=name).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nmf.NMF(init=name).fit, A)
msg = "Invalid sparseness parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nmf.NMF(sparseness=name).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, nmf.NMF().fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = nmf.NMF(2, tol=0.1).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@ignore_warnings
def test_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for solver in ('pg', 'cd'):
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.NMF(n_components=2, solver=solver, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
@ignore_warnings
def test_nmf_fit_close():
# Test that the fit is not too far away
for solver in ('pg', 'cd'):
pnmf = nmf.NMF(5, solver=solver, init='nndsvd', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
@ignore_warnings
def test_nmf_transform():
# Test that NMF.transform returns close values
A = np.abs(random_state.randn(6, 5))
for solver in ('pg', 'cd'):
m = nmf.NMF(solver=solver, n_components=4, init='nndsvd',
random_state=0)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
@ignore_warnings
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
@ignore_warnings
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
tol = 1e-2
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0,
tol=tol).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0,
tol=tol).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0,
tol=tol).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
@ignore_warnings
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
for solver in ('pg', 'cd'):
est1 = nmf.NMF(solver=solver, n_components=5, init='random',
random_state=0, tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
@ignore_warnings
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
A = np.abs(random_state.randn(3, 2))
A[A > 1.0] = 0
A = csc_matrix(A)
for solver in ('pg', 'cd'):
model = nmf.NMF(solver=solver, random_state=0, tol=1e-4,
n_components=2)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
@ignore_warnings
def test_non_negative_factorization_consistency():
# Test that the function is called in the same way, either directly
# or through the NMF class
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
for solver in ('pg', 'cd'):
W_nmf, H, _ = nmf.non_negative_factorization(
A, solver=solver, random_state=1, tol=1e-2)
W_nmf_2, _, _ = nmf.non_negative_factorization(
A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
model_class = nmf.NMF(solver=solver, random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
@ignore_warnings
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameters checking is public function
nnmf = nmf.non_negative_factorization
msg = "Number of components must be positive; got (n_components='2')"
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
def test_safe_compute_error():
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
W, H = nmf._initialize_nmf(A, 5, init='random', random_state=0)
error = nmf._safe_compute_error(A, W, H)
error_sparse = nmf._safe_compute_error(A_sparse, W, H)
assert_almost_equal(error, error_sparse)
| bsd-3-clause |
classicboyir/BuildingMachineLearningSystemsWithPython | ch04/blei_lda.py | 21 | 2601 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
from wordcloud import create_cloud
try:
from gensim import corpora, models, matutils
except:
print("import gensim failed.")
print()
print("Please install it")
raise
import matplotlib.pyplot as plt
import numpy as np
from os import path
NUM_TOPICS = 100
# Check that data exists
if not path.exists('./data/ap/ap.dat'):
print('Error: Expected data to be present at data/ap/')
print('Please cd into ./data & run ./download_ap.sh')
# Load the data
corpus = corpora.BleiCorpus('./data/ap/ap.dat', './data/ap/vocab.txt')
# Build the topic model
model = models.ldamodel.LdaModel(
corpus, num_topics=NUM_TOPICS, id2word=corpus.id2word, alpha=None)
# Iterate over all the topics in the model
with open('topics.txt', 'w') as output:
for ti in range(model.num_topics):
words = model.show_topic(ti, 64)
tf = sum(f for f, w in words)
output.write('\n'.join('{}:{}'.format(w, int(1000. * f / tf)) for f, w in words))
output.write("\n\n\n")
# We first identify the most discussed topic, i.e., the one with the
# highest total weight
topics = matutils.corpus2dense(model[corpus], num_terms=model.num_topics)
weight = topics.sum(1)
max_topic = weight.argmax()
# Get the top 64 words for this topic
# Without the argument, show_topic would return only 10 words
words = model.show_topic(max_topic, 64)
# This function will actually check for the presence of pytagcloud and is otherwise a no-op
create_cloud('cloud_blei_lda.png', words)
num_topics_used = [len(model[doc]) for doc in corpus]
fig,ax = plt.subplots()
ax.hist(num_topics_used, np.arange(42))
ax.set_ylabel('Nr of documents')
ax.set_xlabel('Nr of topics')
fig.tight_layout()
fig.savefig('Figure_04_01.png')
# Now, repeat the same exercise using alpha=1.0
# You can edit the constant below to play around with this parameter
ALPHA = 1.0
model1 = models.ldamodel.LdaModel(
corpus, num_topics=NUM_TOPICS, id2word=corpus.id2word, alpha=ALPHA)
num_topics_used1 = [len(model1[doc]) for doc in corpus]
fig,ax = plt.subplots()
ax.hist([num_topics_used, num_topics_used1], np.arange(42))
ax.set_ylabel('Nr of documents')
ax.set_xlabel('Nr of topics')
# The coordinates below were fit by trial and error to look good
ax.text(9, 223, r'default alpha')
ax.text(26, 156, 'alpha=1.0')
fig.tight_layout()
fig.savefig('Figure_04_02.png')
| mit |
andyfaff/scipy | doc/source/tutorial/examples/optimize_global_1.py | 12 | 1752 | import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
def eggholder(x):
return (-(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47))))
-x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47)))))
bounds = [(-512, 512), (-512, 512)]
x = np.arange(-512, 513)
y = np.arange(-512, 513)
xgrid, ygrid = np.meshgrid(x, y)
xy = np.stack([xgrid, ygrid])
results = dict()
results['shgo'] = optimize.shgo(eggholder, bounds)
results['DA'] = optimize.dual_annealing(eggholder, bounds)
results['DE'] = optimize.differential_evolution(eggholder, bounds)
results['BH'] = optimize.basinhopping(eggholder, bounds)
results['shgo_sobol'] = optimize.shgo(eggholder, bounds, n=256, iters=5,
sampling_method='sobol')
fig = plt.figure(figsize=(4.5, 4.5))
ax = fig.add_subplot(111)
im = ax.imshow(eggholder(xy), interpolation='bilinear', origin='lower',
cmap='gray')
ax.set_xlabel('x')
ax.set_ylabel('y')
def plot_point(res, marker='o', color=None):
ax.plot(512+res.x[0], 512+res.x[1], marker=marker, color=color, ms=10)
plot_point(results['BH'], color='y') # basinhopping - yellow
plot_point(results['DE'], color='c') # differential_evolution - cyan
plot_point(results['DA'], color='w') # dual_annealing. - white
# SHGO produces multiple minima, plot them all (with a smaller marker size)
plot_point(results['shgo'], color='r', marker='+')
plot_point(results['shgo_sobol'], color='r', marker='x')
for i in range(results['shgo_sobol'].xl.shape[0]):
ax.plot(512 + results['shgo_sobol'].xl[i, 0],
512 + results['shgo_sobol'].xl[i, 1],
'ro', ms=2)
ax.set_xlim([-4, 514*2])
ax.set_ylim([-4, 514*2])
fig.tight_layout()
plt.show()
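# Note (added comment, not part of the original example): each entry in
# `results` is a scipy.optimize.OptimizeResult, so the minima found above can
# be inspected afterwards via, e.g., results['DA'].x and results['DA'].fun.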
| bsd-3-clause |
neuroidss/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_fltkagg.py | 69 | 20839 | """
A backend for FLTK
Copyright: Gregory Lielens, Free Field Technologies SA and
John D. Hunter 2004
This code is released under the matplotlib license
"""
from __future__ import division
import os, sys, math
import fltk as Fltk
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib import rcParams, verbose
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import \
RendererBase, GraphicsContextBase, FigureManagerBase, FigureCanvasBase,\
NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import thread,time
Fl_running=thread.allocate_lock()
def Fltk_run_interactive():
global Fl_running
if Fl_running.acquire(0):
while True:
Fltk.Fl.check()
time.sleep(0.005)
else:
print "fl loop already running"
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord= {
cursors.HAND: Fltk.FL_CURSOR_HAND,
cursors.POINTER: Fltk.FL_CURSOR_ARROW,
cursors.SELECT_REGION: Fltk.FL_CURSOR_CROSS,
cursors.MOVE: Fltk.FL_CURSOR_MOVE
}
special_key={
Fltk.FL_Shift_R:'shift',
Fltk.FL_Shift_L:'shift',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
65515:'win',
65516:'win',
}
def error_msg_fltk(msg, parent=None):
Fltk.fl_message(msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def ishow():
"""
Show all the figures and enter the fltk mainloop in another thread
    This allows you to keep control of the interactive Python session.
    Warning: does not work under Windows.
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
if show._needmain:
thread.start_new_thread(Fltk_run_interactive,())
show._needmain = False
def show():
"""
Show all the figures and enter the fltk mainloop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
    # Run the FLTK mainloop; if an FLTK program already exists there is no
    # need to call it (ishow above is the threaded, interactive version).
if show._needmain:
Fltk.Fl.run()
show._needmain = False
show._needmain = True
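# Usage sketch (hedged; assumes pyfltk is installed and matplotlib has been
# configured with matplotlib.use('FltkAgg') before pylab is imported):
#
#     from pylab import plot, show
#     plot([1, 2, 3])
#     show()    # blocks in the FLTK event loop
#
# ishow() defined above is the threaded variant that returns to the prompt.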
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Fltk.Fl_Double_Window(10,10,30,30)
canvas = FigureCanvasFltkAgg(figure)
window.end()
window.show()
window.make_current()
figManager = FigureManagerFltkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FltkCanvas(Fltk.Fl_Widget):
def __init__(self,x,y,w,h,l,source):
Fltk.Fl_Widget.__init__(self, 0, 0, w, h, "canvas")
self._source=source
self._oldsize=(None,None)
self._draw_overlay = False
self._button = None
self._key = None
def draw(self):
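        # Re-render the Agg source if the widget size changed, then blit its
        # RGBA buffer into this FLTK widget.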
newsize=(self.w(),self.h())
if(self._oldsize !=newsize):
self._oldsize =newsize
self._source.resize(newsize)
self._source.draw()
t1,t2,w,h = self._source.figure.bbox.bounds
Fltk.fl_draw_image(self._source.buffer_rgba(0,0),0,0,int(w),int(h),4,0)
self.redraw()
def blit(self,bbox=None):
if bbox is None:
t1,t2,w,h = self._source.figure.bbox.bounds
else:
t1o,t2o,wo,ho = self._source.figure.bbox.bounds
t1,t2,w,h = bbox.bounds
x,y=int(t1),int(t2)
Fltk.fl_draw_image(self._source.buffer_rgba(x,y),x,y,int(w),int(h),4,int(wo)*4)
#self.redraw()
def handle(self, event):
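        # Translate FLTK focus/keyboard/mouse events into the corresponding
        # matplotlib FigureCanvasBase callbacks; y is flipped because FLTK's
        # origin is the top-left corner while matplotlib's is the bottom-left.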
x=Fltk.Fl.event_x()
y=Fltk.Fl.event_y()
yf=self._source.figure.bbox.height() - y
if event == Fltk.FL_FOCUS or event == Fltk.FL_UNFOCUS:
return 1
elif event == Fltk.FL_KEYDOWN:
ikey= Fltk.Fl.event_key()
if(ikey<=255):
self._key=chr(ikey)
else:
try:
self._key=special_key[ikey]
except:
self._key=None
FigureCanvasBase.key_press_event(self._source, self._key)
return 1
elif event == Fltk.FL_KEYUP:
FigureCanvasBase.key_release_event(self._source, self._key)
self._key=None
elif event == Fltk.FL_PUSH:
self.window().make_current()
if Fltk.Fl.event_button1():
self._button = 1
elif Fltk.Fl.event_button2():
self._button = 2
elif Fltk.Fl.event_button3():
self._button = 3
else:
self._button = None
if self._draw_overlay:
self._oldx=x
self._oldy=y
if Fltk.Fl.event_clicks():
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
else:
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
elif event == Fltk.FL_ENTER:
self.take_focus()
return 1
elif event == Fltk.FL_LEAVE:
return 1
elif event == Fltk.FL_MOVE:
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_DRAG:
self.window().make_current()
if self._draw_overlay:
self._dx=Fltk.Fl.event_x()-self._oldx
self._dy=Fltk.Fl.event_y()-self._oldy
Fltk.fl_overlay_rect(self._oldx,self._oldy,self._dx,self._dy)
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_RELEASE:
self.window().make_current()
if self._draw_overlay:
Fltk.fl_overlay_clear()
FigureCanvasBase.button_release_event(self._source, x, yf, self._button)
self._button = None
return 1
return 0
class FigureCanvasFltkAgg(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self,figure)
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self.canvas=FltkCanvas(0, 0, w, h, "canvas",self)
#self.draw()
def resize(self,size):
w, h = size
# compute desired figure size in inches
dpival = self.figure.dpi.get()
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch,hinch)
def draw(self):
FigureCanvasAgg.draw(self)
self.canvas.redraw()
def blit(self,bbox):
self.canvas.blit(bbox)
show = draw
def widget(self):
return self.canvas
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def destroy_figure(ptr,figman):
figman.window.hide()
Gcf.destroy(figman._num)
class FigureManagerFltkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The fltk.Toolbar
window : The fltk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
#Fltk container window
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window = window
self.window.size(w,h+30)
self.window_title="Figure %d" % num
self.window.label(self.window_title)
self.window.size_range(350,200)
self.window.callback(destroy_figure,self)
self.canvas = canvas
self._num = num
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2FltkAgg( canvas, self )
else:
self.toolbar = None
self.window.add_resizable(canvas.widget())
if self.toolbar:
self.window.add(self.toolbar.widget())
self.toolbar.update()
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
_focus = windowing.FocusManager()
self.canvas.draw()
self.window.redraw()
def set_window_title(self, title):
self.window_title=title
self.window.label(title)
class AxisMenu:
def __init__(self, toolbar):
self.toolbar=toolbar
self._naxes = toolbar.naxes
self._mbutton = Fltk.Fl_Menu_Button(0,0,50,10,"Axes")
self._mbutton.add("Select All",0,select_all,self,0)
self._mbutton.add("Invert All",0,invert_all,self,Fltk.FL_MENU_DIVIDER)
self._axis_txt=[]
self._axis_var=[]
for i in range(self._naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes, naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
self._mbutton.remove(i+2)
if(naxes):
self._axis_var=self._axis_var[:naxes-1]
self._axis_txt=self._axis_txt[:naxes-1]
else:
self._axis_var=[]
self._axis_txt=[]
self._naxes = naxes
set_active(0,self)
def widget(self):
return self._mbutton
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].value()]
return a
def set_active(ptr,amenu):
amenu.toolbar.set_active(amenu.get_indices())
def invert_all(ptr,amenu):
for a in amenu._axis_var:
if not a.value(): a.set()
set_active(ptr,amenu)
def select_all(ptr,amenu):
for a in amenu._axis_var:
a.set()
set_active(ptr,amenu)
class FLTKButton:
def __init__(self, text, file, command,argument,type="classic"):
file = os.path.join(rcParams['datapath'], 'images', file)
self.im = Fltk.Fl_PNM_Image(file)
size=26
if type=="repeat":
self.b = Fltk.Fl_Repeat_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="classic":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="light":
self.b = Fltk.Fl_Light_Button(0,0,size+20,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="pushed":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_UP_BOX)
self.b.down_box(Fltk.FL_DOWN_BOX)
self.b.type(Fltk.FL_TOGGLE_BUTTON)
self.tooltiptext=text+" "
self.b.tooltip(self.tooltiptext)
self.b.callback(command,argument)
self.b.image(self.im)
self.b.deimage(self.im)
self.type=type
def widget(self):
return self.b
class NavigationToolbar:
"""
    Public attributes
canvas - the FigureCanvas (FigureCanvasFltkAgg = customised fltk.Widget)
"""
def __init__(self, canvas, figman):
#xmin, xmax = canvas.figure.bbox.intervalx().get_bounds()
#height, width = 50, xmax-xmin
self.canvas = canvas
self.figman = figman
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bLeft = FLTKButton(
text="Left", file="stock_left.ppm",
command=pan,argument=(self,1,'x'),type="repeat")
self.bRight = FLTKButton(
text="Right", file="stock_right.ppm",
command=pan,argument=(self,-1,'x'),type="repeat")
self.bZoomInX = FLTKButton(
text="ZoomInX",file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'x'),type="repeat")
self.bZoomOutX = FLTKButton(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'x'),type="repeat")
self.bUp = FLTKButton(
text="Up", file="stock_up.ppm",
command=pan,argument=(self,1,'y'),type="repeat")
self.bDown = FLTKButton(
text="Down", file="stock_down.ppm",
command=pan,argument=(self,-1,'y'),type="repeat")
self.bZoomInY = FLTKButton(
text="ZoomInY", file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'y'),type="repeat")
self.bZoomOutY = FLTKButton(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'y'),type="repeat")
self.bSave = FLTKButton(
text="Save", file="stock_save_as.ppm",
command=save_figure, argument=self)
self._group.end()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
def pan(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.panx(direction)
else:
a.pany(direction)
base.figman.show()
def zoom(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.zoomx(direction)
else:
a.zoomy(direction)
base.figman.show()
def save_figure(ptr,base):
filetypes = base.canvas.get_supported_filetypes()
default_filetype = base.canvas.get_default_filetype()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
selected_filter = 0
filters = []
for i, (ext, name) in enumerate(sorted_filetypes):
filter = '%s (*.%s)' % (name, ext)
filters.append(filter)
if ext == default_filetype:
selected_filter = i
filters = '\t'.join(filters)
file_chooser=base._fc
file_chooser.filter(filters)
file_chooser.filter_value(selected_filter)
file_chooser.show()
while file_chooser.visible() :
Fltk.Fl.wait()
fname=None
if(file_chooser.count() and file_chooser.value(0) != None):
fname=""
(status,fname)=Fltk.fl_filename_absolute(fname, 1024, file_chooser.value(0))
if fname is None: # Cancel
return
#start from last directory
lastDir = os.path.dirname(fname)
file_chooser.directory(lastDir)
format = sorted_filetypes[file_chooser.filter_value()][0]
try:
base.canvas.print_figure(fname, format=format)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_fltk(msg)
class NavigationToolbar2FltkAgg(NavigationToolbar2):
"""
    Public attributes
canvas - the FigureCanvas
figman - the Figure manager
"""
def __init__(self, canvas, figman):
self.canvas = canvas
self.figman = figman
NavigationToolbar2.__init__(self, canvas)
self.pan_selected=False
self.zoom_selected=False
def set_cursor(self, cursor):
Fltk.fl_cursor(cursord[cursor],Fltk.FL_BLACK,Fltk.FL_WHITE)
def dynamic_update(self):
self.canvas.draw()
def pan(self,*args):
self.pan_selected=not self.pan_selected
self.zoom_selected = False
self.canvas.canvas._draw_overlay= False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.pan(self,args)
def zoom(self,*args):
self.zoom_selected=not self.zoom_selected
self.canvas.canvas._draw_overlay=self.zoom_selected
self.pan_selected = False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.zoom(self,args)
def configure_subplots(self,*args):
window = Fltk.Fl_Double_Window(100,100,480,240)
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasFltkAgg(toolfig)
window.end()
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
window.show()
canvas.show()
def _init_toolbar(self):
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = self.canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bHome = FLTKButton(
text="Home", file="home.ppm",
command=self.home,argument=self)
self.bBack = FLTKButton(
text="Back", file="back.ppm",
command=self.back,argument=self)
self.bForward = FLTKButton(
text="Forward", file="forward.ppm",
command=self.forward,argument=self)
self.bPan = FLTKButton(
text="Pan/Zoom",file="move.ppm",
command=self.pan,argument=self,type="pushed")
self.bZoom = FLTKButton(
text="Zoom to rectangle",file="zoom_to_rect.ppm",
command=self.zoom,argument=self,type="pushed")
self.bsubplot = FLTKButton( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots,argument=self,type="pushed")
self.bSave = FLTKButton(
text="Save", file="filesave.ppm",
command=save_figure, argument=self)
self._group.end()
self.message = Fltk.Fl_Output(0,0,w,8)
self._group.add_resizable(self.message)
self.update()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def set_message(self, s):
self.message.value(s)
FigureManager = FigureManagerFltkAgg
| agpl-3.0 |
pprett/scikit-learn | sklearn/preprocessing/label.py | 28 | 28237 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
See also
--------
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
y = column_or_1d(y, warn=True)
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : array of shape [n_samples,] or [n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def fit_transform(self, y):
"""Fit label binarizer and transform multi-class labels to binary
labels.
The output of transform is sometimes referred to as
the 1-of-K coding scheme.
Parameters
----------
y : array or sparse matrix of shape [n_samples,] or \
[n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
return self.fit(y).transform(y)
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as
the 1-of-K coding scheme.
Parameters
----------
y : array or sparse matrix of shape [n_samples,] or \
[n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this makes it possible to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if n_classes == 1:
if sparse_output:
return sp.csr_matrix((n_samples, 1), dtype=int)
else:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
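# Worked example for _inverse_binarize_multiclass (comment only): with
# classes = [1, 2, 6] and a dense score row [[0.1, 0.8, 0.1]], the argmax is
# column 1, so the row maps back to class 2; ties resolve to the first maximal
# column in both the dense and the sparse code paths.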
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
return np.repeat(classes[0], len(y))
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> from sklearn.preprocessing import MultiLabelBinarizer
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
See also
--------
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
# ensure yt.indices keeps its current dtype
yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype,
copy=False)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
check_is_fitted(self, 'classes_')
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
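    # Illustration (comment only): for y = [{'a'}, {'a', 'b'}] with mapping
    # {'a': 0, 'b': 1}, indices becomes [0, 0, 1] (within-row order may vary)
    # and indptr [0, 1, 3] -- the CSR encoding of [[1, 0], [1, 1]].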
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
check_is_fitted(self, 'classes_')
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
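    # Round-trip sketch (comment only): fit_transform([{'a', 'b'}, {'b'}])
    # yields [[1, 1], [0, 1]] with classes_ ['a', 'b'], and inverse_transform
    # of that matrix returns [('a', 'b'), ('b',)].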
| bsd-3-clause |
jni/skan | skan/gui.py | 1 | 11875 | import os
import json
import asyncio
from pathlib import Path
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,
NavigationToolbar2Tk)
import matplotlib.pyplot as plt
import tkinter as tk
import tkinter.filedialog
from tkinter import ttk
import click
from . import pre, pipe, draw, io, __version__
@asyncio.coroutine
def _async(coroutine, *args):
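    # Run a blocking callable in the default executor so the combined
    # Tk/asyncio event loop stays responsive while it runs.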
loop = asyncio.get_event_loop()
return (yield from loop.run_in_executor(None, coroutine, *args))
STANDARD_MARGIN = (3, 3, 12, 12)
class Launch(tk.Tk):
def __init__(self, params_dict=None):
super().__init__()
self.title('Skeleton analysis tool')
self.crop_radius = tk.IntVar(value=0, name='Crop radius')
self.smooth_method = tk.StringVar(value='Gaussian',
name='Smoothing method')
self.smooth_method._choices = pre.SMOOTH_METHODS
self.smooth_radius = tk.DoubleVar(value=0.1, name='Smoothing radius')
self.threshold_radius = tk.DoubleVar(value=50e-9,
name='Threshold radius')
self.brightness_offset = tk.DoubleVar(value=0.075,
name='Brightness offset')
self.image_format = tk.StringVar(value='auto',
name='Image format')
self.scale_metadata_path = tk.StringVar(value='Scan/PixelHeight',
name='Scale metadata path')
self.preview_skeleton_plots = tk.BooleanVar(value=True, name='Live '
'preview skeleton plot?')
self.save_skeleton_plots = tk.BooleanVar(value=True,
name='Save skeleton plot?')
self.skeleton_plot_prefix = tk.StringVar(value='skeleton-plot-',
name='Prefix for skeleton plots')
self.output_filename = tk.StringVar(value='skeleton.xlsx',
name='Output filename')
self.parameters = [
self.crop_radius,
self.smooth_method,
self.smooth_radius,
self.threshold_radius,
self.brightness_offset,
self.image_format,
self.scale_metadata_path,
self.preview_skeleton_plots,
self.save_skeleton_plots,
self.skeleton_plot_prefix,
self.output_filename,
]
self.input_files = []
self.output_folder = None
if params_dict is None:
params_dict = {}
self.params_dict = params_dict.copy()
self.parameter_config(params_dict)
# allow resizing
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.create_main_frame()
def parameter_config(self, params_dict):
"""Set parameter values from a config dictionary."""
if isinstance(params_dict, str):
if params_dict.startswith('{'): # JSON string
params_dict = json.loads(params_dict)
else: # config file
with open(params_dict) as params_fin:
params_dict = json.load(params_fin)
self.params_dict.update(params_dict)
name2param = {p._name.lower(): p for p in self.parameters}
for param, value in self.params_dict.items():
if param.lower() in name2param:
name2param[param].set(value)
params_dict.pop(param)
for param, value in params_dict.copy().items():
if param.lower() == 'input files':
self.input_files = value
params_dict.pop(param)
elif param.lower() == 'output folder':
self.output_folder = Path(os.path.expanduser(value))
params_dict.pop(param)
elif param.lower() == 'version':
print(f'Parameter file version: {params_dict.pop(param)}')
for param in params_dict:
print(f'Parameter not recognised: {param}')
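    # Example of an accepted configuration mapping (illustrative values only;
    # keys are matched case-insensitively against the Tk variable names set
    # in __init__):
    #     {"smoothing radius": 0.2, "threshold radius": 5e-08,
    #      "input files": ["scan0.tif"], "output folder": "~/results"}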
def save_parameters(self, filename=None):
out = {p._name.lower(): p.get() for p in self.parameters}
out['input files'] = self.input_files
out['output folder'] = str(self.output_folder)
out['version'] = __version__
if filename is None:
return json.dumps(out)
attempt = 0
base, ext = os.path.splitext(filename)
while os.path.exists(filename):
filename = f'{base} ({attempt}){ext}'
attempt += 1
with open(filename, mode='wt') as fout:
json.dump(out, fout, indent=2)
def create_main_frame(self):
main = ttk.Frame(master=self, padding=STANDARD_MARGIN)
main.grid(row=0, column=0, sticky='nsew')
self.create_parameters_frame(main)
self.create_buttons_frame(main)
main.pack()
def create_parameters_frame(self, parent):
parameters = ttk.Frame(master=parent, padding=STANDARD_MARGIN)
parameters.grid(sticky='nsew')
heading = ttk.Label(parameters, text='Analysis parameters')
heading.grid(column=0, row=0, sticky='n')
for i, param in enumerate(self.parameters, start=1):
param_label = ttk.Label(parameters, text=param._name)
param_label.grid(row=i, column=0, sticky='nsew')
if type(param) == tk.BooleanVar:
param_entry = ttk.Checkbutton(parameters, variable=param)
elif hasattr(param, '_choices'):
param_entry = ttk.OptionMenu(parameters, param, param.get(),
*param._choices.keys())
else:
param_entry = ttk.Entry(parameters, textvariable=param)
param_entry.grid(row=i, column=1, sticky='nsew')
def create_buttons_frame(self, parent):
buttons = ttk.Frame(master=parent, padding=STANDARD_MARGIN)
buttons.grid(sticky='nsew')
actions = [
('Choose config', self.choose_config_file),
('Choose files', self.choose_input_files),
('Choose output folder', self.choose_output_folder),
('Run', lambda: asyncio.ensure_future(self.run()))
]
for col, (action_name, action) in enumerate(actions):
button = ttk.Button(buttons, text=action_name,
command=action)
button.grid(row=0, column=col)
def choose_config_file(self):
config_file = tk.filedialog.askopenfilename()
self.parameter_config(config_file)
def choose_input_files(self):
self.input_files = tk.filedialog.askopenfilenames()
if len(self.input_files) > 0 and self.output_folder is None:
self.output_folder = Path(os.path.dirname(self.input_files[0]))
def choose_output_folder(self):
self.output_folder = Path(
tk.filedialog.askdirectory(initialdir=self.output_folder))
def make_figure_window(self):
self.figure_window = tk.Toplevel(self)
self.figure_window.wm_title('Preview')
screen_dpi = self.figure_window.winfo_fpixels('1i')
screen_width = self.figure_window.winfo_screenwidth() # in pixels
figure_width = screen_width / 2 / screen_dpi
figure_height = 0.75 * figure_width
self.figure = Figure(figsize=(figure_width, figure_height),
dpi=screen_dpi)
ax0 = self.figure.add_subplot(221)
axes = [self.figure.add_subplot(220 + i, sharex=ax0, sharey=ax0)
for i in range(2, 5)]
self.axes = np.array([ax0] + axes)
canvas = FigureCanvasTkAgg(self.figure, master=self.figure_window)
canvas.show()
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
toolbar = NavigationToolbar2Tk(canvas, self.figure_window)
toolbar.update()
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
async def run(self):
print('Input files:')
for file in self.input_files:
print(' ', file)
print('Parameters:')
for param in self.parameters:
p = param.get()
print(' ', param, type(p), p)
print('Output:', self.output_folder)
save_skeleton = ('' if not self.save_skeleton_plots.get() else
self.skeleton_plot_prefix.get())
images_iterator = pipe.process_images(
self.input_files, self.image_format.get(),
self.threshold_radius.get(),
self.smooth_radius.get(),
self.brightness_offset.get(),
self.scale_metadata_path.get(),
crop_radius=self.crop_radius.get(),
smooth_method=self.smooth_method.get())
if self.preview_skeleton_plots.get():
self.make_figure_window()
elif self.save_skeleton_plots.get():
self.figure = plt.figure()
ax0 = self.figure.add_subplot(221)
axes = [self.figure.add_subplot(220 + i, sharex=ax0, sharey=ax0)
for i in range(2, 5)]
self.axes = np.array([ax0] + axes)
self.save_parameters(self.output_folder / 'skan-config.json')
for i, result in enumerate(images_iterator):
if i < len(self.input_files):
filename, image, thresholded, skeleton, framedata = result
if save_skeleton:
for ax in self.axes:
ax.clear()
w, h = draw.pixel_perfect_figsize(image)
self.figure.set_size_inches(4*w, 4*h)
draw.pipeline_plot(image, thresholded, skeleton, framedata,
figure=self.figure, axes=self.axes)
output_basename = (save_skeleton +
os.path.basename(
os.path.splitext(filename)[0]) +
'.png')
output_filename = str(self.output_folder / output_basename)
self.figure.savefig(output_filename)
if self.preview_skeleton_plots.get():
self.figure.canvas.draw_idle()
else:
result_full, result_image = result
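                # Keep junction-to-junction branches (branch-type == 2) whose
                # mean shape index lies in the ridge-like range (0.125-0.625)
                # and whose euclidean distance is non-zero, then average the
                # per-branch measurements for each input file.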
result_filtered = result_full[(result_full['mean shape index']>0.125) &
(result_full['mean shape index']<0.625) &
(result_full['branch-type'] == 2) &
(result_full['euclidean-distance']>0)]
ridgeydata = result_filtered.groupby('filename')[['filename','branch-distance','scale','euclidean-distance','squiggle','mean shape index']].mean()
io.write_excel(str(self.output_folder /
self.output_filename.get()),
branches=result_full,
images=result_image,
filtered=ridgeydata,
parameters=json.loads(self.save_parameters()))
def tk_update(loop, app):
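    # Poll the Tk event loop from asyncio every 10 ms; once the window has
    # been destroyed, app.update() raises TclError and the asyncio loop stops.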
try:
app.update()
except tkinter.TclError:
loop.stop()
return
loop.call_later(.01, tk_update, loop, app)
@click.command()
@click.option('-c', '--config', default='',
help='JSON configuration file.')
def launch(config):
params = json.load(open(config)) if config else None
app = Launch(params)
loop = asyncio.get_event_loop()
tk_update(loop, app)
loop.run_forever()
| bsd-3-clause |