repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
roxyboy/scikit-learn
|
examples/decomposition/plot_kernel_pca.py
|
353
|
2011
|
"""
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes the data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
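# Illustrative check (not part of the original example): a plain linear
# classifier should separate the two circles almost perfectly in the KPCA
# space, but not in the original space, which is the claim made above.
from sklearn.linear_model import LogisticRegression
lin = LogisticRegression()
print("Linear accuracy on raw data:  %.2f" % lin.fit(X, y).score(X, y))
print("Linear accuracy on KPCA data: %.2f" % lin.fit(X_kpca, y).score(X_kpca, y))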
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
|
bsd-3-clause
|
amueller/pystruct
|
examples/plot_latent_node.py
|
2
|
3205
|
"""
=================================
Latent Variable Hierarchical CRF
=================================
Solving a 2d grid toy problem by introducing an additional layer of latent
variables.
"""
import numpy as np
import itertools
from pystruct.models import GraphCRF, LatentNodeCRF
from pystruct.learners import NSlackSSVM, OneSlackSSVM, LatentSSVM
from pystruct.datasets import make_simple_2x2
from pystruct.utils import make_grid_edges, plot_grid
import matplotlib.pyplot as plt
def plot_boxes(boxes, size=4, title=""):
cmap = plt.cm.gray
if boxes[0].size == size * size:
fig, ax = plt.subplots(1, len(boxes), figsize=(8, 0.7))
for a, x in zip(ax, boxes):
plot_grid(x[:size * size].reshape(size, size), cmap=cmap, axes=a,
border_color="green")
else:
# have hidden states
fig, ax = plt.subplots(2, len(boxes), figsize=(8, 1))
for a, x in zip(ax[0], boxes):
plot_grid(x[size * size:].reshape(size // 2, size // 2), cmap=cmap,
axes=a, border_color="green")
for a, x in zip(ax[1], boxes):
plot_grid(x[:size * size].reshape(size, size), cmap=cmap, axes=a,
border_color="green")
fig.subplots_adjust(.01, .03, .98, .75, .2, .05)
fig.suptitle(title)
# learn the "easy" 2x2 boxes dataset.
# a 2x2 box is placed randomly in a 4x4 grid
# we add a latent variable for each 2x2 patch
# that should make the model fairly simple
X, Y = make_simple_2x2(seed=1)
# flatten X and Y
X_flat = [x.reshape(-1, 1).astype(np.float) for x in X]
Y_flat = [y.ravel() for y in Y]
# first, use standard graph CRF. Can't do much, high loss.
crf = GraphCRF()
svm = NSlackSSVM(model=crf, max_iter=200, C=1, n_jobs=1)
G = [make_grid_edges(x) for x in X]
X_grid_edges = list(zip(X_flat, G))
svm.fit(X_grid_edges, Y_flat)
plot_boxes(svm.predict(X_grid_edges), title="Non-latent SSVM predictions")
print("Training score binary grid CRF: %f" % svm.score(X_grid_edges, Y_flat))
# using one latent variable for each 2x2 rectangle
latent_crf = LatentNodeCRF(n_labels=2, n_features=1, n_hidden_states=2,
inference_method='lp')
ssvm = OneSlackSSVM(model=latent_crf, max_iter=200, C=100,
n_jobs=-1, show_loss_every=10, inference_cache=50)
latent_svm = LatentSSVM(ssvm)
# make edges for hidden states:
edges = []
node_indices = np.arange(4 * 4).reshape(4, 4)
for i, (x, y) in enumerate(itertools.product([0, 2], repeat=2)):
for j in range(x, x + 2):
for k in range(y, y + 2):
edges.append([i + 4 * 4, node_indices[j, k]])
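# each hidden node i (indices 16..19, appended after the 16 visible grid
# nodes) is connected to the four visible nodes of its own 2x2 patch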
G = [np.vstack([make_grid_edges(x), edges]) for x in X]
# Random initialization
H_init = [np.hstack([y.ravel(), np.random.randint(2, 4, size=2 * 2)])
for y in Y]
plot_boxes(H_init, title="Top: Random initial hidden states. Bottom: Ground"
"truth labeling.")
X_ = list(zip(X_flat, G, [2 * 2 for x in X_flat]))
latent_svm.fit(X_, Y_flat, H_init)
print("Training score with latent nodes: %f " % latent_svm.score(X_, Y_flat))
H = latent_svm.predict_latent(X_)
plot_boxes(H, title="Top: Hidden states after training. Bottom: Prediction.")
plt.show()
|
bsd-2-clause
|
jnmclarty/trump
|
setup.py
|
2
|
3244
|
import os
import shutil
import sys
import time
from setuptools import setup, find_packages
from setuptools.command.install import install
v = '0.0.5'
cmds = sys.argv
installing = 'install' in cmds
def copy_cfg_sample_if_not_exists(p):
for f in os.listdir(p):
if ".cfg_sample" in f:
newf = f.replace(".cfg_sample",".cfg")
if not os.path.isfile(os.path.join(p,newf)):
print "\nCreating {} from sample file.".format(os.path.join(p,newf))
shutil.copy(os.path.join(p,f),os.path.join(p,newf))
else:
print "\n{} already exists, will overwrite momentarily. Break execution to stop.".format(newf)
for i in range(5):
sys.stdout.write(".")
time.sleep(1)
#if ans.upper()[0] == 'Y':
print "Overwriting {} from sample file.".format(newf)
shutil.copy(os.path.join(p,f),os.path.join(p,newf))
#else:
# print "Skipping {}.".format(newf)
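# Custom install command: after the standard install step runs, seed .cfg
# files in the installed package from the bundled .cfg_sample templates
# (existing .cfg files are overwritten after the short countdown above).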
class TrumpInstall(install):
def run(self):
install.run(self)
if installing:
config_path = os.path.join(self.install_lib,'trump','config')
copy_cfg_sample_if_not_exists(config_path)
settings_path = os.path.join(self.install_lib,'trump','templating','settings')
copy_cfg_sample_if_not_exists(settings_path)
def read(*p):
"""Build a file path from paths and return the contents."""
with open(os.path.join(*p), 'r') as fi:
return fi.read()
setup(
name = 'Trump',
version = v,
packages = find_packages(),
description = 'Persistent Objectified Indexed Data',
install_requires = ['smuggle','pandas','SQLAlchemy','Quandl','validada'],
long_description = read('README.rst') ,
package_data = {'': ['config/*.cfg_sample', 'test/*.py', 'test/testdata/*.csv'],
'trump.templating' : ['settings/*.cfg_sample', 'test/*'],
'trump.aggregation' : ['test/*'],
#'trump.extensions.source' : ['*'],
'trump.extensions' : ['*.py'] + ['source/tx-{}/*'.format(f) for f in ['bbfetch', 'dbapi', 'psycopg2', 'pydatacsv', 'pydatadatareaderst', 'quandl', 'sqlalchemy', 'worldbankst', 'trump']],
'trump.reporting' : ['test/*'],
'trump.tools' : ['test/*']},
cmdclass = {'install': TrumpInstall},
author = 'Jeffrey McLarty',
author_email = '[email protected]',
url = 'http://Equitable.github.com/trump/',
download_url = 'https://github.com/Equitable/trump/tarball/' + v,
keywords = ['data', 'timeseries', 'time series', 'indexed', 'objectified', 'trump', 'monotonic', 'RDD', 'relational database', 'pandas', 'SQLAlchemy'],
classifiers = ['Development Status :: 1 - Planning',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Topic :: Database',
'Topic :: Office/Business',
'Topic :: Scientific/Engineering',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7'])
|
bsd-3-clause
|
Aasmi/scikit-learn
|
sklearn/externals/joblib/__init__.py
|
36
|
4795
|
""" Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
over, for instance when prototyping computation-heavy jobs (as in
scientific development), but hand-crafted solutions to alleviate this
issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanisms to help track what
has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
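A minimal sketch of the persistence API (illustrative path; the lines are
marked to be skipped when run as a doctest)::
>>> from sklearn.externals.joblib import dump, load    # doctest: +SKIP
>>> dump(a, '/tmp/joblib/a.pkl')                        # doctest: +SKIP
>>> a_restored = load('/tmp/joblib/a.pkl')              # doctest: +SKIP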
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b2'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
|
bsd-3-clause
|
jaak-s/HashForestSVM
|
HashForestSVM/data.py
|
1
|
1357
|
import numpy as np
import matplotlib.pyplot as pp
from sklearn import metrics
import pylab as pl
def genXOR(n):
n = n // 4
means = np.array( [[2, -2], [-2, 2], [2, 2], [-2, -2]] )
cov = np.array( [[1, 0],[0, 1]] )
X = np.vstack( [ np.random.multivariate_normal(m, cov, size=n) for m in means ] )
y = np.concatenate( [np.repeat(-1, n*2), np.repeat(1, n*2)] )
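# the first two Gaussians ([2, -2] and [-2, 2]) are labeled -1, the last two
# ([2, 2] and [-2, -2]) are labeled +1, which yields the XOR pattern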
return (X, y)
def plotXOR(X, y):
pp.plot(X[y==1,0], X[y==1,1], 'ro')
pp.plot(X[y!=1,0], X[y!=1,1], 'bs')
pp.grid()
pp.axis("Equal")
pp.show()
def prAUC(ytest, yprob):
precision, recall, th = metrics.precision_recall_curve(ytest, yprob)
return metrics.auc(recall, precision)
def prPlot(ytest, yprob, yprob2=None, method1="", method2=""):
pl.clf()
auc = prAUC(ytest, yprob)
precision, recall, th = metrics.precision_recall_curve(ytest, yprob)
pl.plot(recall, precision, label='Precision-Recall: %s (%0.3f)' % (method1, auc) )
if yprob2 is not None:
pr2, rc2, t2 = metrics.precision_recall_curve(ytest, yprob2)
pl.plot(rc2, pr2, 'r', label='Precision-Recall: %s (%0.3f)' % (method2, prAUC(ytest, yprob2)) )
pl.xlabel('Recall')
pl.ylabel('Precision')
pl.ylim([0.0, 1.05])
pl.xlim([0.0, 1.0])
pl.title('Precision-Recall curve (AUC = %0.3f)' % auc)
pl.legend(loc="lower left")
pl.show()
|
apache-2.0
|
mxjl620/scikit-learn
|
sklearn/metrics/cluster/supervised.py
|
207
|
27395
|
"""Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
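# In Hubert & Arabie's notation, sum_comb is the raw index sum_ij C(n_ij, 2),
# prod_comb its expected value under random labelings, and mean_comb the
# maximum index, so the return value below is
# (Index - ExpectedIndex) / (MaxIndex - ExpectedIndex).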
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate, given the knowledge of the ground
truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous, but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
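# outer[i, j] = a_i * b_j is the product of the marginal counts, used below
# to form the P(i) P'(j) term of the MI formula from the docstring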
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami : float (bounded above by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(ie perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_information_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
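# normalize MI by the geometric mean of the two entropies; the 1e-10
# floor guards against division by zero for (near-)constant labelings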
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
|
bsd-3-clause
|
manashmndl/scikit-learn
|
examples/linear_model/plot_logistic.py
|
312
|
1426
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic function
=========================================================
Shown in the plot is how logistic regression would, on this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic (sigmoid) curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
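# note: despite its name, `loss` holds the fitted logistic curve, i.e. the
# predicted P(y=1 | x) evaluated on X_test, which is what gets plotted below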
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
|
bsd-3-clause
|
uglyboxer/linear_neuron
|
net-p3/lib/python3.5/site-packages/matplotlib/__init__.py
|
10
|
48253
|
"""
This is an object-oriented plotting library.
A procedural interface is provided by the companion pyplot module,
which may be imported directly, e.g.::
import matplotlib.pyplot as plt
or using ipython::
ipython
at your terminal, followed by::
In [1]: %matplotlib
In [2]: import matplotlib.pyplot as plt
at the ipython shell prompt.
For the most part, direct use of the object-oriented library is
encouraged when programming; pyplot is primarily for working
interactively. The
exceptions are the pyplot commands :func:`~matplotlib.pyplot.figure`,
:func:`~matplotlib.pyplot.subplot`,
:func:`~matplotlib.pyplot.subplots`, and
:func:`~pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
defines the :class:`~matplotlib.axes.Axes` class. Most pylab
commands are wrappers for :class:`~matplotlib.axes.Axes`
methods. The axes module is the highest level of OO access to
the library.
:mod:`matplotlib.figure`
defines the :class:`~matplotlib.figure.Figure` class.
:mod:`matplotlib.artist`
defines the :class:`~matplotlib.artist.Artist` base class for
all classes that draw things.
:mod:`matplotlib.lines`
defines the :class:`~matplotlib.lines.Line2D` class for
drawing lines and markers
:mod:`matplotlib.patches`
defines classes for drawing polygons
:mod:`matplotlib.text`
defines the :class:`~matplotlib.text.Text`,
:class:`~matplotlib.text.TextWithDash`, and
:class:`~matplotlib.text.Annotate` classes
:mod:`matplotlib.image`
defines the :class:`~matplotlib.image.AxesImage` and
:class:`~matplotlib.image.FigureImage` classes
:mod:`matplotlib.collections`
classes for efficient drawing of groups of lines or polygons
:mod:`matplotlib.colors`
classes for interpreting color specifications and for making
colormaps
:mod:`matplotlib.cm`
colormaps and the :class:`~matplotlib.image.ScalarMappable`
mixin class for providing color mapping functionality to other
classes
:mod:`matplotlib.ticker`
classes for calculating tick mark locations and for formatting
tick labels
:mod:`matplotlib.backends`
a subpackage with modules for various gui libraries and output
formats
The base matplotlib namespace includes:
:data:`~matplotlib.rcParams`
a global dictionary of default configuration settings. It is
initialized by code which may be overridden by a matplotlibrc
file.
:func:`~matplotlib.rc`
a function for setting groups of rcParams values
:func:`~matplotlib.use`
a function for setting the matplotlib backend. If used, this
function must be called immediately after importing matplotlib
for the first time. In particular, it must be called
**before** importing pylab (if pylab is imported).
matplotlib was initially written by John D. Hunter (1968-2012) and is now
developed and maintained by a host of others.
Occasionally the internal documentation (python docstrings) will refer
to MATLAB®, a registered trademark of The MathWorks, Inc.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import sys
import distutils.version
from itertools import chain
__version__ = str('1.4.3')
__version__numpy__ = str('1.6') # minimum required numpy version
try:
import dateutil
except ImportError:
raise ImportError("matplotlib requires dateutil")
def compare_versions(a, b):
"return True if a is greater than or equal to b"
if a:
if six.PY3:
if isinstance(a, bytes):
a = a.decode('ascii')
if isinstance(b, bytes):
b = b.decode('ascii')
a = distutils.version.LooseVersion(a)
b = distutils.version.LooseVersion(b)
return a >= b
else:
return False
if not compare_versions(six.__version__, '1.3'):
raise ImportError(
'six 1.3 or later is required; you have %s' % (
six.__version__))
try:
import pyparsing
except ImportError:
raise ImportError("matplotlib requires pyparsing")
else:
if not compare_versions(pyparsing.__version__, '1.5.6'):
raise ImportError(
"matplotlib requires pyparsing >= 1.5.6")
# pyparsing 2.0.0 bug, but it may be patched in distributions
try:
f = pyparsing.Forward()
f <<= pyparsing.Literal('a')
bad_pyparsing = f is None
except TypeError:
bad_pyparsing = True
# pyparsing 1.5.6 does not have <<= on the Forward class, but
# pyparsing 2.0.0 and later will spew deprecation warnings if
# using << instead. Additionally, the <<= in pyparsing 1.5.7 is
# broken, since it doesn't return self. In order to support
# pyparsing 1.5.6 and above with a common code base, this small
# monkey patch is applied.
if bad_pyparsing:
def _forward_ilshift(self, other):
self.__lshift__(other)
return self
pyparsing.Forward.__ilshift__ = _forward_ilshift
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
import os
import re
import tempfile
import warnings
import contextlib
import distutils.sysconfig
# cbook must import matplotlib only within function
# definitions, so it is safe to import from it here.
from matplotlib.cbook import is_string_like
from matplotlib.compat import subprocess
try:
reload
except NameError:
# Python 3
from imp import reload
if not hasattr(sys, 'argv'): # for modpython
sys.argv = [str('modpython')]
from matplotlib.rcsetup import (defaultParams,
validate_backend)
major, minor1, minor2, s, tmp = sys.version_info
_python24 = (major == 2 and minor1 >= 4) or major >= 3
# the havedate check was a legacy from old matplotlib which preceded
# datetime support
_havedate = True
#try:
# import pkg_resources # pkg_resources is part of setuptools
#except ImportError: _have_pkg_resources = False
#else: _have_pkg_resources = True
if not _python24:
raise ImportError('matplotlib requires Python 2.4 or later')
import numpy
if not compare_versions(numpy.__version__, __version__numpy__):
raise ImportError(
'numpy %s or later is required; you have %s' % (
__version__numpy__, numpy.__version__))
def _is_writable_dir(p):
"""
p is a string pointing to a putative writable dir -- return True if p
is such a string, else False
"""
try:
p + '' # test is string like
except TypeError:
return False
# Test whether the operating system thinks it's a writable directory.
# Note that this check is necessary on Google App Engine, because the
# subsequent check will succeed even though p may not be writable.
if not os.access(p, os.W_OK) or not os.path.isdir(p):
return False
# Also test that it is actually possible to write to a file here.
try:
t = tempfile.TemporaryFile(dir=p)
try:
t.write(b'1')
finally:
t.close()
except OSError:
return False
return True
class Verbose:
"""
A class to handle reporting. Set the fileo attribute to any file
instance to handle the output. Default is sys.stdout
"""
levels = ('silent', 'helpful', 'debug', 'debug-annoying')
vald = dict( [(level, i) for i,level in enumerate(levels)])
# parse the verbosity from the command line; flags look like
# --verbose-silent or --verbose-helpful
_commandLineVerbose = None
for arg in sys.argv[1:]:
# cast to str because we are using unicode_literals,
# and argv is always str
if not arg.startswith(str('--verbose-')):
continue
level_str = arg[10:]
# If it doesn't match one of ours, then don't even
# bother noting it, we are just a 3rd-party library
# to somebody else's script.
if level_str in levels:
_commandLineVerbose = level_str
def __init__(self):
self.set_level('silent')
self.fileo = sys.stdout
def set_level(self, level):
'set the verbosity to one of the Verbose.levels strings'
if self._commandLineVerbose is not None:
level = self._commandLineVerbose
if level not in self.levels:
warnings.warn('matplotlib: unrecognized --verbose-* string "%s".'
' Legal values are %s' % (level, self.levels))
else:
self.level = level
def set_fileo(self, fname):
std = {
'sys.stdout': sys.stdout,
'sys.stderr': sys.stderr,
}
if fname in std:
self.fileo = std[fname]
else:
try:
fileo = open(fname, 'w')
except IOError:
raise ValueError('Verbose object could not open log file "%s" for writing.\nCheck your matplotlibrc verbose.fileo setting'%fname)
else:
self.fileo = fileo
def report(self, s, level='helpful'):
"""
print message s to self.fileo if self.level>=level. Return
value indicates whether a message was issued
"""
if self.ge(level):
print(s, file=self.fileo)
return True
return False
def wrap(self, fmt, func, level='helpful', always=True):
"""
return a callable function that wraps func and reports its
output through the verbose handler if the current verbosity level
is higher than level
if always is True, the report will occur on every function
call; otherwise only on the first time the function is called
"""
assert six.callable(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if (always or not wrapper._spoke):
spoke = self.report(fmt%ret, level)
if not wrapper._spoke: wrapper._spoke = spoke
return ret
wrapper._spoke = False
wrapper.__doc__ = func.__doc__
return wrapper
def ge(self, level):
'return true if self.level is >= level'
return self.vald[self.level]>=self.vald[level]
verbose=Verbose()
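# The checkdep_* helpers below probe external tools (dvipng, ghostscript, tex,
# pdftops, inkscape, xmllint) by running them with a version flag and parsing
# their output; each returns the detected version (checkdep_ghostscript also
# returns the executable name), or None when the tool is missing or its
# output cannot be parsed.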
def checkdep_dvipng():
try:
s = subprocess.Popen(['dvipng','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
line = stdout.decode('ascii').split('\n')[1]
v = line.split()[-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_ghostscript():
if sys.platform == 'win32':
gs_execs = ['gswin32c', 'gswin64c', 'gs']
else:
gs_execs = ['gs']
for gs_exec in gs_execs:
try:
s = subprocess.Popen(
[gs_exec, '--version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
if s.returncode == 0:
v = stdout[:-1].decode('ascii')
return gs_exec, v
except (IndexError, ValueError, OSError):
pass
return None, None
def checkdep_tex():
try:
s = subprocess.Popen(['tex','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
line = stdout.decode('ascii').split('\n')[0]
pattern = r'3\.1\d+'
match = re.search(pattern, line)
v = match.group(0)
return v
except (IndexError, ValueError, AttributeError, OSError):
return None
def checkdep_pdftops():
try:
s = subprocess.Popen(['pdftops','-v'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
lines = stderr.decode('ascii').split('\n')
for line in lines:
if 'version' in line:
v = line.split()[-1]
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def checkdep_inkscape():
try:
s = subprocess.Popen(['inkscape','-V'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
lines = stdout.decode('ascii').split('\n')
for line in lines:
if 'Inkscape' in line:
v = line.split()[1]
break
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def checkdep_xmllint():
try:
s = subprocess.Popen(['xmllint','--version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
lines = stderr.decode('ascii').split('\n')
for line in lines:
if 'version' in line:
v = line.split()[-1]
break
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def checkdep_ps_distiller(s):
if not s:
return False
flag = True
gs_req = '7.07'
gs_sugg = '7.07'
gs_exec, gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later '
'is recommended to use the ps.usedistiller option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller option can not be used '
'unless ghostscript-%s or later is installed on your system') % gs_req)
if s == 'xpdf':
pdftops_req = '3.0'
pdftops_req_alt = '0.9' # poppler version numbers, ugh
pdftops_v = checkdep_pdftops()
if compare_versions(pdftops_v, pdftops_req):
pass
elif compare_versions(pdftops_v, pdftops_req_alt) and not \
compare_versions(pdftops_v, '1.0'):
pass
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller can not be set to '
'xpdf unless xpdf-%s or later is installed on your system') % pdftops_req)
if flag:
return s
else:
return False
def checkdep_usetex(s):
if not s:
return False
tex_req = '3.1415'
gs_req = '7.07'
gs_sugg = '7.07'
dvipng_req = '1.5'
flag = True
tex_v = checkdep_tex()
if compare_versions(tex_v, tex_req): pass
else:
flag = False
warnings.warn(('matplotlibrc text.usetex option can not be used '
'unless TeX-%s or later is '
'installed on your system') % tex_req)
dvipng_v = checkdep_dvipng()
if compare_versions(dvipng_v, dvipng_req): pass
else:
flag = False
warnings.warn( 'matplotlibrc text.usetex can not be used with *Agg '
'backend unless dvipng-1.5 or later is '
'installed on your system')
gs_exec, gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later is '
'recommended for use with the text.usetex '
'option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc text.usetex can not be used '
'unless ghostscript-%s or later is '
'installed on your system') % gs_req)
return flag
def _get_home():
"""Find user's home directory if possible.
Otherwise, returns None.
:see: http://mail.python.org/pipermail/python-list/2005-February/325395.html
"""
try:
if six.PY2 and sys.platform == 'win32':
path = os.path.expanduser(b"~").decode(sys.getfilesystemencoding())
else:
path = os.path.expanduser("~")
except ImportError:
# This happens on Google App Engine (pwd module is not present).
pass
else:
if os.path.isdir(path):
return path
for evar in ('HOME', 'USERPROFILE', 'TMP'):
path = os.environ.get(evar)
if path is not None and os.path.isdir(path):
return path
return None
def _create_tmp_config_dir():
"""
If the config directory can not be created, create a temporary
directory.
Returns None if a writable temporary directory could not be created.
"""
import getpass
import tempfile
try:
tempdir = tempfile.gettempdir()
except NotImplementedError:
# Some restricted platforms (such as Google App Engine) do not provide
# gettempdir.
return None
tempdir = os.path.join(tempdir, 'matplotlib-%s' % getpass.getuser())
os.environ['MPLCONFIGDIR'] = tempdir
return tempdir
get_home = verbose.wrap('$HOME=%s', _get_home, always=False)
def _get_xdg_config_dir():
"""
Returns the XDG configuration directory, according to the `XDG
base directory spec
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
"""
path = os.environ.get('XDG_CONFIG_HOME')
if path is None:
path = get_home()
if path is not None:
path = os.path.join(path, '.config')
return path
def _get_xdg_cache_dir():
"""
Returns the XDG cache directory, according to the `XDG
base directory spec
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
"""
path = os.environ.get('XDG_CACHE_HOME')
if path is None:
path = get_home()
if path is not None:
path = os.path.join(path, '.cache')
return path
def _get_config_or_cache_dir(xdg_base):
from matplotlib.cbook import mkdirs
configdir = os.environ.get('MPLCONFIGDIR')
if configdir is not None:
configdir = os.path.abspath(configdir)
if not os.path.exists(configdir):
mkdirs(configdir)
if not _is_writable_dir(configdir):
return _create_tmp_config_dir()
return configdir
p = None
h = get_home()
if h is not None:
p = os.path.join(h, '.matplotlib')
if (sys.platform.startswith('linux') and xdg_base):
p = os.path.join(xdg_base, 'matplotlib')
if p is not None:
if os.path.exists(p):
if _is_writable_dir(p):
return p
else:
try:
mkdirs(p)
except OSError:
pass
else:
return p
return _create_tmp_config_dir()
def _get_configdir():
"""
Return the string representing the configuration directory.
The directory is chosen as follows:
1. If the MPLCONFIGDIR environment variable is supplied, choose that.
2a. On Linux, if `$HOME/.matplotlib` exists, choose that, but warn that
that is the old location. Barring that, follow the XDG specification
and look first in `$XDG_CONFIG_HOME`, if defined, or `$HOME/.config`.
2b. On other platforms, choose `$HOME/.matplotlib`.
3. If the chosen directory exists and is writable, use that as the
configuration directory.
4. If possible, create a temporary directory, and use it as the
configuration directory.
5. A writable directory could not be found or created; return None.
"""
return _get_config_or_cache_dir(_get_xdg_config_dir())
get_configdir = verbose.wrap('CONFIGDIR=%s', _get_configdir, always=False)
def _get_cachedir():
"""
Return the location of the cache directory.
The procedure used to find the directory is the same as for
_get_config_dir, except using `$XDG_CACHE_HOME`/`~/.cache` instead.
"""
return _get_config_or_cache_dir(_get_xdg_cache_dir())
get_cachedir = verbose.wrap('CACHEDIR=%s', _get_cachedir, always=False)
def _get_data_path():
'get the path to matplotlib data'
if 'MATPLOTLIBDATA' in os.environ:
path = os.environ['MATPLOTLIBDATA']
if not os.path.isdir(path):
raise RuntimeError('Path in environment MATPLOTLIBDATA not a directory')
return path
path = os.sep.join([os.path.dirname(__file__), 'mpl-data'])
if os.path.isdir(path):
return path
# setuptools' namespace_packages may highjack this init file
# so need to try something known to be in matplotlib, not basemap
import matplotlib.afm
path = os.sep.join([os.path.dirname(matplotlib.afm.__file__), 'mpl-data'])
if os.path.isdir(path):
return path
# py2exe zips pure python, so still need special check
if getattr(sys,'frozen',None):
exe_path = os.path.dirname(sys.executable)
path = os.path.join(exe_path, 'mpl-data')
if os.path.isdir(path):
return path
# Try again assuming we need to step up one more directory
path = os.path.join(os.path.split(exe_path)[0], 'mpl-data')
if os.path.isdir(path):
return path
# Try again assuming sys.path[0] is a dir, not an exe
path = os.path.join(sys.path[0], 'mpl-data')
if os.path.isdir(path):
return path
raise RuntimeError('Could not find the matplotlib data files')
def _get_data_path_cached():
if defaultParams['datapath'][0] is None:
defaultParams['datapath'][0] = _get_data_path()
return defaultParams['datapath'][0]
get_data_path = verbose.wrap('matplotlib data path %s', _get_data_path_cached,
always=False)
def get_example_data(fname):
"""
get_example_data is deprecated -- use matplotlib.cbook.get_sample_data instead
"""
raise NotImplementedError('get_example_data is deprecated -- use matplotlib.cbook.get_sample_data instead')
def get_py2exe_datafiles():
datapath = get_data_path()
_, tail = os.path.split(datapath)
d = {}
for root, _, files in os.walk(datapath):
# Need to explicitly remove cocoa_agg files or py2exe complains
# NOTE: I don't know why, but do as the previous version did
if 'Matplotlib.nib' in files:
files.remove('Matplotlib.nib')
files = [os.path.join(root, filename) for filename in files]
root = root.replace(tail, 'mpl-data')
root = root[root.index('mpl-data'):]
d[root] = files
return list(d.items())
def matplotlib_fname():
"""
Get the location of the config file.
The file location is determined in the following order
- `$PWD/matplotlibrc`
- environment variable `MATPLOTLIBRC`
- `$MPLCONFIGDIR/matplotlib`
- On Linux,
- `$HOME/.matplotlib/matplotlibrc`, if it exists
- or `$XDG_CONFIG_HOME/matplotlib/matplotlibrc` (if
$XDG_CONFIG_HOME is defined)
- or `$HOME/.config/matplotlib/matplotlibrc` (if
$XDG_CONFIG_HOME is not defined)
- On other platforms,
- `$HOME/.matplotlib/matplotlibrc` if `$HOME` is defined.
- Lastly, it looks in `$MATPLOTLIBDATA/matplotlibrc` for a
system-defined copy.
"""
if six.PY2:
cwd = os.getcwdu()
else:
cwd = os.getcwd()
fname = os.path.join(cwd, 'matplotlibrc')
if os.path.exists(fname):
return fname
if 'MATPLOTLIBRC' in os.environ:
path = os.environ['MATPLOTLIBRC']
if os.path.exists(path):
fname = os.path.join(path, 'matplotlibrc')
if os.path.exists(fname):
return fname
configdir = _get_configdir()
if configdir is not None:
fname = os.path.join(configdir, 'matplotlibrc')
if os.path.exists(fname):
home = get_home()
if (sys.platform.startswith('linux') and
home is not None and
os.path.exists(os.path.join(
home, '.matplotlib', 'matplotlibrc'))):
warnings.warn(
"Found matplotlib configuration in ~/.matplotlib/. "
"To conform with the XDG base directory standard, "
"this configuration location has been deprecated "
"on Linux, and the new location is now %s/matplotlib/. "
"Please move your configuration there to ensure that "
"matplotlib will continue to find it in the future." %
_get_xdg_config_dir())
return os.path.join(
home, '.matplotlib', 'matplotlibrc')
return fname
path = get_data_path() # guaranteed to exist or raise
fname = os.path.join(path, 'matplotlibrc')
if not os.path.exists(fname):
warnings.warn('Could not find matplotlibrc; using defaults')
return fname
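# Usage sketch (illustrative only; which file is returned follows the search
# order documented in matplotlib_fname above):
#
#     import matplotlib
#     print(matplotlib.matplotlib_fname())   # path of the rc file in use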
_deprecated_map = {
'text.fontstyle': ('font.style',lambda x: x),
'text.fontangle': ('font.style',lambda x: x),
'text.fontvariant': ('font.variant',lambda x: x),
'text.fontweight': ('font.weight',lambda x: x),
'text.fontsize': ('font.size',lambda x: x),
'tick.size' : ('tick.major.size',lambda x: x),
'svg.embed_char_paths' : ('svg.fonttype',lambda x: "path" if x else "none"),
'savefig.extension' : ('savefig.format',lambda x: x),
}
_deprecated_ignore_map = {
}
_obsolete_set = set(['tk.pythoninspect', ])
_all_deprecated = set(chain(_deprecated_ignore_map,
_deprecated_map, _obsolete_set))
_rcparam_warn_str = ("Trying to set {key} to {value} via the {func} "
"method of RcParams which does not validate cleanly. "
"This warning will turn into an Exception in 1.5. "
"If you think {value} should validate correctly for "
"rcParams[{key}] "
"please create an issue on github."
)
class RcParams(dict):
"""
A dictionary object including validation
validating functions are defined and associated with rc parameters in
:mod:`matplotlib.rcsetup`
"""
validate = dict((key, converter) for key, (default, converter) in
six.iteritems(defaultParams)
if key not in _all_deprecated)
msg_depr = "%s is deprecated and replaced with %s; please use the latter."
msg_depr_ignore = "%s is deprecated and ignored. Use %s"
# validate values on the way in
def __init__(self, *args, **kwargs):
for k, v in six.iteritems(dict(*args, **kwargs)):
try:
self[k] = v
except (ValueError, RuntimeError):
# force the issue
warnings.warn(_rcparam_warn_str.format(key=repr(k),
value=repr(v),
func='__init__'))
dict.__setitem__(self, k, v)
def __setitem__(self, key, val):
try:
if key in _deprecated_map:
alt_key, alt_val = _deprecated_map[key]
warnings.warn(self.msg_depr % (key, alt_key))
key = alt_key
val = alt_val(val)
elif key in _deprecated_ignore_map:
alt = _deprecated_ignore_map[key]
warnings.warn(self.msg_depr_ignore % (key, alt))
return
try:
cval = self.validate[key](val)
except ValueError as ve:
raise ValueError("Key %s: %s" % (key, str(ve)))
dict.__setitem__(self, key, cval)
except KeyError:
raise KeyError(
'%s is not a valid rc parameter. See rcParams.keys() for a '
'list of valid parameters.' % (key,))
def __getitem__(self, key):
if key in _deprecated_map:
alt_key, alt_val = _deprecated_map[key]
warnings.warn(self.msg_depr % (key, alt_key))
key = alt_key
elif key in _deprecated_ignore_map:
alt = _deprecated_ignore_map[key]
warnings.warn(self.msg_depr_ignore % (key, alt))
key = alt
return dict.__getitem__(self, key)
# http://stackoverflow.com/questions/2390827/how-to-properly-subclass-dict-and-override-get-set
# the default dict `update` does not use __setitem__,
# so rcParams.update(...) (such as in seaborn) side-steps
# all of the validation; we override update to force it
# through __setitem__
def update(self, *args, **kwargs):
for k, v in six.iteritems(dict(*args, **kwargs)):
try:
self[k] = v
except (ValueError, RuntimeError):
# force the issue
warnings.warn(_rcparam_warn_str.format(key=repr(k),
value=repr(v),
func='update'))
dict.__setitem__(self, k, v)
def __repr__(self):
import pprint
class_name = self.__class__.__name__
indent = len(class_name) + 1
repr_split = pprint.pformat(dict(self), indent=1,
width=80 - indent).split('\n')
repr_indented = ('\n' + ' ' * indent).join(repr_split)
return '{0}({1})'.format(class_name, repr_indented)
def __str__(self):
return '\n'.join('{0}: {1}'.format(k, v)
for k, v in sorted(self.items()))
def keys(self):
"""
Return sorted list of keys.
"""
k = list(dict.keys(self))
k.sort()
return k
def values(self):
"""
Return values in order of sorted keys.
"""
return [self[k] for k in self.keys()]
def find_all(self, pattern):
"""
Return the subset of this RcParams dictionary whose keys match,
using :func:`re.search`, the given ``pattern``.
.. note::
Changes to the returned dictionary are *not* propagated to
the parent RcParams dictionary.
"""
import re
pattern_re = re.compile(pattern)
return RcParams((key, value)
for key, value in self.items()
if pattern_re.search(key))
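# Usage sketch for RcParams (illustrative only; the key is a real rc parameter,
# the value and pattern are arbitrary examples):
#
#     rcParams['lines.linewidth'] = 2        # validated on assignment
#     rcParams.find_all(r'^lines\.')         # RcParams subset whose keys match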
def rc_params(fail_on_error=False):
"""Return a :class:`matplotlib.RcParams` instance from the
default matplotlib rc file.
"""
fname = matplotlib_fname()
if not os.path.exists(fname):
# this should never happen, default in mpl-data should always be found
message = 'could not find rc file; returning defaults'
ret = RcParams([(key, default) for key, (default, _) in
six.iteritems(defaultParams)
if key not in _all_deprecated])
warnings.warn(message)
return ret
return rc_params_from_file(fname, fail_on_error)
URL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\')
def is_url(filename):
"""Return True if string is an http, ftp, or file URL path."""
return URL_REGEX.match(filename) is not None
def _url_lines(f):
# Compatibility for urlopen in python 3, which yields bytes.
for line in f:
yield line.decode('utf8')
@contextlib.contextmanager
def _open_file_or_url(fname):
if is_url(fname):
f = urlopen(fname)
yield _url_lines(f)
f.close()
else:
with open(fname) as f:
yield f
_error_details_fmt = 'line #%d\n\t"%s"\n\tin file "%s"'
def _rc_params_in_file(fname, fail_on_error=False):
"""Return :class:`matplotlib.RcParams` from the contents of the given file.
Unlike `rc_params_from_file`, the configuration class only contains the
parameters specified in the file (i.e. default values are not filled in).
"""
cnt = 0
rc_temp = {}
with _open_file_or_url(fname) as fd:
for line in fd:
cnt += 1
strippedline = line.split('#', 1)[0].strip()
if not strippedline: continue
tup = strippedline.split(':', 1)
if len(tup) != 2:
error_details = _error_details_fmt % (cnt, line, fname)
warnings.warn('Illegal %s' % error_details)
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
warnings.warn('Duplicate key in file "%s", line #%d' % \
(fname, cnt))
rc_temp[key] = (val, line, cnt)
config = RcParams()
for key in ('verbose.level', 'verbose.fileo'):
if key in rc_temp:
val, line, cnt = rc_temp.pop(key)
if fail_on_error:
config[key] = val # try to convert to proper type or raise
else:
try:
config[key] = val # try to convert to proper type or skip
except Exception as msg:
error_details = _error_details_fmt % (cnt, line, fname)
warnings.warn('Bad val "%s" on %s\n\t%s' %
(val, error_details, msg))
for key, (val, line, cnt) in six.iteritems(rc_temp):
if key in defaultParams:
if fail_on_error:
config[key] = val # try to convert to proper type or raise
else:
try:
config[key] = val # try to convert to proper type or skip
except Exception as msg:
error_details = _error_details_fmt % (cnt, line, fname)
warnings.warn('Bad val "%s" on %s\n\t%s' %
(val, error_details, msg))
elif key in _deprecated_ignore_map:
warnings.warn('%s is deprecated. Update your matplotlibrc to use '
'%s instead.'% (key, _deprecated_ignore_map[key]))
else:
print("""
Bad key "%s" on line %d in
%s.
You probably need to get an updated matplotlibrc file from
http://matplotlib.sf.net/_static/matplotlibrc or from the matplotlib source
distribution""" % (key, cnt, fname), file=sys.stderr)
return config
def rc_params_from_file(fname, fail_on_error=False, use_default_template=True):
"""Return :class:`matplotlib.RcParams` from the contents of the given file.
Parameters
----------
fname : str
Name of file parsed for matplotlib settings.
fail_on_error : bool
If True, raise an error when the parser fails to convert a parameter.
use_default_template : bool
If True, initialize with default parameters before updating with those
in the given file. If False, the configuration class only contains the
parameters specified in the file. (Useful for updating dicts.)
"""
config_from_file = _rc_params_in_file(fname, fail_on_error)
if not use_default_template:
return config_from_file
iter_params = six.iteritems(defaultParams)
config = RcParams([(key, default) for key, (default, _) in iter_params
if key not in _all_deprecated])
config.update(config_from_file)
verbose.set_level(config['verbose.level'])
verbose.set_fileo(config['verbose.fileo'])
if config['datapath'] is None:
config['datapath'] = get_data_path()
if not config['text.latex.preamble'] == ['']:
verbose.report("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
"""% '\n'.join(config['text.latex.preamble']), 'helpful')
verbose.report('loaded rc file %s'%fname)
return config
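# Usage sketch (illustrative only; 'my_style.rc' is a hypothetical file name):
#
#     partial = rc_params_from_file('my_style.rc', use_default_template=False)
#     rcParams.update(partial)   # overlay only the keys defined in that file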
# this is the instance used by the matplotlib classes
rcParams = rc_params()
if rcParams['examples.directory']:
# paths that are intended to be relative to matplotlib_fname()
# are allowed for the examples.directory parameter.
# However, we will need to fully qualify the path because
# Sphinx requires absolute paths.
if not os.path.isabs(rcParams['examples.directory']):
_basedir, _fname = os.path.split(matplotlib_fname())
# Sometimes matplotlib_fname() can return relative paths,
# Also, using realpath() guarantees that Sphinx will use
# the same path that matplotlib sees (in case of weird symlinks).
_basedir = os.path.realpath(_basedir)
_fullpath = os.path.join(_basedir, rcParams['examples.directory'])
rcParams['examples.directory'] = _fullpath
rcParamsOrig = rcParams.copy()
rcParamsDefault = RcParams([(key, default) for key, (default, converter) in
six.iteritems(defaultParams)
if key not in _all_deprecated])
rcParams['ps.usedistiller'] = checkdep_ps_distiller(
rcParams['ps.usedistiller'])
rcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])
if rcParams['axes.formatter.use_locale']:
import locale
locale.setlocale(locale.LC_ALL, '')
def rc(group, **kwargs):
"""
Set the current rc params. Group is the grouping for the rc, e.g.,
for ``lines.linewidth`` the group is ``lines``, for
``axes.facecolor``, the group is ``axes``, and so on. Group may
also be a list or tuple of group names, e.g., (*xtick*, *ytick*).
*kwargs* is a dictionary attribute name/value pairs, e.g.,::
rc('lines', linewidth=2, color='r')
sets the current rc params and is equivalent to::
rcParams['lines.linewidth'] = 2
rcParams['lines.color'] = 'r'
The following aliases are available to save typing for interactive
users:
===== =================
Alias Property
===== =================
'lw' 'linewidth'
'ls' 'linestyle'
'c' 'color'
'fc' 'facecolor'
'ec' 'edgecolor'
'mew' 'markeredgewidth'
'aa' 'antialiased'
===== =================
Thus you could abbreviate the above rc command as::
rc('lines', lw=2, c='r')
Note you can use python's kwargs dictionary facility to store
dictionaries of default parameters. e.g., you can customize the
font rc as follows::
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 'larger'}
rc('font', **font) # pass in the font dict as kwargs
This enables you to easily switch between several configurations.
Use :func:`~matplotlib.pyplot.rcdefaults` to restore the default
rc params after changes.
"""
aliases = {
'lw' : 'linewidth',
'ls' : 'linestyle',
'c' : 'color',
'fc' : 'facecolor',
'ec' : 'edgecolor',
'mew' : 'markeredgewidth',
'aa' : 'antialiased',
}
if is_string_like(group):
group = (group,)
for g in group:
for k, v in six.iteritems(kwargs):
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
try:
rcParams[key] = v
except KeyError:
raise KeyError('Unrecognized key "%s" for group "%s" and name "%s"' %
(key, g, name))
def rcdefaults():
"""
Restore the default rc params. These are not the params loaded by
the rc file, but mpl's internal params. See rc_file_defaults for
reloading the default params from the rc file
"""
rcParams.clear()
rcParams.update(rcParamsDefault)
def rc_file(fname):
"""
Update rc params from file.
"""
rcParams.update(rc_params_from_file(fname))
class rc_context(object):
"""
Return a context manager for managing rc settings.
This allows one to do::
with mpl.rc_context(fname='screen.rc'):
plt.plot(x, a)
with mpl.rc_context(fname='print.rc'):
plt.plot(x, b)
plt.plot(x, c)
The 'a' vs 'x' and 'c' vs 'x' plots would have settings from
'screen.rc', while the 'b' vs 'x' plot would have settings from
'print.rc'.
A dictionary can also be passed to the context manager::
with mpl.rc_context(rc={'text.usetex': True}, fname='screen.rc'):
plt.plot(x, a)
The 'rc' dictionary takes precedence over the settings loaded from
'fname'. Passing a dictionary only is also valid.
"""
def __init__(self, rc=None, fname=None):
self.rcdict = rc
self.fname = fname
self._rcparams = rcParams.copy()
try:
if self.fname:
rc_file(self.fname)
if self.rcdict:
rcParams.update(self.rcdict)
except:
# if anything goes wrong, revert rc parameters and re-raise
rcParams.clear()
rcParams.update(self._rcparams)
raise
def __enter__(self):
return self
def __exit__(self, type, value, tb):
rcParams.update(self._rcparams)
def rc_file_defaults():
"""
Restore the default rc params from the original matplotlib rc that
was loaded
"""
rcParams.update(rcParamsOrig)
_use_error_msg = """ This call to matplotlib.use() has no effect
because the backend has already been chosen;
matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
or matplotlib.backends is imported for the first time.
"""
def use(arg, warn=True, force=False):
"""
Set the matplotlib backend to one of the known backends.
The argument is case-insensitive. *warn* specifies whether a
warning should be issued if a backend has already been set up.
*force* is an **experimental** flag that tells matplotlib to
attempt to initialize a new backend by reloading the backend
module.
.. note::
This function must be called *before* importing pyplot for
the first time; or, if you are not using pyplot, it must be called
before importing matplotlib.backends. If warn is True, a warning
is issued if you try and call this after pylab or pyplot have been
loaded. In certain black magic use cases, e.g.
:func:`pyplot.switch_backend`, we are doing the reloading necessary to
make the backend switch work (in some cases, e.g., pure image
backends) so one can set warn=False to suppress the warnings.
To find out which backend is currently set, see
:func:`matplotlib.get_backend`.
"""
# Lets determine the proper backend name first
if arg.startswith('module://'):
name = arg
else:
# Lowercase only non-module backend names (modules are case-sensitive)
arg = arg.lower()
name = validate_backend(arg)
# Check if we've already set up a backend
if 'matplotlib.backends' in sys.modules:
# Warn only if called with a different name
if (rcParams['backend'] != name) and warn:
warnings.warn(_use_error_msg)
# Unless we've been told to force it, just return
if not force:
return
need_reload = True
else:
need_reload = False
# Store the backend name
rcParams['backend'] = name
# If needed we reload here because a lot of setup code is triggered on
# module import. See backends/__init__.py for more detail.
if need_reload:
reload(sys.modules['matplotlib.backends'])
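# Usage sketch (illustrative only; 'Agg' is just one valid backend name):
#
#     import matplotlib
#     matplotlib.use('Agg')            # must run before pyplot is first imported
#     import matplotlib.pyplot as plt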
def get_backend():
"""Return the name of the current backend."""
return rcParams['backend']
def interactive(b):
"""
Set interactive mode to boolean b.
If b is True, then draw after every plotting command, e.g., after xlabel
"""
rcParams['interactive'] = b
def is_interactive():
'Return true if plot mode is interactive'
return rcParams['interactive']
def tk_window_focus():
"""Return true if focus maintenance under TkAgg on win32 is on.
This currently works only for python.exe and IPython.exe.
Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on."""
if rcParams['backend'] != 'TkAgg':
return False
return rcParams['tk.window_focus']
# Now allow command line to override
# Allow command line access to the backend with -d (MATLAB compatible
# flag)
for s in sys.argv[1:]:
# cast to str because we are using unicode_literals,
# and argv is always str
if s.startswith(str('-d')) and len(s) > 2: # look for a -d flag
try:
use(s[2:])
except (KeyError, ValueError):
pass
# we don't want to assume all -d flags are backends, e.g., -debug
default_test_modules = [
'matplotlib.tests.test_agg',
'matplotlib.tests.test_animation',
'matplotlib.tests.test_arrow_patches',
'matplotlib.tests.test_artist',
'matplotlib.tests.test_axes',
'matplotlib.tests.test_backend_bases',
'matplotlib.tests.test_backend_pdf',
'matplotlib.tests.test_backend_pgf',
'matplotlib.tests.test_backend_ps',
'matplotlib.tests.test_backend_qt4',
'matplotlib.tests.test_backend_svg',
'matplotlib.tests.test_basic',
'matplotlib.tests.test_bbox_tight',
'matplotlib.tests.test_cbook',
'matplotlib.tests.test_coding_standards',
'matplotlib.tests.test_collections',
'matplotlib.tests.test_colorbar',
'matplotlib.tests.test_colors',
'matplotlib.tests.test_compare_images',
'matplotlib.tests.test_contour',
'matplotlib.tests.test_dates',
'matplotlib.tests.test_delaunay',
'matplotlib.tests.test_figure',
'matplotlib.tests.test_font_manager',
'matplotlib.tests.test_gridspec',
'matplotlib.tests.test_image',
'matplotlib.tests.test_legend',
'matplotlib.tests.test_lines',
'matplotlib.tests.test_mathtext',
'matplotlib.tests.test_mlab',
'matplotlib.tests.test_patches',
'matplotlib.tests.test_path',
'matplotlib.tests.test_patheffects',
'matplotlib.tests.test_pickle',
'matplotlib.tests.test_png',
'matplotlib.tests.test_quiver',
'matplotlib.tests.test_rcparams',
'matplotlib.tests.test_scale',
'matplotlib.tests.test_simplification',
'matplotlib.tests.test_spines',
'matplotlib.tests.test_streamplot',
'matplotlib.tests.test_style',
'matplotlib.tests.test_subplots',
'matplotlib.tests.test_table',
'matplotlib.tests.test_text',
'matplotlib.tests.test_ticker',
'matplotlib.tests.test_tightlayout',
'matplotlib.tests.test_transforms',
'matplotlib.tests.test_triangulation',
'mpl_toolkits.tests.test_mplot3d',
'mpl_toolkits.tests.test_axes_grid1',
]
def test(verbosity=1):
"""run the matplotlib test suite"""
old_backend = rcParams['backend']
try:
use('agg')
import nose
import nose.plugins.builtin
from .testing.noseclasses import KnownFailure
from nose.plugins.manager import PluginManager
from nose.plugins import multiprocess
# store the old values before overriding
plugins = []
plugins.append( KnownFailure() )
plugins.extend( [plugin() for plugin in nose.plugins.builtin.plugins] )
manager = PluginManager(plugins=plugins)
config = nose.config.Config(verbosity=verbosity, plugins=manager)
# Nose doesn't automatically instantiate all of the plugins in the
# child processes, so we have to provide the multiprocess plugin with
# a list.
multiprocess._instantiate_plugins = [KnownFailure]
success = nose.run(
defaultTest=default_test_modules,
config=config,
)
finally:
if old_backend.lower() != 'agg':
use(old_backend)
return success
test.__test__ = False # nose: this function is not a test
verbose.report('matplotlib version %s'%__version__)
verbose.report('verbose.level %s'%verbose.level)
verbose.report('interactive is %s'%is_interactive())
verbose.report('platform is %s'%sys.platform)
verbose.report('loaded modules: %s'%six.iterkeys(sys.modules), 'debug')
|
mit
|
AnaSula/NYU-Python-Programming-Class
|
course-2/project/get_data.py
|
1
|
2309
|
import pysolr
import requests
import codecs
import pandas as pd
class GetData:
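"""Fetch labelled documents from a Solr index for text classification.
Documents tagged with a regulated-activity term id from ``self.tids`` are
labelled 1 (positive), all others 0 (negative); results are returned as
pandas DataFrames with columns (entity_id, content, target).
"""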
conn = pysolr.Solr('https://cabidx1:Jkxkk3-POIO08723_eWWXeei1078TRw,[email protected]/solr/cab1')
def __init__(self):
self.tids=['1772']
def get_train_data(self):
for tid in self.tids:
self.positives_train = GetData.conn.search('*:*',fq='im_field_regulated_activities:%s' % tid, fl='entity_id, label,content', sort='entity_id asc', start=0, rows=300)
self.negatives_train = GetData.conn.search('*:*',fq='-im_field_regulated_activities:%s' % tid, fl='entity_id, label,content', sort='entity_id asc', start=0, rows=300)
self.d_train = []
for p in self.positives_train:
self.content= p['label']+ p['content']
self.entity_id = p['entity_id']
self.d_train.append((self.entity_id, self.content, 1))
for n in self.negatives_train:
self.content= n['label']+n['content']
self.entity_id=n['entity_id']
self.d_train.append((self.entity_id, self.content, 0))
self.data_train = pd.DataFrame(self.d_train, columns=('entity_id', 'content', 'target'))
return self.data_train
def get_valid_data(self):
for tid in self.tids:
self.positives_valid = GetData.conn.search('*:*',fq='im_field_regulated_activities:%s' % tid, fl='entity_id, label,content', sort='entity_id asc', start=301, rows=182)
self.negatives_valid = GetData.conn.search('*:*',fq='-im_field_regulated_activities:%s' % tid, fl='entity_id, label,content', sort='entity_id asc', start=301, rows=200)
self.d_valid = []
for p in self.positives_valid:
self.content= p['label']+ p['content']
self.entity_id = p['entity_id']
self.d_valid.append((self.entity_id, self.content, 1))
for n in self.negatives_valid:
self.content= n['label']+n['content']
self.entity_id=n['entity_id']
self.d_valid.append((self.entity_id, self.content, 0))
self.data_valid = pd.DataFrame(self.d_valid, columns=('entity_id', 'content', 'target'))
return self.data_valid
if __name__=="__main__":
get=GetData()
train_data=get.get_train_data()
train_data.to_csv("~/desktop/project_data/train_data.csv", sep=',', header= True, encoding='utf-8')
valid_data=get.get_valid_data()
valid_data.to_csv("~/desktop/project_data/valid_data.csv", sep=',', header= True, encoding='utf-8')
|
mit
|
RachitKansal/scikit-learn
|
examples/text/mlcomp_sparse_document_classification.py
|
292
|
4498
|
"""
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
|
bsd-3-clause
|
AnasGhrab/scikit-learn
|
examples/feature_selection/plot_feature_selection.py
|
249
|
2827
|
"""
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non-informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
|
bsd-3-clause
|
robbymeals/scikit-learn
|
examples/linear_model/plot_sgd_loss_functions.py
|
249
|
1095
|
"""
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
|
bsd-3-clause
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/force_codes/true_false_cases/continuous_hmm_final_true_prediction.py
|
1
|
9420
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/384')
from data_384 import Fmat_original
# Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_sigma(fvec):
index = 0
m,n = np.shape(fvec)
#print m,n
mu = np.matrix(np.zeros((10,1)))
sigma = np.matrix(np.zeros((10,1)))
DIVS = m/10
while (index < 10):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),0:]
#if index == 1:
#print temp_fvec
mu[index] = scp.mean(temp_fvec)
sigma[index] = scp.std(temp_fvec)
index = index+1
return mu,sigma
# Returns sequence given raw data
def create_seq(fvec):
m,n = np.shape(fvec)
#print m,n
seq = np.matrix(np.zeros((10,n)))
DIVS = m/10
for i in range(n):
index = 0
while (index < 10):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),i]
#if index == 1:
#print temp_fvec
seq[index,i] = scp.mean(temp_fvec)
index = index+1
return seq
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
#print " "
#print 'Total_Matrix_Shape:',m_tot,n_tot
mu_rf,sigma_rf = feature_to_mu_sigma(Fmat[0:121,0:35])
mu_rm,sigma_rm = feature_to_mu_sigma(Fmat[0:121,35:70])
mu_sf,sigma_sf = feature_to_mu_sigma(Fmat[0:121,70:105])
mu_sm,sigma_sm = feature_to_mu_sigma(Fmat[0:121,105:140])
mu_obj1,sigma_obj1 = feature_to_mu_sigma(Fmat[0:121,140:141])
mu_obj2,sigma_obj2 = feature_to_mu_sigma(Fmat[0:121,141:142])
#print [mu_rf, sigma_rf]
# HMM - Implementation:
# 10 Hidden States
# Max. Force (for now), Contact Area (not now), and Contact Motion (not now) as continuous Gaussian observations from each hidden state
# Four HMM models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
# Transition probabilities obtained as an upper-diagonal matrix (to be trained using Baum-Welch)
# Each new object is classified according to which model it represents most closely.
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf = np.zeros((10,2))
B_rm = np.zeros((10,2))
B_sf = np.zeros((10,2))
B_sm = np.zeros((10,2))
for num_states in range(10):
B_rf[num_states,0] = mu_rf[num_states]
B_rf[num_states,1] = sigma_rf[num_states]
B_rm[num_states,0] = mu_rm[num_states]
B_rm[num_states,1] = sigma_rm[num_states]
B_sf[num_states,0] = mu_sf[num_states]
B_sf[num_states,1] = sigma_sf[num_states]
B_sm[num_states,0] = mu_sm[num_states]
B_sm[num_states,1] = sigma_sm[num_states]
B_rf = B_rf.tolist()
B_rm = B_rm.tolist()
B_sf = B_sf.tolist()
B_sm = B_sm.tolist()
# pi - initial probabilities per state
pi = [0.1] * 10
# generate RF, RM, SF, SM models from parameters
model_rf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf, pi) # Will be Trained
model_rm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm, pi) # Will be Trained
model_sf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf, pi) # Will be Trained
model_sm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm, pi) # Will be Trained
# For Training
total_seq = Fmat[0:121,:]
m_total, n_total = np.shape(total_seq)
#print 'Total_Sequence_Shape:', m_total, n_total
train_seq_rf = (np.array(total_seq[0:121,0:35]).T).tolist()
train_seq_rm = (np.array(total_seq[0:121,35:70]).T).tolist()
train_seq_sf = (np.array(total_seq[0:121,70:105]).T).tolist()
train_seq_sm = (np.array(total_seq[0:121,105:140]).T).tolist()
#print train_seq_rf
final_ts_rf = ghmm.SequenceSet(F,train_seq_rf)
final_ts_rm = ghmm.SequenceSet(F,train_seq_rm)
final_ts_sf = ghmm.SequenceSet(F,train_seq_sf)
final_ts_sm = ghmm.SequenceSet(F,train_seq_sm)
model_rf.baumWelch(final_ts_rf)
model_rm.baumWelch(final_ts_rm)
model_sf.baumWelch(final_ts_sf)
model_sm.baumWelch(final_ts_sm)
print " "
print "TRAINED RIGID-FIXED MODEL"
print model_rf
print " "
print " "
print "TRAINED RIGID-MOVABLE MODEL"
print model_rm
print " "
print " "
print "TRAINED SOFT-FIXED MODEL"
print model_sf
print " "
print " "
print "TRAINED SOFT-MOVABLE MODEL"
print model_sm
print " "
print " "
# Test New Objects
test_seq_obj1 = (np.array(total_seq[0:121,18:19]).T).tolist()
#print test_seq_obj1
test_seq_obj2 = (np.array(total_seq[0:121,81:82]).T).tolist()
new_test_seq_obj1 = np.array(sum(test_seq_obj1,[]))
#print new_test_seq_obj1
new_test_seq_obj2 = np.array(sum(test_seq_obj2,[]))
ts_obj1 = new_test_seq_obj1
ts_obj2 = new_test_seq_obj2
final_ts_obj1 = ghmm.EmissionSequence(F,ts_obj1.tolist())
final_ts_obj2 = ghmm.EmissionSequence(F,ts_obj2.tolist())
#print final_ts_obj2
# Find Viterbi Path
path_rf_obj1 = model_rf.viterbi(final_ts_obj1)
path_rm_obj1 = model_rm.viterbi(final_ts_obj1)
path_sf_obj1 = model_sf.viterbi(final_ts_obj1)
path_sm_obj1 = model_sm.viterbi(final_ts_obj1)
path_rf_obj2 = model_rf.viterbi(final_ts_obj2)
path_rm_obj2 = model_rm.viterbi(final_ts_obj2)
path_sf_obj2 = model_sf.viterbi(final_ts_obj2)
path_sm_obj2 = model_sm.viterbi(final_ts_obj2)
obj1 = max(path_rf_obj1[1],path_rm_obj1[1],path_sf_obj1[1],path_sm_obj1[1])
obj2 = max(path_rf_obj2[1],path_rm_obj2[1],path_sf_obj2[1],path_sm_obj2[1])
print " "
if obj1 == path_rf_obj1[1]:
print "ONE OBJECT IS RIGID-FIXED"
elif obj1 == path_rm_obj1[1]:
print "ONE OBJECT IS RIGID-MOVABLE"
elif obj1 == path_sf_obj1[1]:
print "ONE OBJECT IS SOFT-FIXED"
else:
print "ONE OBJECT IS SOFT-MOVABLE"
print " "
if obj2 == path_rf_obj2[1]:
print "THE OTHER OBJECT IS RIGID-FIXED"
elif obj2 == path_rm_obj2[1]:
print "THE OTHER OBJECT IS RIGID-MOVABLE"
elif obj2 == path_sf_obj2[1]:
print "THE OTHER OBJECT IS SOFT-FIXED"
else:
print "THE OTHER OBJECT IS SOFT-MOVABLE"
print " "
# For Plotting Some Cases
# True Prediction Case (Example: Select total_seq[0:121,18:19] and total_seq[0:121,81:82])
actual_data1 = total_seq[0:121,18:19]
actual_data2 = total_seq[0:121,81:82]
actual_time = np.arange(0,1.21,0.01)
i = 0
reconstructed_data1 = np.zeros((121,1))
reconstructed_data2 = np.zeros((121,1))
while (i < 121):
reconstructed_data1[i] = mu_rf[path_rf_obj1[0][i]]
reconstructed_data2[i] = mu_sf[path_sf_obj2[0][i]]
i = i+1
mean_data1 = mu_rf
mean_data2 = mu_sf
upper_var1 = mu_rf + sigma_rf
upper_var2 = mu_sf + sigma_sf
lower_var1 = mu_rf - sigma_rf
lower_var2 = mu_sf - sigma_sf
short_xaxis = np.linspace(0,1.2,10)
#print reconstructed_data2
#print mean_data2
mpu.figure(1)
pp.title('True Prediction Example: Rigid-Fixed Category',fontsize='24')
pp.xlabel('Time (s)',fontsize='24')
pp.ylabel('Maximum Force (N)',fontsize='24')
pp.plot(actual_time, actual_data1, actual_time, reconstructed_data1, linewidth=3.0)
pp.plot(short_xaxis, mean_data1, short_xaxis, upper_var1, short_xaxis, lower_var1, linewidth=3.0)
pp.legend(["Actual_Force", "Reconstructed Force using HMM", "Mean", "Upper Bound", "Lower Bound"], loc=2)
pp.grid(True)
mpu.figure(2)
pp.title('True Prediction Example: Soft-Fixed Category',fontsize='24')
pp.xlabel('Time (s)',fontsize='24')
pp.ylabel('Maximum Force (N)',fontsize='24')
pp.plot(actual_time, actual_data2, actual_time, reconstructed_data2, linewidth=3.0)
pp.plot(short_xaxis, mean_data2, short_xaxis, upper_var2, short_xaxis, lower_var2, linewidth=3.0)
pp.legend(["Actual_Force", "Reconstructed Force using HMM", "Mean", "Upper Bound", "Lower Bound"], loc=2)
pp.grid(True)
pp.show()
|
mit
|
mfouesneau/ezdata
|
ezdata/dask/hdf5.py
|
1
|
14660
|
""" Wrapper of h5py to dask.dataframe
This package allows one to load an HDF5 table into a dask.dataframe regardless
of its structure.
"""
from __future__ import absolute_import, division, print_function
from math import ceil
from glob import glob
from collections import OrderedDict
import os
import numpy as np
import pandas as pd
import h5py
from dask.base import tokenize
import dask.dataframe as dd
try:
from tqdm import tqdm
except ImportError:
# if not present still works
tqdm = lambda x: x
def _get_columns(grp, key):
""" Get data columns regardless of wether grp or grp/data is the array"""
if isinstance(grp[key], h5py.Group):
return grp[key + '/data']
return grp[key]
def _get_group_info(path, grouppath, keys):
""" Get metadata about a group in the given file
Parameters
----------
path: str
path to hdf5 file
grouppath: str
which group
keys: seq(str)
which columns to read
Returns
-------
nrows: int
number of data entries
keys:
meta:
categoricals:
"""
with h5py.File(path, "r") as input_file:
grp = input_file[grouppath]
if keys is None:
keys = list(grp.keys())
categoricals = {}
for key in keys:
dtype_ = h5py.check_dtype(enum=_get_columns(grp, key).dtype)
if dtype_ is not None:
categoricals[key] = sorted(dtype_, key=dtype_.__getitem__)
# Meta is an empty dataframe that serves as a compound "dtype"
meta = pd.DataFrame(
{key: np.array([], dtype=_get_columns(grp, key).dtype)
for key in keys},
columns=keys)
for key in categoricals:
meta[key] = pd.Categorical([], categories=categoricals[key],
ordered=True)
nrows = len(_get_columns(grp, keys[0]))
return nrows, keys, meta, categoricals
def _slice_dataset(filepath, grouppath, key, slc, lock=None):
""" Get a slice of the dataset """
try:
if lock is not None:
lock.acquire()
with h5py.File(filepath, "r") as input_file:
return _get_columns(input_file[grouppath], key)[slc]
finally:
if lock is not None:
lock.release()
def _slice_group(filepath, grouppath, keys, slc, lock=None):
""" Get a slice of a given group """
try:
if lock is not None:
lock.acquire()
with h5py.File(filepath, "r") as input_file:
return {key: _get_columns(input_file[grouppath], key)[slc]
for key in keys}
finally:
if lock is not None:
lock.release()
def _restore_categories(data, categorical_columns):
""" Restore categories the data """
for key, category_dict in categorical_columns.items():
data[key] = pd.Categorical.from_codes(data[key], category_dict,
ordered=True)
return data
class _H5Collector:
"""
Extract shapes and dtypes of all array objects in a given hdf5 file.
It does so recursively and only reports array objects.
This allows one to gather statistics and run the checks that are necessary
to concatenate datasets.
Properties
----------
names: dict
field names and shapes
dtypes: dict
contains the dtype of the registered names
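Example (a sketch; the file names are assumptions):
>>> hls = _H5Collector()
>>> hls.add('part1.hdf5').add('part2.hdf5') # doctest: +SKIP
>>> print(hls) # doctest: +SKIP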
"""
def __init__(self):
""" Constructor """
# Store the columns and shapes in order
self.names = OrderedDict()
self.dtypes = {}
def __repr__(self):
""" Representation """
max_key_length = max([len(k) for k in self.names.keys()])
fmt = ('{key:>' + str(max_key_length) + 's}: {dtype:10s} {shape}')
text = [fmt.format(key=key, shape=shape, dtype=str(self.dtypes[key]))
for key, shape in self.names.items()]
return '\n'.join(text)
def __call__(self, name, h5obj):
""" apply the collector to a new object.
This method is called by `h5py.File.visititems`
within `_H5Collector.add`
"""
# only h5py datasets have dtype attribute, so we can search on this
if hasattr(h5obj, 'dtype') and hasattr(h5obj, 'shape'):
if name not in self.dtypes:
self.dtypes[name] = h5obj.dtype
elif (self.dtypes[name].char == 'S') & (h5obj.dtype.char == 'S'):
# String length updates
dt_size = max(1, max(self.dtypes[name].itemsize, h5obj.dtype.itemsize))
self.dtypes[name] = np.dtype('S' + str(dt_size))
elif self.dtypes[name] != h5obj.dtype:
raise RuntimeError('Type mismatch in {0:s}'.format(name))
try:
shape_x, shape_y = h5obj.shape
shape = self.names.get(name, (0, shape_y))
if shape_y != shape[1]:
raise RuntimeError('Shape mismatch in {0:s}'.format(name))
self.names[name] = shape[0] + shape_x, shape_y
except ValueError:
shape_x, = h5obj.shape
shape, = self.names.get(name, (0,))
self.names[name] = (shape + shape_x, )
def add(self, filename):
""" Add filename to the collection
Parameters
----------
filename : str
file to add to the collection
Returns
-------
self: _H5Collector
itself
"""
with h5py.File(filename, 'r') as datafile:
datafile.visititems(self)
return self
def _ignore_multidimensional_keys(filename, grouppath=None):
""" Check keys to make sure not multi-dimensional arrays are provided """
hls = _H5Collector()
if grouppath is not None:
with h5py.File(filename, 'r') as datafile:
datafile[grouppath].visititems(hls)
else:
hls.add(filename)
keys = [name.replace('/data', '')
for (name, shape) in hls.names.items() if len(shape) < 2]
return keys
def read_table(filepath, grouppath='/', keys=None, chunksize=int(10e6),
index=None, lock=None, ignore_nd_data=True):
"""
Create a dask dataframe around a column-oriented table in HDF5.
A table is a group containing equal-length 1D datasets.
Parameters
----------
filepath: str, seq(str)
path to the filename or pattern to the tables to open at once.
This may be also a sequence of files that will be concatenated.
grouppath : str
tree path to the HDF5 group storing the table.
keys : list, optional
list of HDF5 Dataset keys, default is to use all keys in the group
chunksize : int, optional
Chunk size
index : str, optional
Sorted column to use as index
lock : multiprocessing.Lock, optional
Lock to serialize HDF5 read/write access. Default is no lock.
ignore_nd_data: bool, optional
Set to safely ignore keys of multidimensional data arrays
Note that dask/pandas DataFrame do not support multidimensional data
Returns
-------
:py:class:`dask.dataframe.DataFrame`
Notes
-----
Learn more about the `dask <https://docs.dask.org/en/latest/>`_ project.
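Examples
--------
A minimal sketch; the file name and group path below are assumptions:
>>> ddf = read_table('catalog.hdf5', grouppath='/data', chunksize=1000000) # doctest: +SKIP
>>> ddf.head() # doctest: +SKIP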
"""
# handle pattern input
try:
glob_ = glob(filepath)
except TypeError:
glob_ = filepath
if len(glob_) > 1:
dfs = [read_table(name_k, grouppath=grouppath, keys=keys,
chunksize=chunksize, index=index, lock=lock)
for name_k in glob_]
return dd.concat(dfs, interleave_partitions=True)
else:
filepath = glob_[0]
if not os.path.exists(filepath):
raise FileNotFoundError(filepath + ' does not seem to exist.')
if ignore_nd_data:
keys_1d = _ignore_multidimensional_keys(filepath, grouppath)
if keys is None:
keys = keys_1d
else:
keys = [key for key in keys if key in keys_1d]
nrows, keys, meta, categoricals = _get_group_info(filepath,
grouppath,
keys)
# Make a unique task name
token = tokenize(filepath, grouppath, chunksize, keys)
task_name = "daskify-h5py-table-" + token
# Partition the table
divisions = (0,) + tuple(range(-1, nrows, chunksize))[1:]
if divisions[-1] != nrows - 1:
divisions = divisions + (nrows - 1,)
# Build the task graph
dsk = {}
for i in range(0, int(ceil(nrows / chunksize))):
slc = slice(i * chunksize, (i + 1) * chunksize)
data_dict = (_slice_group, filepath, grouppath, keys, slc, lock)
if categoricals:
data_dict = (_restore_categories, data_dict, categoricals)
dsk[task_name, i] = (pd.DataFrame, data_dict, None, meta.columns)
# Generate ddf from dask graph
_df = dd.DataFrame(dsk, task_name, meta, divisions)
if index is not None:
_df = _df.set_index(index, sorted=True, drop=False)
return _df
def read_vaex_table(filepath, grouppath='/table/columns',
keys=None, chunksize=int(10e6),
index=None, lock=None, ignore_nd_data=True):
"""
Shortcut to :py:func:`read_table`
where the default grouppath is set to Vaex format.
Returns
-------
:py:class:`dask.dataframe.DataFrame`
"""
return read_table(filepath, grouppath=grouppath, keys=keys,
chunksize=chunksize, index=index, lock=lock)
def concatenate(*args, **kwargs):
""" Concatenate multiple HDF5 files with the same structure
This routine is written to be as flexible as possible: it accepts any data
shapes (unlike vaex, pandas, dask, etc.) and copies the data into the final
output file.
Parameters
----------
args: seq(str)
filenames to concatenate
pattern: str, optional
pattern of files to concatenate
outputfile: str, optional
filename of the output file containing the data
verbose: bool, optional
set to display information
Returns
-------
outputfile: str
the filename of the result
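Examples
--------
A minimal sketch; the file names below are assumptions:
>>> concatenate('part1.hdf5', 'part2.hdf5', outputfile='all.hdf5') # doctest: +SKIP
'all.hdf5'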
"""
pattern = kwargs.get('pattern', None)
if (pattern is None) and (not args):
raise RuntimeError('Must provide either a pattern or a list of files')
if not args:
args = glob(pattern)
output = kwargs.get('outputfile', None)
if output is None:
output = '.'.join(args[0].split('.')[:-1]) + '_concat.hdf5'
verbose = kwargs.get('verbose', False)
def info(*args, **kwargs):
if verbose:
print(*args, **kwargs)
info('Collecting information from {0:d} files'.format(len(args)))
hls = _H5Collector()
for fname in tqdm(args):
hls.add(fname)
with h5py.File(output, 'w') as outputfile:
# creating the final file with all empty structure
info('Creating {0:s} with empty structure'.format(output))
for name, shape in hls.names.items():
group_name = name.split('/')
group_ = '/'.join(group_name[:-1])
name_ = group_name[-1]
dtype = hls.dtypes[name]
outputfile.create_group(group_)\
.create_dataset(name_, shape=shape, dtype=dtype)
# copy the data over
index = 0
info('Copying data')
for iternum, fname in enumerate(tqdm(args), 1):
with h5py.File(fname, 'r') as fin:
keys = list(hls.names.keys())
length = len(fin[keys[0]])
for name in hls.names.keys():
data = fin[name]
length = len(data)
outputfile[name][index: length + index] = data[:]
index += length
info('... [{0:d} / {1:d}] - done with {2:s}'.format(iternum, len(args), fname))
return output
def to_vaex_file(ds, output, grouppath='/table/columns',
keys=None, **kwargs):
"""
export a dask dataframe into a vaex formatted hdf5 file.
Parameters
----------
ds: dask.DataFrame
data to export
output: str
filename of the exported table
grouppath: str
vaex default path to the dataset
keys: sequence(str)
subset of columns to export (default all)
verbose: bool
set to have information messages
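Example (a sketch; the dataframe ``ddf`` and the file name are assumptions):
>>> to_vaex_file(ddf, 'catalog_vaex.hdf5', verbose=True) # doctest: +SKIP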
"""
verbose = kwargs.get('verbose', False)
def info(*args, **kwargs):
if verbose:
print(*args, **kwargs)
dtypes = ds.dtypes.to_dict()
if keys is not None:
dtypes = dict(((name, dtypes[name]) for name in keys))
for name, dtype in dtypes.items():
if 'numpy.object_' in str(dtype.type):
# we have a pandas string that does not work well with h5py
maxlen = ds[name].dropna().str.len().max().compute().astype(int)
col_type = np.dtype('{0:s}{1:d}'.format('S', maxlen))
dtypes[name] = col_type
info('Object type conversion: "{0:s}" as "{1:s}"'.format(
name, str(col_type)))
length = ds.shape[0].compute()
def construct_vaex_path(name):
path = '{grouppath:s}/{name:s}/data'
return path.format(name=name, grouppath=grouppath)
with h5py.File(output, 'w') as outputfile:
# creating the final file with all empty structure
info('Creating {0:s} with empty structure'.format(output))
for name, dtype in dtypes.items():
group_name = construct_vaex_path(name).split('/')
group_ = '/'.join(group_name[:-1])
name_ = group_name[-1]
try:
# Pandas datatypes are not compatible with h5py
dtype_ = dtype.numpy_dtype
except AttributeError:
dtype_ = dtype
outputfile.create_group(group_)\
.create_dataset(name_, shape=(length,), dtype=dtype_)
# copy the data over
index = 0
info('Copying data')
names = dtypes.keys()
for part_i in range(ds.npartitions):
df = ds.get_partition(part_i).compute()
df_size = df.shape[0]
for name in names:
vaex_name = construct_vaex_path(name)
data = df[name].values.astype(dtypes[name])
outputfile[vaex_name][index: df_size + index] = data[:]
index += df_size
info('... [{0:d} / {1:d}] partition done'.format(part_i + 1,
ds.npartitions))
return output
|
mit
|
jjx02230808/project0223
|
examples/ensemble/plot_partial_dependence.py
|
8
|
4455
|
"""
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; nor does the average
number of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print()
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print()
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
|
bsd-3-clause
|
zrhans/python
|
exemplos/Examples.lnk/bokeh/charts/donut.py
|
1
|
1091
|
import pandas as pd
import numpy as np
from collections import OrderedDict
from bokeh.sampledata.olympics2014 import data
from bokeh.charts import Donut
from bokeh.plotting import show, output_file
# we throw the data into a pandas df
df = pd.io.json.json_normalize(data['data'])
# keep only countries with more than 8 total medals and sort by the total (descending)
df = df[df['medals.total'] > 8]
df = df.sort("medals.total", ascending=False)
# then, we get the countries and we group the data by medal type
countries = df.abbr.values.tolist()
gold = df['medals.gold'].astype(float).values
silver = df['medals.silver'].astype(float).values
bronze = df['medals.bronze'].astype(float).values
# later, we build a dict containing the grouped data
medals = OrderedDict()
medals['bronze'] = bronze
medals['silver'] = silver
medals['gold'] = gold
# any of the following commented-out alternatives are valid Donut inputs
# medals = list(medals.values())
# medals = np.array(list(medals.values()))
# medals = pd.DataFrame(medals)
output_file("donut.html")
donut = Donut(medals, countries, filename="donut.html")
show(donut) # or donut.show()
|
gpl-2.0
|
dsullivan7/scikit-learn
|
benchmarks/bench_glmnet.py
|
297
|
3848
|
"""
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
This script runs two benchmarks.
First, we fix the number of features and increase the number of
training samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we fix the number of samples and increase the
number of features (dimensions). Then we plot the computation time as a
function of the number of features.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
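# Quick worked check of the helper above (an illustrative addition, not part
# of the benchmark): prediction errors of 0 and 2 give
# sqrt((0**2 + 2**2) / 2) = sqrt(2) ~= 1.414.
assert abs(rmse(np.array([1.0, 2.0]), np.array([1.0, 4.0])) - np.sqrt(2)) < 1e-12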
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features // 10  # integer division so make_regression gets an int
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(step, (n + 1) * step, step)  # the actual training-set sizes used above
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of training samples')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
ZellMechanik-Dresden/dclab
|
examples/ml_builtin.py
|
1
|
4951
|
"""ML: Creating built-in models for dclab
The :ref:`tensorflow example <example_ml_tensorflow>` already
showcased a few convenience functions for machine learning
implemented in dclab. In this example, we want to go even
further and transform the predictions of an ML model into
an :ref:`ancillary feature <sec_features_ancillary>`
(which is then globally available in dclab).
A few things are different from the other example:
- We rename ``model`` to ``bare_model`` to make a clear
distinction between the actual ML model (from tensorflow)
and the model wrapper (see :ref:`sec_av_ml_models`).
- We turn the two-class problem into a regression problem
for one feature only. Consequently, the loss function changes
to "binary crossentropy" and for some inexplicable reason
we have to train for 20 epochs instead of the previously 5
to achieve convergence in accuracy.
- Finally, and this is the whole point of this example, we
register the model as an ancillary feature and perform
inference indirectly by simply accessing the
``ml_score_cel`` feature of the test dataset.
The plot shows the test fraction of the dataset. The x-axis is
(arbitrarily) set to area. The y-axis shows the sigmoid (dclab
automatically applies a sigmoid activation if it is not present
in the final layer; see :func:`dclab.ml.models.TensorflowModel.predict`)
of the model's output `logits
<https://developers.google.com/machine-learning/glossary/#logits>`_.
"""
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
import dclab.ml
tf.random.set_seed(42) # for reproducibility
# https://dcor.mpl.mpg.de/dataset/figshare-7771184-v2
dcor_ids = ["fb719fb2-bd9f-817a-7d70-f4002af916f0",
"f7fa778f-6abd-1b53-ae5f-9ce12601d6f8"]
labels = [0, 1] # 0: beads, 1: cells
features = ["area_ratio", "area_um", "bright_sd", "deform"]
tf_kw = {"dc_data": dcor_ids,
"split": .8,
"shuffle": True,
}
# obtain train and test datasets
train, test = dclab.ml.tf_dataset.assemble_tf_dataset_scalars(
labels=labels, feature_inputs=features, **tf_kw)
# build the model
bare_model = tf.keras.Sequential(
layers=[
tf.keras.layers.Input(shape=(len(features),)),
tf.keras.layers.Dense(128),
tf.keras.layers.Dense(32),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(1)
],
name="scalar_features"
)
# fit the model to the training data
# Note that we did not add a "sigmoid" activation function to the
# final layer and are training with logits here. We also don't
# have to manually add it in a later step, because dclab will
# add it automatically (if it does not exist) before prediction.
loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)
bare_model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])
bare_model.fit(train, epochs=20)
# show accuracy using test data (loss: 0.0725 - accuracy: 0.9877)
bare_model.evaluate(test, verbose=2)
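# Illustrative check (not part of the original example): the docstring above
# says dclab applies a sigmoid to the model's logits before reporting
# ml_score_cel, so the same transformation can be done by hand. Keras'
# predict() accepts the tf.data.Dataset assembled above.
logits = bare_model.predict(test).ravel()
scores_by_hand = 1 / (1 + np.exp(-logits))  # sigmoid maps logits into (0, 1)
print("hand-computed score range:", scores_by_hand.min(), scores_by_hand.max())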
# register the ancillary feature "ml_score_cel" in dclab
dc_model = dclab.ml.models.TensorflowModel(
bare_model=bare_model,
inputs=features,
outputs=["ml_score_cel"],
output_labels=["Probability of having a cell"],
model_name="Distinguish between cells and beads",
)
dc_model.register()
# Now we are actually done already. The only thing left to do is to
# visualize the prediction for the test-fraction of our dataset.
# This involves a bit of data shuffling (obtaining the dataset indices
# from the "index" feature (which starts at 1 and not 0) and creating
# hierarchy children after applying the corresponding manual filters)
# which is less complicated than it looks.
# create dataset hierarchy children for bead and cell test data
bead_train_indices = dclab.ml.tf_dataset.get_dataset_event_feature(
feature="index", dc_data_indices=[0], split_index=0, **tf_kw)
ds_bead = dclab.new_dataset(dcor_ids[0])
ds_bead.filter.manual[np.array(bead_train_indices) - 1] = False
ds_bead.apply_filter()
ds_bead_test = dclab.new_dataset(ds_bead) # hierarchy child with test fraction
cell_train_indices = dclab.ml.tf_dataset.get_dataset_event_feature(
feature="index", dc_data_indices=[1], split_index=0, **tf_kw)
ds_cell = dclab.new_dataset(dcor_ids[1])
ds_cell.filter.manual[np.array(cell_train_indices) - 1] = False
ds_cell.apply_filter()
ds_cell_test = dclab.new_dataset(ds_cell) # hierarchy child with test fraction
fig = plt.figure(figsize=(8, 7))
ax = plt.subplot(111)
plt.plot(ds_bead_test["area_um"], ds_bead_test["ml_score_cel"], ".",
ms=10, alpha=.5, label="test data: beads")
plt.plot(ds_cell_test["area_um"], ds_cell_test["ml_score_cel"], ".",
ms=10, alpha=.5, label="test data: cells")
leg = plt.legend()
for lh in leg.legendHandles:
lh._legmarker.set_alpha(1)
ax.set_xlabel(dclab.dfn.get_feature_label("area_um"))
ax.set_ylabel(dclab.dfn.get_feature_label("ml_score_cel"))
ax.set_xlim(0, 130)
plt.tight_layout()
plt.show()
|
gpl-2.0
|
mmottahedi/neuralnilm_prototype
|
scripts/e176.py
|
2
|
5995
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
e171
lower learning rate
e172
even lower learning rate
e173
slightly higher learning rate!
e175
same as e174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
e176
new cost function
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
THRESHOLD = 0
def scaled_cost(x, t):
sq_error = (x - t) ** 2
above_thresh_sq_error = sq_error[(t > THRESHOLD).nonzero()]
below_thresh_sq_error = sq_error[(t <= THRESHOLD).nonzero()]
return (above_thresh_sq_error.mean() + below_thresh_sq_error.mean()) / 2.0
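# Quick sanity check of the cost above on plain numpy arrays (an illustrative
# addition, not part of the experiment): the expression splits the squared
# error into "on" samples (t > THRESHOLD) and "off" samples and averages the
# two means, so both regimes are weighted equally regardless of how many
# samples fall into each.
import numpy as np
_t = np.array([0.0, 0.0, 0.0, 5.0])  # three "off" samples, one "on" sample
_x = np.array([0.1, 0.1, 0.1, 4.0])
print("scaled_cost on toy arrays:", scaled_cost(_x, _t))  # (1.0 + 0.01) / 2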
def exp_a(name):
# LR of 0.1 didn't NaN but didn't learn well.
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=None,#[200, 100, 200, 2500, 2400],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=25,
include_diff=True
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=250,
loss_function=scaled_cost,
updates=partial(nesterov_momentum, learning_rate=.00001, clip_range=(-1, 1)),
layers_config=[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Uniform(25)
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
raise
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
|
mit
|
dr-nate/msmbuilder
|
msmbuilder/tests/test_kernel_approximation.py
|
9
|
1158
|
from __future__ import absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.kernel_approximation import Nystroem as NystroemR
from msmbuilder.decomposition.kernel_approximation import Nystroem, LandmarkNystroem
def test_nystroem_vs_sklearn():
np.random.seed(42)
X = np.random.randn(100, 5)
kernel = Nystroem(kernel='linear', random_state=42)
kernelR = NystroemR(kernel='linear', random_state=42)
y1 = kernel.fit_transform([X])[0]
y2 = kernelR.fit_transform(X)
assert_array_almost_equal(y1, y2)
def test_lndmrk_nystroem_approximation():
np.random.seed(42)
X = np.random.randn(100, 5)
u = np.arange(X.shape[0])[5::1]
v = np.arange(X.shape[0])[::1][:u.shape[0]]
lndmrks = X[np.unique((u, v))]
kernel = LandmarkNystroem(kernel='rbf', random_state=42)
kernelR = NystroemR(kernel='rbf', random_state=42)
y1_1 = kernel.fit_transform([X])[0]
kernel.landmarks = lndmrks
y1_2 = kernel.fit_transform([X])[0]
y2 = kernelR.fit_transform(X)
assert_array_almost_equal(y2, y1_1)
assert not all((np.abs(y2 - y1_2) > 1E-6).flatten())
|
lgpl-2.1
|
shyamalschandra/scikit-learn
|
examples/neighbors/plot_nearest_centroid.py
|
22
|
1803
|
"""
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, .2]:
# we create an instance of NearestCentroid and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
nagyistoce/kaggle-galaxies
|
try_convnet_cc_multirotflip_3x69r45_maxout2048_pysex.py
|
7
|
17528
|
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = {
0: 0.04,
1800: 0.004,
2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every VALIDATE_EVERY chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
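# Optional sanity checks (an addition, not in the original script): the
# comments above state these constraints (the first only "ideally"), so make
# them explicit and fail early if the constants are ever edited inconsistently.
assert CHUNK_SIZE % BATCH_SIZE == 0, "CHUNK_SIZE should be a multiple of BATCH_SIZE"
assert NUM_CHUNKS % VALIDATE_EVERY == 0, "VALIDATE_EVERY must be a divisor of NUM_CHUNKS"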
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_pysex.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_pysex.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes, processor_class=ra.LoadAndProcessPysexCenteringRescaling)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes,
processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes,
processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
return load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes,
processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
# l4 = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5)
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
# train without normalisation for the first # chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# make predictions for testset, don't forget to cut off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for set, don't forget to cute off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
|
bsd-3-clause
|
mlliarm/pythonscience
|
project1_sys36_ode_optim.py
|
1
|
9510
|
""" Optimization and differential system of equations problem.
The main model with the 36 equations.
Implementation idea based on http://wiki.scipy.org/Cookbook/Zombie_Apocalypse_ODEINT
Author: MiLia , [email protected]
"""
#! /usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.optimize import minimize
import csv
# PART 1: Solving the system.
# Defining the main subroutine of the program; it depends on R, s, a and T and returns abs(det)**2.
def tmp(x,R,n,m,T):
s, a = x[0], x[1]
h = m*((1.-n**2)/(4.*n**2))**(0.5)
c = (-n**2)/(1-n**2)
# Defining some functions
w = lambda t: t*(1.-n)+n
g = lambda t: ((1.-w(t)**2)*n**2)/((1.-n**2)*w(t)**2)
k = lambda t: (1.-n)/w(t)
n1 = lambda t: 4.*k(t)**2*h**2*c-a**2
n2 = lambda t: -(s+h*np.sqrt(T)*g(t)+a*R*np.log(w(t))/np.log(n))
q = lambda t: 4.*c*k(t)**2*h*np.sqrt(T)
b = lambda t: 2.*c*k(t)*h*np.sqrt(T)
# Constructing the time grid [a1,b1]:
alpha = 0.
beta = 1.
Nsteps = 1001  # must be an integer: it is passed to linspace and used as an array index below
t = np.linspace(alpha, beta, Nsteps)
# Calculating the first derivatives of the functions y at time t = 0.
# At the same time we setup our system, which is of the form dy/dx = f(y(t),g(t),k(t),n1(t),n2(t),q(t),b(t),t)
#@profile
def fun(y,t):
# Assigning the values of the vector y to the values of y at time zero: y{i}(0) = y{i} = y[i-1]
#
(y1,y2,y3,y4,y5,y6,y7,y8,y9,y10,y11,y12,y13,y14,y15,y16,y17,y18,y19,y20,y21,y22,y23,y24,y25,y26,y27,y28,y29,
y30,y31,y32,y33,y34,y35,y36) = (y[0],y[1],y[2],y[3],y[4],y[5],y[6],y[7],y[8],y[9],y[10],y[11],y[12],y[13],y[14],y[15],
y[16],y[17],y[18],y[19],y[20],y[21],y[22],y[23],y[24],y[25],y[26],y[27],y[28],y[29],y[30],y[31],y[32],y[33],y[34],y[35])
# The equations of the model. All the f{i} are the derivatives of y{i}(t) at t = 0.
#
f1 = -k(t)*y1 + b(t)*y10 + a*y16
f2 = -k(t)*y2 + b(t)*y11 + a*y17
f3 = -k(t)*y3 + b(t)*y12 + a*y18
f4 = -k(t)*y4 - b(t)*y7 - a*y13
f5 = -k(t)*y5 - b(t)*y8 - a*y14
f6 = -k(t)*y6 - b(t)*y9 - a*y15
f7 = y25 - k(t)*y7
f8 = y26 - k(t)*y8
f9 = y27 - k(t)*y9
f10 = y28 - k(t)*y10
f11 = y29 - k(t)*y11
f12 = y30 - k(t)*y12
f13 = y31
f14 = y32
f15 = y33
f16 = y34
f17 = y35
f18 = y36
f19 = -n1(t)*y1 + n2(t)*y4 + T*g(t)*y7 - q(t)*y10
f20 = -n1(t)*y2 + n2(t)*y5 + T*g(t)*y8 - q(t)*y11
f21 = -n1(t)*y3 + n2(t)*y6 + T*g(t)*y9 - q(t)*y12
f22 = -n1(t)*y4 - n2(t)*y1 + T*g(t)*y10 + q(t)*y7
f23 = -n1(t)*y5 - n2(t)*y2 + T*g(t)*y11 + q(t)*y8
f24 = -n1(t)*y6 - n2(t)*y3 + T*g(t)*y12 + q(t)*y9
f25 = (-n1(t)*y7 + n2(t)*y10 + y1 - ((2*k(t)*h*b(t))/np.sqrt(T))*y7 - ((2*k(t)*h*a)/np.sqrt(T))*y13 -((2*k(t)*h)/np.sqrt(T))*y22
-((4*k(t)**2*h)/np.sqrt(T))*y4)
f26 = (-n1(t)*y8 + n2(t)*y11 + y2 - ((2*k(t)*h*b(t))/np.sqrt(T))*y8 - ((2*k(t)*h*a)/np.sqrt(T))*y14 -((2*k(t)*h)/np.sqrt(T))*y23
-((4*k(t)**2*h)/np.sqrt(T))*y5)
f27 = (-n1(t)*y9 + n2(t)*y12 + y3 - ((2*k(t)*h*b(t))/np.sqrt(T))*y9 - ((2*k(t)*h*a)/np.sqrt(T))*y15 -((2*k(t)*h)/np.sqrt(T))*y24
-((4*k(t)**2*h)/np.sqrt(T))*y6)
f28 = (-n1(t)*y10 - n2(t)*y7 + y4 - ((2*k(t)*h*b(t))/np.sqrt(T))*y10 - ((2*k(t)*h*a)/np.sqrt(T))*y16 +((2*k(t)*h)/np.sqrt(T))*y19
+((4*k(t)**2*h)/np.sqrt(T))*y1)
f29 = (-n1(t)*y11 - n2(t)*y8 + y5 - ((2*k(t)*h*b(t))/np.sqrt(T))*y11 - ((2*k(t)*h*a)/np.sqrt(T))*y17 +((2*k(t)*h)/np.sqrt(T))*y20
+((4*k(t)**2*h)/np.sqrt(T))*y2)
f30 = (-n1(t)*y12 - n2(t)*y9 + y6 - ((2*k(t)*h*b(t))/np.sqrt(T))*y12 - ((2*k(t)*h*a)/np.sqrt(T))*y18 +((2*k(t)*h)/np.sqrt(T))*y21
+((4.*k(t)**2*h)/np.sqrt(T))*y3)
f31 = -k(t)*y31 - n1(t)*y13 + n2(t)*y16 + a*b(t)*y7 + a**2*y13 + a*y22 + ((k(t)*R)/np.log(n))*y1
f32 = -k(t)*y32 - n1(t)*y14 + n2(t)*y17 + a*b(t)*y8 + a**2*y14 + a*y23 + ((k(t)*R)/np.log(n))*y2
f33 = -k(t)*y33 - n1(t)*y15 + n2(t)*y18 + a*b(t)*y9 + a**2*y15 + a*y24 + ((k(t)*R)/np.log(n))*y3
f34 = -k(t)*y34 - n1(t)*y16 - n2(t)*y13 + a*b(t)*y10 + a**2*y16 - a*y19 + ((k(t)*R)/np.log(n))*y4
f35 = -k(t)*y35 - n1(t)*y17 - n2(t)*y14 + a*b(t)*y11 + a**2*y17 - a*y20 + ((k(t)*R)/np.log(n))*y5
f36 = -k(t)*y36 - n1(t)*y18 - n2(t)*y15 + a*b(t)*y12 + a**2*y18 - a*y21 + ((k(t)*R)/np.log(n))*y6
return np.array([f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24,f25,f26,
f27,f28,f29,f30,f31,f32,f33,f34,f35,f36])
# Initial Conditions
y19o,y22o,y26o,y29o,y33o,y36o = np.ones(6)
(y1o,y2o,y3o,y4o,y5o,y6o,y7o,y8o,y9o,y10o,y11o,y12o,y13o,y14o,y15o,y16o,y17o,y18o,y20o,y21o,y23o,y24o,y25o,y27o,y28o,
y30o,y31o,y32o,y34o,y35o) = np.zeros(30)
y0 = np.array([y1o,y2o,y3o,y4o,y5o,y6o,y7o,y8o,y9o,y10o,y11o,y12o,y13o,y14o,y15o,y16o,y17o,y18o,y19o,y20o,y21o,y22o,y23o,y24o,y25o,y26o,
y27o,y28o,y29o,y30o,y31o,y32o,y33o,y34o,y35o,y36o]) # initial condition vector
# Solve the ODEs
# Information for odeint(): http://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.odeint.html.
soln = odeint(fun,y0,t)
y1final = soln[:,0]
y2final = soln[:,1]
y3final = soln[:,2]
y4final = soln[:,3]
y5final = soln[:,4]
y6final = soln[:,5]
y7final = soln[:,6]
y8final = soln[:,7]
y9final = soln[:,8]
y10final = soln[:,9]
y11final = soln[:,10]
y12final = soln[:,11]
y13final = soln[:,12]
y14final = soln[:,13]
y15final = soln[:,14]
y16final = soln[:,15]
y17final = soln[:,16]
y18final = soln[:,17]
y19final = soln[:,18]
y20final = soln[:,19]
y21final = soln[:,20]
y22final = soln[:,21]
y23final = soln[:,22]
y24final = soln[:,23]
y25final = soln[:,24]
y26final = soln[:,25]
y27final = soln[:,26]
y28final = soln[:,27]
y29final = soln[:,28]
y30final = soln[:,29]
y31final = soln[:,30]
y32final = soln[:,31]
y33final = soln[:,32]
y34final = soln[:,33]
y35final = soln[:,34]
y36final = soln[:,35]
#
# PART 2: Calculating the determinant
# Remember that all matrices in python start from 0. Thus the last element of a 1000 member array will be array[999]
#
M = np.array([
[y1final[Nsteps-1] + 1j*y4final[Nsteps-1], y2final[Nsteps-1] + 1j*y5final[Nsteps-1], y3final[Nsteps-1] + 1j*y6final[Nsteps-1]],
[y7final[Nsteps-1] + 1j*y10final[Nsteps-1], y8final[Nsteps-1] + 1j*y11final[Nsteps-1], y9final[Nsteps-1] + 1j*y12final[Nsteps-1]],
[y13final[Nsteps-1] + 1j*y16final[Nsteps-1], y14final[Nsteps-1] + 1j*y17final[Nsteps-1], y15final[Nsteps-1] + 1j*y18final[Nsteps-1]]
])
# Straightforward method
det = np.linalg.det(M)
# Calculating the rest:
b1 = np.abs(det)
d1 = b1**2
# Exiting the function temp()
return d1
#--------------------------------------------------------------------------------------------------------------------------------------------
# PART 3: Exploring the minimum value of tmp() while looking for the minimum value of T
# at the same time.
#--------------------------------------------------------------------------------------------------------------------------------------------
# Creating a function minT which tries to find the minimum T through a bisection method.
# This function prints the results in csv files as well.
def minTcsv(n,m,R,Tmin,Tmax,myfile):
csvout = csv.writer(open(myfile, "wb"))
csvout.writerow(("m=",m,"n=",n,"R=",R))
csvout.writerow((" "))
csvout.writerow(("Tmin", "Tmax","s","a","tmp"))
a,b= Tmin, Tmax
while (abs(a-b)>1):
c=int(((a+b)/2.)//1) # integer part of (a+b)/2
T=c
sol = minimize(tmp,[0,3],args=(R,n,m,T),bounds=((-150,0),(1.5,6)),tol=1.e-9)
if sol.fun>1.e-9:
a=c
else:
b=c
print a," ",b," ", sol.x, sol.fun
csvout.writerow((a,b,sol.x[[0]],sol.x[[1]],sol.fun))
csvout.writerow((" "))
csvout.writerow(("Tmin= ",c,"s=",sol.x[[0]],"a=",sol.x[[1]],"tmp=",sol.fun))
return c,sol
# The same function as above, only without printing the results in csv.
def minTsimple(n,m,R,Tmin,Tmax):
a,b= Tmin, Tmax
while (abs(a-b)>1):
c=int(((a+b)/2.)//1) # integer part of (a+b)/2
T=c
sol = minimize(tmp,[0,3],args=(R,n,m,T),bounds=((-150,0),(1.5,6)),tol=1.e-9)
if sol.fun>1.e-9:
a=c
else:
b=c
print a," ",b," ", sol.x, sol.fun
return c,sol
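# Generic form of the search used by minTcsv/minTsimple above (an illustrative
# sketch, not called anywhere in this script): bisect on an integer T, assuming
# the predicate "the minimum of tmp is below tolerance" is False for small T
# and True for large T.
def bisect_min_int(predicate, lo, hi):
    """Return, to within one unit, the smallest integer in (lo, hi] for which
    predicate() holds, under the monotonicity assumption described above."""
    while abs(lo - hi) > 1:
        mid = (lo + hi) // 2
        if predicate(mid):
            hi = mid
        else:
            lo = mid
    return hi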
# A function which will help us create the csv file names iteratively
def printname(x,m,y,n,z,R):
name = x + str(m) + y + str(n) + z + str(R) + ".csv"
return name
# A function to merge files
def mergefiles():
fout=open("final.csv","a")
for i in [0, 1, 2, 3]: # m (integers, matching the values used in main() so the file names agree)
for k in [0.9, 0.8, 0.75, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]: # n
for j in [0, 5, 10, 30, 70, 80, 100]: # R (integers, matching main())
f = open(printname("minTm",i,"n",k,"R",j))
fout.write("n="+str(k)+"\n")
for line in f:
fout.write(line)
fout.write('\n')
f.close() # not really needed
fout.close()
return
# The main function
def main():
# A for loop which will print us all the files.
for i in [0,1,2,3]: # m
for k in [0.9, 0.8, 0.75, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]:# n
for j in [0,5,10,30,70,80,100]: # R
print "Writing for: m=",i,",n=",k," and R=",j
print minTcsv(k,i,j,0,10000,printname("minTm",i,"n",k,"R",j)) #Tmax set to 10k.
mergefiles()
# Calling the main function.
if __name__ == "__main__":
main()
|
gpl-2.0
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/numpy/lib/polynomial.py
|
82
|
37957
|
"""
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0, roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
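def _companion_matrix_example():
    """Illustrative helper (an addition for exposition, not part of the public
    NumPy API): build the companion matrix of ``x**3 - 6*x**2 + 11*x - 6``
    exactly as `roots` does above and return its eigenvalues, which are the
    roots 1, 2 and 3.
    """
    p = array([1.0, -6.0, 11.0, -6.0])
    A = diag(ones((len(p) - 2,), p.dtype), -1)  # ones on the first sub-diagonal
    A[0, :] = -p[1:] / p[0]                     # first row from the coefficients
    return eigvals(A)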
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
weights to apply to the y-coordinates of the sample points.
cov : bool, optional
Return the estimate and the covariance matrix of the estimate
If full is True, then cov is not returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond :
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
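# A minimal usage sketch (illustrative, not part of the library source): how the
# ``cov=True`` path above can be used to obtain per-coefficient uncertainties.
# The diagonal of the returned matrix V holds the variance estimate of each
# fitted coefficient.
#
# >>> x = np.linspace(0, 1, 20)
# >>> y = 3*x**2 + 2*x + 1 + np.random.normal(scale=0.1, size=x.shape)
# >>> coeffs, V = np.polyfit(x, y, 2, cov=True)
# >>> np.sqrt(np.diag(V))   # one-sigma uncertainties of the coefficients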
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
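# _raise_power pretty-prints '**<power>' exponents of a flat polynomial string
# as superscripts: it builds two parallel text lines (one carrying the exponents,
# one carrying the terms) and wraps both once they would exceed `wrap` columns.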
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
__hash__ = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
return not self.__eq__(other)
def __setattr__(self, key, val):
raise ValueError("Attributes cannot be changed this way.")
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c', 'coef', 'coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError(
"'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
|
apache-2.0
|
GeoffEvans/aol_model
|
aol_model/figs_from_paper.py
|
1
|
1544
|
import matplotlib.pyplot as plt
import aod_model_expt_comparison as c
import aod_visualisation as a
import aol_model.pointing_efficiency as p
# fig_1 is not generated by the model
def plot_fig_2():
plt.figure()
c.plot_eff_ang_wide()
plt.figure()
c.plot_eff_ang_narrow()
def plot_fig_3():
plt.figure()
c.plot_eff_freq_narrow_expt_model()
# this takes about 5 mins to run
def plot_fig_4():
av_config1 = a.AodVisualisation(920e-9, is_wide=True, freq_bnds=(20,50))
av_config1.plot_efficiency_xangle_freq(ac_power=1.5)
av_config2 = a.AodVisualisation(920e-9, is_wide=False, freq_bnds=(20,70))
av_config2.plot_efficiency_xangle_freq(ac_power=1.5)
av_config3 = a.AodVisualisation(920e-9, is_wide=False, freq_bnds=(20,50))
av_config3.plot_efficiency_xangle_freq_second_order_noise()
# this will take many hours to run
# only the model panels are generated
def plot_fig_5():
pdr_list = [5, 2, 1, 0.5, 0, -0.5, -2, -5]
for pdr in pdr_list:
simulation = p.calc_fov_surf_data(1e9, pdr)
description = 'Model for PDR %s' % pdr
p.generate_plot(simulation, description, pdr_z=(pdr, 1e9))
# takes about an hour to run
# only the model panels are generated
def plot_fig_6():
z_list = [-0.5, 1e9, 0.5] # aol coords
for z in z_list:
effs_norm = p.calc_fov_surf_data(z, 0.3)
description = 'Model, PDR %s, z=%s' % (0.3, z)
p.generate_plot(effs_norm, description)
# fig_7 is not generated by the model
if __name__ == '__main__':
plot_fig_2()
|
gpl-3.0
|
decabyte/task_scheduling
|
task_scheduling/op_problem.py
|
2
|
9881
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, lounick and decabyte
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of task_scheduling nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Orienteering problem solver
Implementation of an integer linear formulation for maximizing the number of targets visited by a vehicle under a cost constraint.
The vehicle has to start and finish at the first point and it is allowed to skip targets.
Described in:
Vansteenwegen, Pieter, Wouter Souffriau, and Dirk Van Oudheusden. "The orienteering problem: A survey."
European Journal of Operational Research 209.1 (2011): 1-10.
"""
from __future__ import division
import numpy as np
from gurobipy import *
def _callback(model, where):
"""Callback function for the solver
Callback function that adds lazy constraints for the optimisation process. Here it dynamically imposes cardinality
constraints for the vertices in the solution, ensuring that if a path enters a vertex there must be a path exiting.
Parameters
----------
model : object
The gurobi model instance
where : int
Gurobi specific callback variable
Returns
-------
"""
if where == GRB.callback.MIPSOL:
V = set(range(model._n))
idx_start = model._idxStart
# idx_finish = model._idxFinish
# solmat = np.zeros((model._n, model._n))
selected = []
for i in V:
sol = model.cbGetSolution([model._eVars[i, j] for j in V])
selected += [(i, j) for j in V if sol[j] > 0.5]
# solmat[i, :] = sol
if len(selected) <= 1:
return
for k in range(len(selected)):
el = selected[k]
entry = el[0]
if idx_start != entry:
expr1 = quicksum(model._eVars[i, entry] for i in V)
expr2 = quicksum(model._eVars[entry, j] for j in V)
model.cbLazy(expr1, GRB.EQUAL, expr2)
def op_solver(cost, profit=None, cost_max=None, idx_start=None, idx_finish=None, **kwargs):
"""Orienteering problem solver instance
Cost constrained traveling salesman problem solver for a single vehicle using the Gurobi MILP optimiser.
Parameters
----------
cost : ndarray (n, dims)
Cost matrix for traveling from point to point. Here it is the time (seconds) needed to go from point a to point b.
profit : Optional[vector]
Profit vector for profit of visiting each point.
cost_max : Optional[double]
Maximum running time of the mission in seconds.
idx_start : Optional[int]
Optional starting point for the tour. If none is provided the first point of the array is chosen.
idx_finish : Optional[int]
Optional ending point of the tour. If none is provided the last point of the array is chosen.
kwargs : Optional[list]
Optional extra arguments.
Returns
-------
route : list
The calculated route.
profit : double
The profit of the route.
m : object
A gurobi model object.
"""
# Number of points
n = cost.shape[0]
# other params
node_energy = float(kwargs.get('node_energy', 1.0))
# Check for default values
if idx_start is None:
idx_start = 0
if idx_finish is None:
idx_finish = n - 1
if profit is None:
profit = np.ones(n)
if cost_max is None:
cost_max = cost[idx_start, idx_finish]
# Create the vertices set
V = set(range(n))
m = Model()
# Create model variables
e_vars = {}
for i in V:
for j in V:
e_vars[i, j] = m.addVar(vtype=GRB.BINARY, name='e_' + str(i) + '_' + str(j))
m.update()
for i in V:
e_vars[i, i].ub = 0
m.update()
u_vars = {}
for i in V:
u_vars[i] = m.addVar(vtype=GRB.INTEGER, name='u_' + str(i))
m.update()
# Set objective function (0)
expr = 0
for i in V:
for j in V:
if i != idx_start and i != idx_finish:
expr += profit[i] * e_vars[i, j]
m.setObjective(expr, GRB.MAXIMIZE)
m.update()
# Constraints
# Add constraints for the initial and final node (1)
# None enters the starting point
m.addConstr(quicksum(e_vars[j, idx_start] for j in V.difference([idx_start])) == 0, "s_entry")
m.update()
# None exits the finish point
m.addConstr(quicksum(e_vars[idx_finish, j] for j in V.difference([idx_finish])) == 0, "f_exit")
m.update()
# Always exit the starting point
m.addConstr(quicksum(e_vars[idx_start, i] for i in V.difference([idx_start])) == 1, "s_exit")
m.update()
# Always enter the finish point
m.addConstr(quicksum(e_vars[i, idx_finish] for i in V.difference([idx_finish])) == 1, "f_entry")
m.update()
# From all other points someone may exit
for i in V.difference([idx_start, idx_finish]):
m.addConstr(quicksum(e_vars[i, j] for j in V if i != j) <= 1, "v_" + str(i) + "_exit")
m.update()
# To all other points someone may enter
for i in V.difference([idx_start, idx_finish]):
m.addConstr(quicksum(e_vars[j, i] for j in V if i != j) <= 1, "v_" + str(i) + "_entry")
m.update()
# for i in V.difference([idx_start, idx_finish]):
# m.addConstr(quicksum(e_vars[j, i] for j in V if i != j) == quicksum(e_vars[i, j] for j in V if i != j), "v_" + str(i) + "_cardinality")
# m.update()
# Add cost constraints (3)
expr = 0
for i in V:
for j in V:
# add a fixed cost for intermediate nodes (sensing energy)
if i != idx_start and i != idx_finish:
expr += node_energy * e_vars[i, j]
expr += cost[i, j] * e_vars[i, j]
m.addConstr(expr <= cost_max, "max_energy")
m.update()
# Constraint (4)
for i in V:
u_vars[i].lb = 0
u_vars[i].ub = n
m.update()
# Add subtour constraint (5)
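# These are Miller-Tucker-Zemlin (MTZ) style ordering constraints,
# u_i - u_j + 1 <= (n - 1) * (1 - e_ij): whenever edge (i, j) is selected
# (e_ij = 1) they force u_j >= u_i + 1, so the ordering variables u rule out
# disconnected subtours.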
for i in V:
for j in V:
m.addConstr(u_vars[i] - u_vars[j] + 1, GRB.LESS_EQUAL, (n - 1)*(1 - e_vars[i, j]),
"sec_" + str(i) + "_" + str(j))
m.update()
m._n = n
m._eVars = e_vars
m._uVars = u_vars
m._idxStart = idx_start
m._idxFinish = idx_finish
m.update()
m.params.OutputFlag = int(kwargs.get('output_flag', 0))
m.params.TimeLimit = float(kwargs.get('time_limit', 60.0))
m.params.MIPGap = float(kwargs.get('mip_gap', 0.0))
m.params.LazyConstraints = 1
m.optimize(_callback)
solution = m.getAttr('X', e_vars)
selected = [(i, j) for i in V for j in V if solution[i, j] > 0.5]
# solmat = np.zeros((n, n))
# for k, v in solution.iteritems():
# solmat[k[0], k[1]] = v
# print("\n")
# print(solmat)
# print(u)
# print(selected)
# print(sum(cost[s[0], s[1]] for s in selected))
route = []
next_city = idx_start
while len(selected) > 0:
for i in range(len(selected)):
if selected[i][0] == next_city:
route.append(next_city)
next_city = selected[i][1]
selected.pop(i)
break
route.append(next_city)
return route, m.objVal, m
def main():
import matplotlib.pyplot as plt
import task_scheduling.utils as tsu
import random
nodes = tsu.generate_nodes(n=100, lb=-100, up=100, dims=2)
cost = tsu.calculate_distances(nodes)
nodes = []
random.seed(42)
nodes.append([0,0])
for i in range(1,6):
for j in range(-2,3):
ni = i
nj = j
# ni = random.uniform(-0.5,0.5) + i
# nj = random.uniform(-0.5,0.5) + j
nodes.append([ni,nj])
nodes.append([6,0])
nodes = np.array(nodes)
cost = tsu.calculate_distances(nodes)
max_cost = [25.5]
for mc in max_cost:
solution, objective, _ = tsu.solve_problem(op_solver, cost, cost_max=mc, output_flag=1, mip_gap=0.0, time_limit=3600)
util = 0
for i in solution:
extras = 0
if i != 0 and i != solution[len(solution)-1]:
for j in range(cost.shape[0]):
if j != i and j not in solution and j != 0 and j != solution[len(solution)-1]:
extras += np.e**(-2*cost[i,j])
util += 1 + extras
print("Utility: {0}".format(util))
fig, ax = tsu.plot_problem(nodes, solution, objective)
plt.show()
if __name__ == '__main__':
main()
|
bsd-3-clause
|
cmap/cmapPy
|
cmapPy/pandasGEXpress/tests/python3_tests/test_write_gct.py
|
1
|
6920
|
import unittest
import logging
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
import os
import numpy as np
import pandas as pd
import cmapPy.pandasGEXpress.GCToo as GCToo
import cmapPy.pandasGEXpress.parse_gct as pg
import cmapPy.pandasGEXpress.write_gct as wg
FUNCTIONAL_TESTS_PATH = "cmapPy/pandasGEXpress/tests/functional_tests/"
logger = logging.getLogger(setup_logger.LOGGER_NAME)
class TestWriteGct(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Create dfs to be used by tests
cls.data_df = pd.DataFrame(
[[1, 2, 3], [5, 7, np.nan], [13, 17, -19], [0, 23, 29]],
index=pd.Index(["rid1", "rid2", "rid3", "rid4"], name="rid"),
columns=pd.Index(["cid1", "cid2", "cid3"], name="cid"), dtype=np.float32)
cls.row_metadata_df = pd.DataFrame(
[["Analyte 11", 11, "dp52"],
["Analyte 12", 12, "dp52"],
["Analyte 13", 13, "dp53"],
["Analyte 14", 14, "dp54"]],
index=pd.Index(["rid1", "rid2", "rid3", "rid4"], name="rid"),
columns=pd.Index(["pr_analyte_id", "pr_analyte_num", "pr_bset_id"], name="rhd"))
cls.col_metadata_df = pd.DataFrame(
[[8.38, np.nan, "DMSO", "24 h"],
[7.7, np.nan, "DMSO", "24 h"],
[8.18, np.nan, "DMSO", "24 h"]],
index=pd.Index(["cid1", "cid2", "cid3"], name="cid"),
columns=pd.Index(["qc_iqr", "pert_idose", "pert_iname", "pert_itime"], name="chd"))
def test_write(self):
out_name = os.path.join(FUNCTIONAL_TESTS_PATH, "test_write_out.gct")
gctoo = GCToo.GCToo(data_df=self.data_df,
row_metadata_df=self.row_metadata_df,
col_metadata_df=self.col_metadata_df)
wg.write(gctoo, out_name, data_null="NaN",
metadata_null="-666", filler_null="-666")
# Read in the gct and verify that it's the same as gctoo
new_gct = pg.parse(out_name)
pd.testing.assert_frame_equal(new_gct.data_df, gctoo.data_df)
pd.testing.assert_frame_equal(new_gct.row_metadata_df, gctoo.row_metadata_df)
pd.testing.assert_frame_equal(new_gct.col_metadata_df, gctoo.col_metadata_df)
# Also check that missing values were written to the file as expected
in_df = pd.read_csv(out_name, sep="\t", skiprows=2, keep_default_na=False)
self.assertEqual(in_df.iloc[0, 1], "-666")
self.assertEqual(in_df.iloc[5, 6], "NaN")
# Cleanup
os.remove(out_name)
def test_write_version_and_dims(self):
# Write
fname = "test_file.gct"
f = open(fname, "w")
wg.write_version_and_dims("1.3", ["1", "2", "3", "4"], f)
f.close()
# Read and then remove
f = open(fname, "r")
version_string = f.readline().strip()
dims = f.readline().strip().split("\t")
f.close()
os.remove(fname)
# Check that it was written correctly
self.assertEqual(version_string, "#1.3")
self.assertEqual(dims, ["1", "2", "3", "4"])
def test_write_top_half(self):
# Write
fname = "test_write_top_half.tsv"
f = open(fname, "w")
wg.write_top_half(f, self.row_metadata_df, self.col_metadata_df, "-666", "-666")
f.close()
# Compare what was written to what was expected
e_top_half = pd.DataFrame(
[["id", "pr_analyte_id", "pr_analyte_num", "pr_bset_id", "cid1", "cid2", "cid3"],
["qc_iqr", "-666", "-666", "-666", "8.38", "7.7", "8.18"],
["pert_idose", "-666", "-666", "-666", "-666", "-666", "-666"],
["pert_iname", "-666", "-666", "-666", "DMSO", "DMSO", "DMSO"],
["pert_itime", "-666", "-666", "-666", "24 h", "24 h", "24 h"]])
top_half = pd.read_csv(fname, sep="\t", header=None)
pd.testing.assert_frame_equal(top_half, e_top_half)
os.remove(fname)
def test_write_bottom_half(self):
# Write
fname = "test_write_bottom_half.tsv"
f = open(fname, "w")
wg.write_bottom_half(f, self.row_metadata_df, self.data_df, "NaN", "%.1f", "-666")
f.close()
# Compare what was written to what was expected
e_bottom_half = pd.DataFrame(
[["rid1", "Analyte 11", 11, "dp52", 1., 2., 3.],
["rid2", "Analyte 12", 12, "dp52", 5., 7., np.nan],
["rid3", "Analyte 13", 13, "dp53", 13., 17., -19.],
["rid4", "Analyte 14", 14, "dp54", 0., 23., 29.]])
bottom_half = pd.read_csv(fname, sep="\t", header=None)
pd.testing.assert_frame_equal(bottom_half, e_bottom_half)
os.remove(fname)
def test_append_dims_and_file_extension(self):
data_df = pd.DataFrame([[1, 2], [3, 4]])
fname_no_gct = "a/b/file"
fname_gct = "a/b/file.gct"
e_fname = "a/b/file_n2x2.gct"
fname_out = wg.append_dims_and_file_extension(fname_no_gct, data_df)
self.assertEqual(fname_out, e_fname)
fname_out = wg.append_dims_and_file_extension(fname_gct, data_df)
self.assertEqual(fname_out, e_fname)
def test_l1000_functional(self):
l1000_in_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_l1000.gct")
l1000_out_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_l1000_writing.gct")
# Read in original gct file
l1000_in_gct = pg.parse(l1000_in_path)
# do write operation
wg.write(l1000_in_gct, l1000_out_path)
# Read in new gct file
l1000_out_gct = pg.parse(l1000_out_path)
pd.testing.assert_frame_equal(l1000_in_gct.data_df, l1000_out_gct.data_df)
pd.testing.assert_frame_equal(l1000_in_gct.row_metadata_df, l1000_out_gct.row_metadata_df)
pd.testing.assert_frame_equal(l1000_in_gct.col_metadata_df, l1000_out_gct.col_metadata_df)
# Clean up
os.remove(l1000_out_path)
def test_p100_functional(self):
p100_in_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_p100.gct")
p100_out_path = os.path.join(FUNCTIONAL_TESTS_PATH, "test_p100_writing.gct")
# Read in original gct file
p100_in_gct = pg.parse(p100_in_path)
# do write operation - note data_float_format set to None to preserve precision of input file
wg.write(p100_in_gct, p100_out_path, data_float_format=None)
# Read in new gct file
p100_out_gct = pg.parse(p100_out_path)
pd.testing.assert_frame_equal(p100_in_gct.data_df, p100_out_gct.data_df)
pd.testing.assert_frame_equal(p100_in_gct.row_metadata_df, p100_out_gct.row_metadata_df)
pd.testing.assert_frame_equal(p100_in_gct.col_metadata_df, p100_out_gct.col_metadata_df)
# Clean up
os.remove(p100_out_path)
if __name__ == "__main__":
setup_logger.setup(verbose=True)
unittest.main()
|
bsd-3-clause
|
davidbroadwater/nyc-subway-datascience-project
|
project_2/mean_temp_on_weekends/mean_weekend_temp.py
|
1
|
1959
|
import pandas as pd
import pandasql
def avg_weekend_temperature(filename):
'''
This function should run a SQL query on a dataframe of
weather data. The SQL query should return one column and
one row - the average meantempi on days that are a Saturday
or Sunday (i.e., the average mean temperature on weekends).
The dataframe will be titled 'weather_data' and you can access
the date in the dataframe via the 'date' column.
You'll need to provide the SQL query.
You might also find that interpreting numbers as integers or floats may not
work initially. In order to get around this issue, it may be equal to cast
these numbers as integers. This can be done by writing cast(column as integer).
So for example, if we wanted to cast the maxtempi column as an integer, we would actually
write something like where cast(maxtempi as integer) = 76, as opposed to simply
where maxtempi = 76.
Also, you can convert dates to days of the week via the 'strftime' keyword in SQL.
For example, cast (strftime('%w', date) as integer) will return 0 if the date
is a Sunday or 6 if the date is a Saturday.
You can see the weather data that we are passing in below:
https://www.dropbox.com/s/7sf0yqc9ykpq3w8/weather_underground.csv
'''
weather_data = pd.read_csv(filename)
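# Note: in SQLite's strftime('%w', date), Sunday is 0 and Saturday is 6,
# so weekend rows are those where the expression equals 0 or 6.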
q = """
select
avg(cast (meantempi as integer))
from
weather_data
where
cast (strftime('%w', date) as integer) == 0 or cast (strftime('%w', date) as integer) == 6;
"""
#Execute your SQL command against the pandas frame
mean_temp_weekends = pandasql.sqldf(q.lower(), locals())
return mean_temp_weekends
if __name__ == "__main__":
input_filename = "weather_underground.csv"
output_filename = "output.csv"
student_df = avg_weekend_temperature(input_filename)
student_df.to_csv(output_filename)
|
mit
|
nakul02/incubator-systemml
|
src/main/python/systemml/mllearn/estimators.py
|
4
|
41695
|
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
__all__ = ['LinearRegression', 'LogisticRegression', 'SVM', 'NaiveBayes', 'Caffe2DML', 'Keras2DML']
import numpy as np
from pyspark.ml import Estimator
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import DataFrame
import sklearn as sk
from sklearn.metrics import accuracy_score, r2_score
from py4j.protocol import Py4JError
import traceback
from sklearn.preprocessing import LabelEncoder
import threading
import time
import math
from ..converters import *
from ..classloader import *
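# Helper used below: converts a pandas DataFrame into a Spark DataFrame and
# packs the given input columns into a single vector column `outputCol` via
# VectorAssembler.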
def assemble(sparkSession, pdf, inputCols, outputCol):
tmpDF = sparkSession.createDataFrame(pdf, list(pdf.columns))
assembler = VectorAssembler(inputCols=list(inputCols), outputCol=outputCol)
return assembler.transform(tmpDF)
class BaseSystemMLEstimator(Estimator):
features_col = 'features'
label_col = 'label'
def set_features_col(self, colName):
"""
Sets the default column name for features of PySpark DataFrame.
Parameters
----------
colName: column name for features (default: 'features')
"""
self.features_col = colName
def set_label_col(self, colName):
"""
Sets the default column name for labels of PySpark DataFrame.
Parameters
----------
colName: column name for labels (default: 'label')
"""
self.label_col = colName
def setGPU(self, enable):
"""
Whether or not to enable GPU.
Parameters
----------
enable: boolean
"""
self.estimator.setGPU(enable)
return self
def setForceGPU(self, enable):
"""
Whether or not to force the usage of GPU operators.
Parameters
----------
enable: boolean
"""
self.estimator.setForceGPU(enable)
return self
def setExplain(self, explain):
"""
Explanation about the program. Mainly intended for developers.
Parameters
----------
explain: boolean
"""
self.estimator.setExplain(explain)
return self
def setExplainLevel(self, explainLevel):
"""
Set explain level. Mainly intended for developers.
Parameters
----------
explainLevel: string
Can be one of "hops", "runtime", "recompile_hops", "recompile_runtime"
or in the above in upper case.
"""
self.estimator.setExplainLevel(explainLevel)
return self
def setStatistics(self, statistics):
"""
Whether or not to output statistics (such as execution time, elapsed time)
about script executions.
Parameters
----------
statistics: boolean
"""
self.estimator.setStatistics(statistics)
return self
def setStatisticsMaxHeavyHitters(self, maxHeavyHitters):
"""
The maximum number of heavy hitters that are printed as part of the statistics.
Parameters
----------
maxHeavyHitters: int
"""
self.estimator.setStatisticsMaxHeavyHitters(maxHeavyHitters)
return self
def setConfigProperty(self, propertyName, propertyValue):
"""
Set configuration property, such as setConfigProperty("sysml.localtmpdir", "/tmp/systemml").
Parameters
----------
propertyName: String
propertyValue: String
"""
self.estimator.setConfigProperty(propertyName, propertyValue)
return self
def _fit_df(self):
global default_jvm_stdout, default_jvm_stdout_parallel_flush
try:
if default_jvm_stdout:
with jvm_stdout(parallel_flush=default_jvm_stdout_parallel_flush):
self.model = self.estimator.fit(self.X._jdf)
else:
self.model = self.estimator.fit(self.X._jdf)
except Py4JError:
traceback.print_exc()
def fit_df(self, X):
self.X = X
self._fit_df()
self.X = None
return self
def _fit_numpy(self):
global default_jvm_stdout, default_jvm_stdout_parallel_flush
try:
if type(self.y) == np.ndarray and len(self.y.shape) == 1:
# Since we know that mllearn always needs a column vector
self.y = np.matrix(self.y).T
y_mb = convertToMatrixBlock(self.sc, self.y)
if default_jvm_stdout:
with jvm_stdout(parallel_flush=default_jvm_stdout_parallel_flush):
self.model = self.estimator.fit(convertToMatrixBlock(self.sc, self.X), y_mb)
else:
self.model = self.estimator.fit(convertToMatrixBlock(self.sc, self.X), y_mb)
except Py4JError:
traceback.print_exc()
def fit_numpy(self, X, y):
self.X = X
self.y = y
self._fit_numpy()
self.X = None
self.y = None
return self
def fit_file(self, X_file, y_file):
global default_jvm_stdout, default_jvm_stdout_parallel_flush
try:
if default_jvm_stdout:
with jvm_stdout(parallel_flush=default_jvm_stdout_parallel_flush):
self.model = self.estimator.fit(X_file, y_file)
else:
self.model = self.estimator.fit(X_file, y_file)
except Py4JError:
traceback.print_exc()
return self
# Returns a model after calling fit(df) on Estimator object on JVM
def _fit(self, X):
"""
Invokes the fit method on Estimator object on JVM if X is PySpark DataFrame
Parameters
----------
X: PySpark DataFrame that contain the columns features_col (default: 'features') and label_col (default: 'label')
"""
if hasattr(X, '_jdf') and self.features_col in X.columns and self.label_col in X.columns:
return self.fit_df(X)
else:
raise Exception('Incorrect usage: Expected dataframe as input with features/label as columns')
def fit(self, X, y=None, params=None):
"""
Invokes the fit method on Estimator object on JVM if X and y are one of the supported data types
Parameters
----------
X: NumPy ndarray, Pandas DataFrame, scipy sparse matrix, Spark DataFrame, file path
y: NumPy ndarray, Pandas DataFrame, scipy sparse matrix, file path
"""
if y is None:
return self._fit(X)
elif isinstance(X, str) and isinstance(y, str):
return self.fit_file(X, y)
elif isinstance(X, SUPPORTED_TYPES) and isinstance(y, SUPPORTED_TYPES):
# Do not encode if y is a numpy matrix => useful for segmentation
skipEncodingY = len(y.shape) == 2 and y.shape[0] != 1 and y.shape[1] != 1
y = y if skipEncodingY else self.encode(y)
if self.transferUsingDF:
pdfX = convertToPandasDF(X)
pdfY = convertToPandasDF(y)
if getNumCols(pdfY) != 1 and not skipEncodingY:
raise Exception('y should be a column vector')
if pdfX.shape[0] != pdfY.shape[0]:
raise Exception('Number of rows of X and y should match')
colNames = pdfX.columns
pdfX[self.label_col] = pdfY[pdfY.columns[0]]
df = assemble(self.sparkSession, pdfX, colNames, self.features_col).select(self.features_col, self.label_col)
self.fit_df(df)
else:
numColsy = getNumCols(y)
if numColsy != 1 and not skipEncodingY:
raise Exception('Expected y to be a column vector')
self.fit_numpy(X, y)
if self.setOutputRawPredictionsToFalse:
self.model.setOutputRawPredictions(False)
return self
else:
raise Exception('Unsupported input type')
def transform(self, X):
return self.predict(X)
def _convertPythonXToJavaObject(self, X):
"""
Converts the input python object X to a java-side object (either MatrixBlock or Java DataFrame)
Parameters
----------
X: NumPy ndarray, Pandas DataFrame, scipy sparse matrix or PySpark DataFrame
"""
if isinstance(X, SUPPORTED_TYPES) and self.transferUsingDF:
pdfX = convertToPandasDF(X)
df = assemble(self.sparkSession, pdfX, pdfX.columns, self.features_col).select(self.features_col)
return df._jdf
elif isinstance(X, SUPPORTED_TYPES):
return convertToMatrixBlock(self.sc, X)
elif hasattr(X, '_jdf') and self.features_col in X.columns:
# No need to assemble as input DF is likely coming via MLPipeline
return X._jdf
elif hasattr(X, '_jdf'):
assembler = VectorAssembler(inputCols=X.columns, outputCol=self.features_col)
df = assembler.transform(X)
return df._jdf
else:
raise Exception('Unsupported input type')
def _convertJavaOutputToPythonObject(self, X, output):
"""
Converts a java-side output object (either MatrixBlock or Java DataFrame) to a python object (based on the type of X).
Parameters
----------
X: NumPy ndarray, Pandas DataFrame, scipy sparse matrix or PySpark DataFrame
output: a java-side object (either MatrixBlock or Java DataFrame)
"""
if isinstance(X, SUPPORTED_TYPES) and self.transferUsingDF:
retDF = DataFrame(output, self.sparkSession)
retPDF = retDF.sort('__INDEX').select('prediction').toPandas()
return retPDF.as_matrix().flatten() if isinstance(X, np.ndarray) else retPDF
elif isinstance(X, SUPPORTED_TYPES):
return convertToNumPyArr(self.sc, output)
elif hasattr(X, '_jdf'):
retDF = DataFrame(output, self.sparkSession)
# Return DF
return retDF.sort('__INDEX')
else:
raise Exception('Unsupported input type')
def predict_proba(self, X):
"""
Invokes the transform_probability method on Estimator object on JVM if X is one of the supported data types.
Return predicted class probabilities for X.
Parameters
----------
X: NumPy ndarray, Pandas DataFrame, scipy sparse matrix or PySpark DataFrame
"""
global default_jvm_stdout, default_jvm_stdout_parallel_flush
if hasattr(X, '_jdf'):
return self.predict(X)
elif self.transferUsingDF:
raise ValueError('The parameter transferUsingDF is not valid for the method predict_proba')
try:
if self.estimator is not None and self.model is not None:
self.estimator.copyProperties(self.model)
except AttributeError:
pass
try:
if isinstance(X, str):
return self.model.transform_probability(X)
jX = self._convertPythonXToJavaObject(X)
if default_jvm_stdout:
with jvm_stdout(parallel_flush=default_jvm_stdout_parallel_flush):
return self._convertJavaOutputToPythonObject(X, self.model.transform_probability(jX))
else:
return self._convertJavaOutputToPythonObject(X, self.model.transform_probability(jX))
except Py4JError:
traceback.print_exc()
# Returns either a DataFrame or MatrixBlock after calling transform(X:MatrixBlock, y:MatrixBlock) on Model object on JVM
def predict(self, X):
"""
Invokes the transform method on Estimator object on JVM if X is one of the supported data types
Parameters
----------
X: NumPy ndarray, Pandas DataFrame, scipy sparse matrix or PySpark DataFrame or file path
"""
global default_jvm_stdout, default_jvm_stdout_parallel_flush
try:
if self.estimator is not None and self.model is not None:
self.estimator.copyProperties(self.model)
except AttributeError:
pass
try:
if isinstance(X, str):
return self.model.transform(X)
jX = self._convertPythonXToJavaObject(X)
if default_jvm_stdout:
with jvm_stdout(parallel_flush=default_jvm_stdout_parallel_flush):
ret = self._convertJavaOutputToPythonObject(X, self.model.transform(jX))
else:
ret = self._convertJavaOutputToPythonObject(X, self.model.transform(jX))
return self.decode(ret) if isinstance(X, SUPPORTED_TYPES) else ret
except Py4JError:
traceback.print_exc()
class BaseSystemMLClassifier(BaseSystemMLEstimator):
def encode(self, y):
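# The +1 shift produces 1-based class labels, which the SystemML (mllearn)
# side appears to expect; decode() below reverses this shift.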
self.le = LabelEncoder()
self.le.fit(y)
return self.le.transform(y) + 1
def decode(self, y):
if not hasattr(self, 'le'):
self.le = None
if not hasattr(self, 'labelMap'):
self.labelMap = None
if self.le is not None:
return self.le.inverse_transform(np.asarray(y - 1, dtype=int))
elif self.labelMap is not None:
return [ self.labelMap[int(i)] for i in y ]
else:
return y
def predict(self, X):
predictions = super(BaseSystemMLClassifier, self).predict(X)
from pyspark.sql.dataframe import DataFrame as df
if type(predictions) == df:
return predictions
else:
try:
return np.asarray(predictions, dtype='double')
except ValueError:
print(type(predictions))
return np.asarray(predictions, dtype='str')
def score(self, X, y):
"""
Scores the predicted value with ground truth 'y'
Parameters
----------
X: NumPy ndarray, Pandas DataFrame, scipy sparse matrix
y: NumPy ndarray, Pandas DataFrame, scipy sparse matrix
"""
predictions = np.asarray(self.predict(X))
if np.issubdtype(predictions.dtype.type, np.number):
return accuracy_score(y, predictions)
else:
return accuracy_score(np.asarray(y, dtype='str'), np.asarray(predictions, dtype='str'))
def loadLabels(self, file_path):
createJavaObject(self.sc, 'dummy')
utilObj = self.sc._jvm.org.apache.sysml.api.ml.Utils()
if utilObj.checkIfFileExists(file_path):
df = self.sparkSession.read.csv(file_path, header=False).toPandas()
keys = np.asarray(df._c0, dtype='int')
values = np.asarray(df._c1, dtype='str')
self.labelMap = {}
for i in range(len(keys)):
self.labelMap[int(keys[i])] = values[i]
# self.encode(classes) # Giving incorrect results
def load(self, weights, sep='/', eager=False):
"""
Load a pretrained model.
Parameters
----------
weights: directory where learned weights are stored
sep: separator to use (default: '/')
eager: load the model eagerly. This flag should only be used for debugging purposes. (default: False)
"""
self.weights = weights
global default_jvm_stdout, default_jvm_stdout_parallel_flush
if default_jvm_stdout:
with jvm_stdout(parallel_flush=default_jvm_stdout_parallel_flush):
self.model.load(self.sc._jsc, weights, sep, eager)
else:
self.model.load(self.sc._jsc, weights, sep, eager)
self.loadLabels(weights + '/labels.txt')
def save(self, outputDir, format='binary', sep='/'):
"""
Save a trained model.
Parameters
----------
outputDir: Directory to save the model to
format: optional format (default: 'binary')
sep: separator to use (default: '/')
"""
global default_jvm_stdout, default_jvm_stdout_parallel_flush
if self.model != None:
if default_jvm_stdout:
with jvm_stdout(parallel_flush=default_jvm_stdout_parallel_flush):
self.model.save(self.sc._jsc, outputDir, format, sep)
else:
self.model.save(self.sc._jsc, outputDir, format, sep)
labelMapping = None
if hasattr(self, 'le') and self.le is not None:
labelMapping = dict(enumerate(list(self.le.classes_), 1))
elif hasattr(self, 'labelMap') and self.labelMap is not None:
labelMapping = self.labelMap
if labelMapping is not None:
lStr = [ [ int(k), str(labelMapping[k]) ] for k in labelMapping ]
df = self.sparkSession.createDataFrame(lStr)
df.write.csv(outputDir + sep + 'labels.txt', mode='overwrite', header=False)
else:
raise Exception('Cannot save as you need to train the model first using fit')
return self
class BaseSystemMLRegressor(BaseSystemMLEstimator):
def encode(self, y):
return y
def decode(self, y):
return y
def score(self, X, y):
"""
Scores the predicted value with ground truth 'y'
Parameters
----------
X: NumPy ndarray, Pandas DataFrame, scipy sparse matrix
y: NumPy ndarray, Pandas DataFrame, scipy sparse matrix
"""
return r2_score(y, self.predict(X), multioutput='variance_weighted')
def load(self, weights=None, sep='/', eager=False):
"""
Load a pretrained model.
Parameters
----------
weights: directory where learned weights are stored (default: None)
sep: separator to use (default: '/')
eager: load the model eagerly (default: False)
"""
self.weights = weights
global default_jvm_stdout, default_jvm_stdout_parallel_flush
if default_jvm_stdout:
with jvm_stdout(parallel_flush=default_jvm_stdout_parallel_flush):
self.model.load(self.sc._jsc, weights, sep, eager)
else:
self.model.load(self.sc._jsc, weights, sep, eager)
def save(self, outputDir, format='binary', sep='/'):
"""
Save a trained model.
Parameters
----------
outputDir: Directory to save the model to
format: optional format (default: 'binary')
sep: separator to use (default: '/')
"""
global default_jvm_stdout, default_jvm_stdout_parallel_flush
if self.model != None:
if default_jvm_stdout:
with jvm_stdout(parallel_flush=default_jvm_stdout_parallel_flush):
self.model.save(outputDir, format, sep)
else:
self.model.save(outputDir, format, sep)
else:
raise Exception('Cannot save as you need to train the model first using fit')
return self
class LogisticRegression(BaseSystemMLClassifier):
"""
Performs both binomial and multinomial logistic regression.
Examples
--------
Scikit-learn way
>>> from sklearn import datasets, neighbors
>>> from systemml.mllearn import LogisticRegression
>>> from pyspark.sql import SparkSession
>>> sparkSession = SparkSession.builder.getOrCreate()
>>> digits = datasets.load_digits()
>>> X_digits = digits.data
>>> y_digits = digits.target + 1
>>> n_samples = len(X_digits)
>>> X_train = X_digits[:.9 * n_samples]
>>> y_train = y_digits[:.9 * n_samples]
>>> X_test = X_digits[.9 * n_samples:]
>>> y_test = y_digits[.9 * n_samples:]
>>> logistic = LogisticRegression(sparkSession)
>>> print('LogisticRegression score: %f' % logistic.fit(X_train, y_train).score(X_test, y_test))
MLPipeline way
>>> from pyspark.ml import Pipeline
>>> from systemml.mllearn import LogisticRegression
>>> from pyspark.ml.feature import HashingTF, Tokenizer
>>> from pyspark.sql import SparkSession
>>> sparkSession = SparkSession.builder.getOrCreate()
>>> training = sparkSession.createDataFrame([
>>> (0L, "a b c d e spark", 1.0),
>>> (1L, "b d", 2.0),
>>> (2L, "spark f g h", 1.0),
>>> (3L, "hadoop mapreduce", 2.0),
>>> (4L, "b spark who", 1.0),
>>> (5L, "g d a y", 2.0),
>>> (6L, "spark fly", 1.0),
>>> (7L, "was mapreduce", 2.0),
>>> (8L, "e spark program", 1.0),
>>> (9L, "a e c l", 2.0),
>>> (10L, "spark compile", 1.0),
>>> (11L, "hadoop software", 2.0)
>>> ], ["id", "text", "label"])
>>> tokenizer = Tokenizer(inputCol="text", outputCol="words")
>>> hashingTF = HashingTF(inputCol="words", outputCol="features", numFeatures=20)
>>> lr = LogisticRegression(sparkSession)
>>> pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])
>>> model = pipeline.fit(training)
>>> test = sparkSession.createDataFrame([
>>> (12L, "spark i j k"),
>>> (13L, "l m n"),
>>> (14L, "mapreduce spark"),
>>> (15L, "apache hadoop")], ["id", "text"])
>>> prediction = model.transform(test)
>>> prediction.show()
"""
def __init__(self, sparkSession, penalty='l2', fit_intercept=True, normalize=False, max_iter=100, max_inner_iter=0, tol=0.000001, C=1.0, solver='newton-cg', transferUsingDF=False):
"""
Performs both binomial and multinomial logistic regression.
Parameters
----------
sparkSession: PySpark SparkSession
penalty: Only 'l2' supported
fit_intercept: Specifies whether to add intercept or not (default: True)
normalize: This parameter is ignored when fit_intercept is set to False. (default: False)
max_iter: Maximum number of outer (Fisher scoring) iterations (default: 100)
max_inner_iter: Maximum number of inner (conjugate gradient) iterations, or 0 if no maximum limit provided (default: 0)
tol: Tolerance used in the convergence criterion (default: 0.000001)
C: 1/regularization parameter (default: 1.0 similar to scikit-learn. To disable regularization, please use float("inf"))
solver: Only 'newton-cg' solver supported
"""
self.sparkSession = sparkSession
self.sc = sparkSession._sc
createJavaObject(self.sc, 'dummy')
self.uid = "logReg"
self.estimator = self.sc._jvm.org.apache.sysml.api.ml.LogisticRegression(self.uid, self.sc._jsc.sc())
self.estimator.setMaxOuterIter(max_iter)
self.estimator.setMaxInnerIter(max_inner_iter)
reg = 0.0 if C == float("inf") else 1.0 / C
icpt = 2 if fit_intercept == True and normalize == True else int(fit_intercept)
self.estimator.setRegParam(reg)
self.estimator.setTol(tol)
self.estimator.setIcpt(icpt)
self.transferUsingDF = transferUsingDF
self.setOutputRawPredictionsToFalse = True
self.model = self.sc._jvm.org.apache.sysml.api.ml.LogisticRegressionModel(self.estimator)
if penalty != 'l2':
raise Exception('Only l2 penalty is supported')
if solver != 'newton-cg':
raise Exception('Only newton-cg solver supported')
class LinearRegression(BaseSystemMLRegressor):
"""
Performs linear regression to model the relationship between one numerical response variable and one or more explanatory (feature) variables.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from systemml.mllearn import LinearRegression
>>> from pyspark.sql import SparkSession
>>> # Load the diabetes dataset
>>> diabetes = datasets.load_diabetes()
>>> # Use only one feature
>>> diabetes_X = diabetes.data[:, np.newaxis, 2]
>>> # Split the data into training/testing sets
>>> diabetes_X_train = diabetes_X[:-20]
>>> diabetes_X_test = diabetes_X[-20:]
>>> # Split the targets into training/testing sets
>>> diabetes_y_train = diabetes.target[:-20]
>>> diabetes_y_test = diabetes.target[-20:]
>>> # Create linear regression object
>>> regr = LinearRegression(sparkSession, solver='newton-cg')
>>> # Train the model using the training sets
>>> regr.fit(diabetes_X_train, diabetes_y_train)
>>> # The mean square error
>>> print("Residual sum of squares: %.2f" % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
"""
def __init__(self, sparkSession, fit_intercept=True, normalize=False, max_iter=100, tol=0.000001, C=float("inf"), solver='newton-cg', transferUsingDF=False):
"""
Performs linear regression to model the relationship between one numerical response variable and one or more explanatory (feature) variables.
Parameters
----------
sparkSession: PySpark SparkSession
fit_intercept: Specifies whether to add intercept or not (default: True)
normalize: If True, the regressors X will be normalized before regression. This parameter is ignored when fit_intercept is set to False. (default: False)
max_iter: Maximum number of conjugate gradient iterations, or 0 if no maximum limit provided (default: 100)
tol: Tolerance used in the convergence criterion (default: 0.000001)
C: 1/regularization parameter (default: float("inf"), as scikit-learn does not apply regularization by default)
solver: Supports either 'newton-cg' or 'direct-solve' (default: 'newton-cg').
Depending on the size and the sparsity of the feature matrix, one or the other solver may be more efficient.
'direct-solve' solver is more efficient when the number of features is relatively small (m < 1000) and
input matrix X is either tall or fairly dense; otherwise 'newton-cg' solver is more efficient.
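Example (illustrative sketch; assumes an existing SparkSession named sparkSession):
>>> regr = LinearRegression(sparkSession, fit_intercept=True, solver='direct-solve')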
"""
self.sparkSession = sparkSession
self.sc = sparkSession._sc
createJavaObject(self.sc, 'dummy')
self.uid = "lr"
if solver == 'newton-cg' or solver == 'direct-solve':
self.estimator = self.sc._jvm.org.apache.sysml.api.ml.LinearRegression(self.uid, self.sc._jsc.sc(), solver)
else:
raise Exception('Only newton-cg solver supported')
self.estimator.setMaxIter(max_iter)
reg = 0.0 if C == float("inf") else 1.0 / C
icpt = 2 if fit_intercept == True and normalize == True else int(fit_intercept)
self.estimator.setRegParam(reg)
self.estimator.setTol(tol)
self.estimator.setIcpt(icpt)
self.transferUsingDF = transferUsingDF
self.setOutputRawPredictionsToFalse = False
self.model = self.sc._jvm.org.apache.sysml.api.ml.LinearRegressionModel(self.estimator)
class SVM(BaseSystemMLClassifier):
"""
Performs both binary-class and multiclass SVM (Support Vector Machines).
Examples
--------
>>> from sklearn import datasets, neighbors
>>> from systemml.mllearn import SVM
>>> from pyspark.sql import SparkSession
>>> sparkSession = SparkSession.builder.getOrCreate()
>>> digits = datasets.load_digits()
>>> X_digits = digits.data
>>> y_digits = digits.target
>>> n_samples = len(X_digits)
>>> X_train = X_digits[:int(.9 * n_samples)]
>>> y_train = y_digits[:int(.9 * n_samples)]
>>> X_test = X_digits[int(.9 * n_samples):]
>>> y_test = y_digits[int(.9 * n_samples):]
>>> svm = SVM(sparkSession, is_multi_class=True)
>>> print('SVM score: %f' % svm.fit(X_train, y_train).score(X_test, y_test))
"""
def __init__(self, sparkSession, fit_intercept=True, normalize=False, max_iter=100, tol=0.000001, C=1.0, is_multi_class=False, transferUsingDF=False):
"""
Performs both binary-class and multiclass SVM (Support Vector Machines).
Parameters
----------
sparkSession: PySpark SparkSession
fit_intercept: Specifies whether to add intercept or not (default: True)
normalize: If True, the regressors X will be normalized before training. This parameter is ignored when fit_intercept is set to False. (default: False)
max_iter: Maximum number of iterations (default: 100)
tol: Tolerance used in the convergence criterion (default: 0.000001)
C: 1/regularization parameter (default: 1.0 similar to scikit-learn. To disable regularization, please use float("inf"))
is_multi_class: Specifies whether to use binary-class SVM or multi-class SVM algorithm (default: False)
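Example (illustrative sketch; assumes an existing SparkSession named sparkSession):
>>> svm = SVM(sparkSession, is_multi_class=True, C=1.0)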
"""
self.sparkSession = sparkSession
self.sc = sparkSession._sc
self.uid = "svm"
createJavaObject(self.sc, 'dummy')
self.is_multi_class = is_multi_class
self.estimator = self.sc._jvm.org.apache.sysml.api.ml.SVM(self.uid, self.sc._jsc.sc(), is_multi_class)
self.estimator.setMaxIter(max_iter)
if C <= 0:
raise Exception('C has to be positive')
reg = 0.0 if C == float("inf") else 1.0 / C
icpt = 2 if fit_intercept == True and normalize == True else int(fit_intercept)
self.estimator.setRegParam(reg)
self.estimator.setTol(tol)
self.estimator.setIcpt(icpt)
self.transferUsingDF = transferUsingDF
self.setOutputRawPredictionsToFalse = False
self.model = self.sc._jvm.org.apache.sysml.api.ml.SVMModel(self.estimator, self.is_multi_class)
class NaiveBayes(BaseSystemMLClassifier):
"""
Performs Naive Bayes.
Examples
--------
>>> from sklearn.datasets import fetch_20newsgroups
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> from systemml.mllearn import NaiveBayes
>>> from sklearn import metrics
>>> from pyspark.sql import SparkSession
>>> sparkSession = SparkSession.builder.getOrCreate()
>>> categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']
>>> newsgroups_train = fetch_20newsgroups(subset='train', categories=categories)
>>> newsgroups_test = fetch_20newsgroups(subset='test', categories=categories)
>>> vectorizer = TfidfVectorizer()
>>> # Both vectors and vectors_test are SciPy CSR matrix
>>> vectors = vectorizer.fit_transform(newsgroups_train.data)
>>> vectors_test = vectorizer.transform(newsgroups_test.data)
>>> nb = NaiveBayes(sparkSession)
>>> nb.fit(vectors, newsgroups_train.target)
>>> pred = nb.predict(vectors_test)
>>> metrics.f1_score(newsgroups_test.target, pred, average='weighted')
"""
def __init__(self, sparkSession, laplace=1.0, transferUsingDF=False):
"""
Performs Naive Bayes.
Parameters
----------
sparkSession: PySpark SparkSession
laplace: Laplace smoothing specified by the user to avoid creation of 0 probabilities (default: 1.0)
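Example (illustrative sketch; assumes an existing SparkSession named sparkSession):
>>> nb = NaiveBayes(sparkSession, laplace=1.0)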
"""
self.sparkSession = sparkSession
self.sc = sparkSession._sc
self.uid = "nb"
createJavaObject(self.sc, 'dummy')
self.estimator = self.sc._jvm.org.apache.sysml.api.ml.NaiveBayes(self.uid, self.sc._jsc.sc())
self.estimator.setLaplace(laplace)
self.transferUsingDF = transferUsingDF
self.setOutputRawPredictionsToFalse = False
self.model = self.sc._jvm.org.apache.sysml.api.ml.NaiveBayesModel(self.estimator)
class Caffe2DML(BaseSystemMLClassifier):
"""
Performs training/prediction for a given caffe network.
Examples
--------
>>> from systemml.mllearn import Caffe2DML
>>> from mlxtend.data import mnist_data
>>> import numpy as np
>>> from sklearn.utils import shuffle
>>> X, y = mnist_data()
>>> X, y = shuffle(X, y)
>>> imgShape = (1, 28, 28)
>>> import urllib
>>> urllib.urlretrieve('https://raw.githubusercontent.com/niketanpansare/model_zoo/master/caffe/vision/lenet/mnist/lenet.proto', 'lenet.proto')
>>> urllib.urlretrieve('https://raw.githubusercontent.com/niketanpansare/model_zoo/master/caffe/vision/lenet/mnist/lenet_solver.proto', 'lenet_solver.proto')
>>> caffe2DML = Caffe2DML(spark, 'lenet_solver.proto').set(max_iter=500)
>>> caffe2DML.fit(X, y)
"""
def __init__(self, sparkSession, solver, input_shape, transferUsingDF=False):
"""
Performs training/prediction for a given caffe network.
Parameters
----------
sparkSession: PySpark SparkSession
solver: caffe solver file path
input_shape: 3-element list (number of channels, input height, input width)
transferUsingDF: whether to pass the input dataset via PySpark DataFrame (default: False)
"""
self.sparkSession = sparkSession
self.sc = sparkSession._sc
createJavaObject(self.sc, 'dummy')
self.uid = "Caffe2DML"
self.model = None
if len(input_shape) != 3:
raise ValueError('Expected input_shape to be a list of 3 elements')
solver = self.sc._jvm.org.apache.sysml.api.dl.Utils.readCaffeSolver(solver)
self.estimator = self.sc._jvm.org.apache.sysml.api.dl.Caffe2DML(self.sc._jsc.sc(), solver, str(input_shape[0]), str(input_shape[1]), str(input_shape[2]))
self.transferUsingDF = transferUsingDF
self.setOutputRawPredictionsToFalse = False
def load(self, weights=None, sep='/', ignore_weights=None, eager=False):
"""
Load a pretrained model.
Parameters
----------
weights: directory where learned weights are stored (default: None)
sep: separator to use (default: '/')
ignore_weights: names of layers to not read from the weights directory (list of string, default:None)
eager: load the model eagerly (default: False)
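Example (illustrative sketch; the weights directory and the 'fc8' layer name are hypothetical):
>>> caffe2DML.load(weights='/tmp/lenet_weights', ignore_weights=['fc8'])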
"""
global default_jvm_stdout, default_jvm_stdout_parallel_flush
self.weights = weights
self.estimator.setInput("$weights", str(weights))
self.model = self.sc._jvm.org.apache.sysml.api.dl.Caffe2DMLModel(self.estimator)
if default_jvm_stdout:
with jvm_stdout(parallel_flush=default_jvm_stdout_parallel_flush):
self.model.load(self.sc._jsc, weights, sep, eager)
else:
self.model.load(self.sc._jsc, weights, sep, eager)
self.loadLabels(weights + '/labels.txt')
if ignore_weights is not None:
self.estimator.setWeightsToIgnore(ignore_weights)
def set(self, debug=None, train_algo=None, test_algo=None, parallel_batches=None, output_activations=None, perform_one_hot_encoding=None, parfor_parameters=None):
"""
Set input to Caffe2DML
Parameters
----------
debug: whether to add debugging DML code such as a classification report and printing of the DML script (default: False)
train_algo: can be minibatch, batch, allreduce_parallel_batches or allreduce (default: minibatch)
test_algo: can be minibatch, batch, allreduce_parallel_batches or allreduce (default: minibatch)
parallel_batches: number of parallel batches
output_activations: (developer flag) directory to output activations of each layer as csv during prediction. To be used only in batch mode (default: None)
perform_one_hot_encoding: should perform one-hot encoding in DML using table function (default: False)
parfor_parameters: dictionary for parfor parameters when using allreduce-style algorithms (default: "")
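Example (illustrative sketch, using only the parameters documented above):
>>> caffe2DML.set(train_algo='batch', test_algo='minibatch', debug=True)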
"""
if debug is not None: self.estimator.setInput("$debug", str(debug).upper())
if train_algo is not None: self.estimator.setInput("$train_algo", str(train_algo).lower())
if test_algo is not None: self.estimator.setInput("$test_algo", str(test_algo).lower())
if parallel_batches is not None: self.estimator.setInput("$parallel_batches", str(parallel_batches))
if output_activations is not None: self.estimator.setInput("$output_activations", str(output_activations))
if perform_one_hot_encoding is not None: self.estimator.setInput("$perform_one_hot_encoding", str(perform_one_hot_encoding).lower())
if parfor_parameters is not None:
if isinstance(parfor_parameters, dict):
# Convert dictionary to comma-separated list
parfor_parameters = ''.join([ ', ' + str(k) + '=' + str(v) for k, v in parfor_parameters.items()]) if len(parfor_parameters) > 0 else ''
self.estimator.setInput("$parfor_parameters", parfor_parameters)
else:
raise TypeError("parfor_parameters should be a dictionary")
return self
def summary(self):
"""
Print the summary of the network
"""
import pyspark
global default_jvm_stdout, default_jvm_stdout_parallel_flush
if type(self.sparkSession) == pyspark.sql.session.SparkSession:
if default_jvm_stdout:
with jvm_stdout(parallel_flush=default_jvm_stdout_parallel_flush):
self.estimator.summary(self.sparkSession._jsparkSession)
else:
self.estimator.summary(self.sparkSession._jsparkSession)
else:
raise TypeError('Please use spark session of type pyspark.sql.session.SparkSession in the constructor')
class Keras2DML(Caffe2DML):
"""
Performs training/prediction for a given keras model.
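Examples
--------
Illustrative sketch; assumes a compiled Keras model named keras_model, a SparkSession named spark, and training data X, y (the input_shape values are arbitrary):
>>> from systemml.mllearn import Keras2DML
>>> sysml_model = Keras2DML(spark, keras_model, input_shape=[3, 224, 224])
>>> sysml_model.fit(X, y)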
"""
def __init__(self, sparkSession, keras_model, input_shape, transferUsingDF=False, load_keras_weights=True, weights=None, labels=None, batch_size=64, max_iter=2000, test_iter=10, test_interval=500, display=100, lr_policy="step", weight_decay=5e-4, regularization_type="L2"):
"""
Performs training/prediction for a given keras model.
Parameters
----------
sparkSession: PySpark SparkSession
keras_model: keras model
input_shape: 3-element list (number of channels, input height, input width)
transferUsingDF: whether to pass the input dataset via PySpark DataFrame (default: False)
load_keras_weights: whether to load weights from the keras_model. If False, the weights will be initialized to random value using NN libraries' init method (default: True)
weights: directory where learned weights are stored (default: None)
labels: file containing mapping between index and string labels (default: None)
batch_size: size of the input batch (default: 64)
max_iter: maximum number of iterations (default: 2000)
test_iter: test_iter for caffe solver (default: 10)
test_interval: test_interval for caffe solver (default: 500)
display: display for caffe solver (default: 100)
lr_policy: learning rate policy for caffe solver (default: "step")
weight_decay: regularization strength (default: 5e-4)
regularization_type: regularization type (default: "L2")
"""
from .keras2caffe import *
import tempfile
if type(keras_model) == keras.models.Sequential:
# Convert the sequential model to functional model
if keras_model.model is None:
keras_model.build()
keras_model = keras_model.model
self.name = keras_model.name
createJavaObject(sparkSession._sc, 'dummy')
if not hasattr(keras_model, 'optimizer'):
keras_model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.95, decay=5e-4, nesterov=True))
convertKerasToCaffeNetwork(keras_model, self.name + ".proto", int(batch_size))
convertKerasToCaffeSolver(keras_model, self.name + ".proto", self.name + "_solver.proto", int(max_iter), int(test_iter), int(test_interval), int(display), lr_policy, weight_decay, regularization_type)
self.weights = tempfile.mkdtemp() if weights is None else weights
if load_keras_weights:
convertKerasToSystemMLModel(sparkSession, keras_model, self.weights)
if labels is not None and (labels.startswith('https:') or labels.startswith('http:')):
import urllib
urllib.urlretrieve(labels, os.path.join(self.weights, 'labels.txt'))
elif labels is not None:
from shutil import copyfile
copyfile(labels, os.path.join(self.weights, 'labels.txt'))
super(Keras2DML,self).__init__(sparkSession, self.name + "_solver.proto", input_shape, transferUsingDF)
if load_keras_weights:
self.load(self.weights)
def close(self):
import shutil
shutil.rmtree(self.weights)
|
apache-2.0
|
ManuelMBaumann/freqdom_compare
|
python_code/plot_misc.py
|
1
|
3324
|
import scipy.sparse as sparse
import matplotlib.pyplot as plt
import scipy.io as io
from math import sqrt, atan, cos, sin, pi, atan2
import numpy as np
from nutils import *
cc = list('gbcmy')
def opt_tau_anal(e,w,W):
r = sqrt(w*W*(1.0+e**2))
th = atan(-sqrt( (e**2*(W+w)**2+(W-w)**2) /(4.0*w*W) ))
return r*cos(th) + 1j*(r*sin(th))
def plot_circles_on_circle(A, B, om, tau, dd, plot_spec=False, rot=False):
NOP = 100
th = np.linspace(0.0,2.0*pi,NOP)
Nom = len(om)
col = list('r')
j = -1
for k in range(1,Nom-1):
j=j+1
if (j>4):
j=0
col.append(cc[j])
col.append('r')
eta = om/(om-tau)
C = 0.0 + 1j*( (dd*abs(tau)**2)/(2.0*tau.imag*(tau.imag+dd*tau.real)) )
R = sqrt( abs(tau)**2*(dd**2+1.0)/(4.0*(tau.imag+dd*tau.real)**2) )
X = R*np.cos(th)+C.real
Y = R*np.sin(th)+C.imag
with plot.PyPlot( 'circles', figsize=(10,10)) as plt:
plt.plot(X, Y, 'k')
plt.plot(C.real, C.imag, 'kx', markersize=10)
for k in range(0,Nom):
ck = -np.conj(tau)/(tau-np.conj(tau)) - eta[k]
r = abs(tau/(tau-np.conj(tau)))
x = r*np.cos(th)+ck.real
y = r*np.sin(th)+ck.imag
if rot is not False:
tmp = x + 1j*y
tmp = tmp*rot[k,k]
ck = ck*rot[k,k]
plt.plot(tmp.real, tmp.imag, col[k]+'--')
plt.plot(ck.real, ck.imag, col[k]+'x', markersize=10)
else:
plt.plot(x, y, col[k]+'--')
plt.plot(ck.real, ck.imag, col[k]+'x', markersize=10)
if plot_spec:
n = A.shape[0]
I = sparse.identity(n).tocsc()
P = (A - tau*B).tocsc()
Pinv = sparse.linalg.inv(P)
vals, vecs = sparse.linalg.eigs(A.tocsc()*Pinv.tocsc()-eta[k]*I,k=n-2)
plt.plot(vals.real, vals.imag, col[k]+'x', markersize=4)
plt.axhline(linewidth=0.5, color='k')
plt.axvline(linewidth=0.5, color='k')
plt.axis('equal')
def plot_msconvergence(resvec):
Nom = resvec.shape[1]
it = resvec.shape[0]
col = list('r')
j = -1
for k in range(1,Nom-1):
j=j+1
if (j>4):
j=0
col.append(cc[j])
col.append('r')
x_as = np.linspace(0,it,it)
my_leg = []
with plot.PyPlot( 'conv_pmsgmres', figsize=(10,10)) as plt:
for k in range(Nom):
plt.semilogy(x_as, resvec[:,k]/resvec[0,k],col[k])
my_leg = my_leg+['f'+str(k)]
plt.title('Convergence of pmsGMRES')
plt.xlabel('Number of matrix-vector multiplications')
plt.ylabel('Relative residual norm')
plt.ylim((1e-8,1))
plt.legend(my_leg)
plt.grid()
def plot_meconvergence(resvec):
it = len(resvec)
x_as = np.linspace(0,it,it)
with plot.PyPlot( 'conv_megmres', figsize=(10,10)) as plt:
plt.semilogy(x_as, resvec[:]/resvec[0])
plt.title('Convergence of global GMRES')
plt.xlabel('Number of operator applications')
plt.ylabel('Relative residual norm')
plt.ylim((1e-8,1))
plt.grid()
|
mit
|
INM-6/nest-git-migration
|
pynest/nest/voltage_trace.py
|
12
|
6711
|
# -*- coding: utf-8 -*-
#
# voltage_trace.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import numpy
import pylab
def from_file(fname, title=None, grayscale=False):
if nest.is_iterable(fname):
data = None
for f in fname:
if data is None:
data = numpy.loadtxt(f)
else:
data = numpy.concatenate((data, numpy.loadtxt(f)))
else:
data = numpy.loadtxt(fname)
if grayscale:
line_style = "k"
else:
line_style = ""
if len(data.shape) == 1:
print("INFO: only found 1 column in the file. Assuming that only one neuron was recorded.")
plotid = pylab.plot(data, line_style)
pylab.xlabel("Time (steps of length interval)")
elif data.shape[1] == 2:
print("INFO: found 2 columns in the file. Assuming them to be gid, pot.")
plotid = []
data_dict = {}
for d in data:
if not d[0] in data_dict:
data_dict[d[0]] = [d[1]]
else:
data_dict[d[0]].append(d[1])
for d in data_dict:
plotid.append(pylab.plot(data_dict[d], line_style, label="Neuron %i" % d))
pylab.xlabel("Time (steps of length interval)")
pylab.legend()
elif data.shape[1] == 3:
plotid = []
data_dict = {}
g = data[0][0]
t = []
for d in data:
if not d[0] in data_dict:
data_dict[d[0]] = [d[2]]
else:
data_dict[d[0]].append(d[2])
if d[0] == g:
t.append(d[1])
for d in data_dict:
plotid.append(pylab.plot(t, data_dict[d], line_style, label="Neuron %i" % d))
pylab.xlabel("Time (ms)")
pylab.legend()
else:
raise ValueError("Inappropriate data shape %i!" % data.shape)
if not title:
title = "Membrane potential from file '%s'" % fname
pylab.title(title)
pylab.ylabel("Membrane potential (mV)")
pylab.draw()
return plotid
def from_device(detec, neurons=None, title=None, grayscale=False, timeunit="ms"):
"""
Plot the membrane potential of a set of neurons recorded by the given voltmeter.
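Example (illustrative sketch; assumes a voltmeter created and connected in a NEST simulation, e.g. vm = nest.Create('voltmeter'), and that the simulation has been run):
>>> import nest.voltage_trace as vtr
>>> vtr.from_device(vm)
>>> vtr.show()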
"""
if len(detec) > 1:
raise nest.NESTError("Please provide a single voltmeter.")
if not nest.GetStatus(detec)[0]['model'] in ('voltmeter', 'multimeter'):
raise nest.NESTError("Please provide a voltmeter or a multimeter measuring V_m.")
elif nest.GetStatus(detec)[0]['model'] == 'multimeter':
if not "V_m" in nest.GetStatus(detec, "record_from")[0]:
raise nest.NESTError("Please provide a multimeter measuring V_m.")
elif (not nest.GetStatus(detec, "to_memory")[0] and
len(nest.GetStatus(detec, "record_from")[0]) > 1):
raise nest.NESTError("Please provide a multimeter measuring only V_m or record to memory!")
if nest.GetStatus(detec, "to_memory")[0]:
timefactor = 1.0
if not nest.GetStatus(detec)[0]['time_in_steps']:
if timeunit == "s":
timefactor = 1000.0
else:
timeunit = "ms"
times, voltages = _from_memory(detec)
if not len(times):
raise nest.NESTError("No events recorded! Make sure that withtime and withgid are set to True.")
if neurons is None:
neurons = voltages.keys()
plotids = []
for neuron in neurons:
time_values = numpy.array(times[neuron]) / timefactor
if grayscale:
line_style = "k"
else:
line_style = ""
try:
plotids.append(pylab.plot(time_values, voltages[neuron], line_style, label="Neuron %i" % neuron))
except KeyError:
print("INFO: Wrong ID: {0}".format(neuron))
if not title:
title = "Membrane potential"
pylab.title(title)
pylab.ylabel("Membrane potential (mV)")
if nest.GetStatus(detec)[0]['time_in_steps']:
pylab.xlabel("Steps")
else:
pylab.xlabel("Time (%s)" % timeunit)
pylab.legend(loc="best")
pylab.draw()
return plotids
elif nest.GetStatus(detec, "to_file")[0]:
fname = nest.GetStatus(detec, "filenames")[0]
return from_file(fname, title, grayscale)
else:
raise nest.NESTError("Provided devices neither records to file, nor to memory.")
def _from_memory(detec):
import array
ev = nest.GetStatus(detec, 'events')[0]
potentials = ev['V_m']
senders = ev['senders']
v = {}
t = {}
if 'times' in ev:
times = ev['times']
for s, currentsender in enumerate(senders):
if currentsender not in v:
v[currentsender] = array.array('f')
t[currentsender] = array.array('f')
v[currentsender].append(float(potentials[s]))
t[currentsender].append(float(times[s]))
else:
# reconstruct the time vector, if not stored explicitly
detec_status = nest.GetStatus(detec)[0]
origin = detec_status['origin']
start = detec_status['start']
interval = detec_status['interval']
senders_uniq = numpy.unique(senders)
num_intvls = len(senders) // len(senders_uniq)
times_s = origin + start + interval + interval * numpy.array(range(num_intvls))
for s, currentsender in enumerate(senders):
if currentsender not in v:
v[currentsender] = array.array('f')
t[currentsender] = times_s
v[currentsender].append(float(potentials[s]))
return t, v
def show():
"""
Call pylab.show() to show all figures and enter the GUI main loop.
Python will block until all figure windows are closed again.
You should call this function only once at the end of a script.
See also: http://matplotlib.sourceforge.net/faq/howto_faq.html#use-show
"""
pylab.show()
|
gpl-2.0
|
grundgruen/zipline
|
docs/source/conf.py
|
2
|
2992
|
import sys
import os
from zipline import __version__ as version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
]
extlinks = dict(issue=('https://github.com/quantopian/zipline/issues/%s', '#'))
# -- Docstrings ---------------------------------------------------------------
extensions += ['numpydoc']
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zipline'
copyright = u'2015, Quantopian Inc.'
# The full version, including alpha/beta/rc tags.
release = version
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
except ImportError:
html_theme = 'default'
html_theme_path = []
else:
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name of the Pygments (syntax highlighting) style to use.
highlight_language = 'python'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = os.path.join('svg', 'zipline.ico')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If false, no index is generated.
html_use_index = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'ziplinedoc'
intersphinx_mapping = {
'http://docs.python.org/dev': None,
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
}
doctest_global_setup = "import zipline"
todo_include_todos = True
|
apache-2.0
|
arvkevi/kneed
|
tests/test_sample.py
|
1
|
14799
|
import math
import matplotlib.pyplot as plt
import numpy as np
import pytest
from kneed.data_generator import DataGenerator as dg
from kneed.knee_locator import KneeLocator
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_figure2(interp_method):
"""From the kneedle manuscript"""
x, y = dg.figure2()
kl = KneeLocator(x, y, S=1.0, curve="concave", interp_method=interp_method)
assert math.isclose(kl.knee, 0.22, rel_tol=0.05)
assert math.isclose(kl.elbow, 0.22, rel_tol=0.05)
assert math.isclose(kl.norm_elbow, kl.knee, rel_tol=0.05)
def test_NoisyGaussian():
"""From the Kneedle manuscript"""
x, y = dg.noisy_gaussian(mu=50, sigma=10, N=1000, seed=42)
kl = KneeLocator(
x,
y,
S=1.0,
curve="concave",
interp_method="polynomial",
polynomial_degree=11,
online=True,
)
assert math.isclose(kl.knee, 63.0, rel_tol=1e-02)
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_concave_increasing(interp_method):
"""test a concave increasing function"""
x, y = dg().concave_increasing()
kn = KneeLocator(x, y, curve="concave", interp_method=interp_method)
assert kn.knee == 2
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_concave_decreasing(interp_method):
"""test a concave decreasing function"""
x, y = dg.concave_decreasing()
kn = KneeLocator(
x, y, curve="concave", direction="decreasing", interp_method=interp_method
)
assert kn.knee == 7
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_convex_increasing(interp_method):
"""test a convex increasing function"""
x, y = dg.convex_increasing()
kl = KneeLocator(x, y, curve="convex", interp_method=interp_method)
assert kl.knee == 7
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_convex_decreasing(interp_method):
"""test a convex decreasing function"""
x, y = dg.convex_decreasing()
kl = KneeLocator(
x, y, curve="convex", direction="decreasing", interp_method=interp_method
)
assert kl.knee == 2
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_concave_increasing_truncated(interp_method):
"""test a truncated concave increasing function"""
x, y = dg.concave_increasing()
kl = KneeLocator(
x[:-3] / 10, y[:-3] / 10, curve="concave", interp_method=interp_method
)
assert kl.knee == 0.2
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_concave_decreasing_truncated(interp_method):
"""test a truncated concave decreasing function"""
x, y = dg.concave_decreasing()
kl = KneeLocator(
x[:-3] / 10,
y[:-3] / 10,
curve="concave",
direction="decreasing",
interp_method=interp_method,
)
assert kl.knee == 0.4
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_convex_increasing_truncated(interp_method):
"""test a truncated convex increasing function"""
x, y = dg.convex_increasing()
kl = KneeLocator(
x[:-3] / 10, y[:-3] / 10, curve="convex", interp_method=interp_method
)
assert kl.knee == 0.4
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_convex_decreasing_truncated(interp_method):
"""test a truncated convex decreasing function"""
x, y = dg.convex_decreasing()
kl = KneeLocator(
x[:-3] / 10,
y[:-3] / 10,
curve="convex",
direction="decreasing",
interp_method=interp_method,
)
assert kl.knee == 0.2
@pytest.mark.parametrize(
"interp_method, expected", [("interp1d", 26), ("polynomial", 28)]
)
def test_convex_decreasing_bumpy(interp_method, expected):
"""test a bumpy convex decreasing function"""
x, y = dg.bumpy()
kl = KneeLocator(
x, y, curve="convex", direction="decreasing", interp_method=interp_method
)
assert kl.knee == expected
@pytest.mark.parametrize("online, expected", [(True, 482), (False, 22)])
def test_gamma_online_offline(online, expected):
"""Tests online and offline knee detection.
Note that with a large number of samples the detected knee is highly sensitive to the S parameter.
"""
np.random.seed(23)
n = 1000
x = range(1, n + 1)
y = sorted(np.random.gamma(0.5, 1.0, n), reverse=True)
kl = KneeLocator(x, y, curve="convex", direction="decreasing", online=online)
assert kl.knee == expected
def test_sensitivity():
"""Test the S parameter -- where S is the number of flat points to identify before calling a knee"""
np.random.seed(23)
sensitivity = [1, 3, 5, 10, 100, 200, 400]
detected_knees = []
expected_knees = [43, 137, 178, 258, 305, 482, 482]
n = 1000
x = range(1, n + 1)
y = sorted(np.random.gamma(0.5, 1.0, n), reverse=True)
for s, expected_knee in zip(sensitivity, expected_knees):
kl = KneeLocator(x, y, curve="convex", direction="decreasing", S=s)
detected_knees.append(kl.knee)
assert kl.knee == expected_knee
def test_sine():
x = np.arange(0, 10, 0.1)
y_sin = np.sin(x)
sine_combos = [
("decreasing", "convex"),
("increasing", "convex"),
("increasing", "concave"),
("decreasing", "concave"),
]
expected_knees = [4.5, 4.9, 7.7, 1.8]
detected_knees = []
for direction, curve in sine_combos:
kl_sine = KneeLocator(
x, y_sin, direction=direction, curve=curve, S=1, online=True
)
detected_knees.append(kl_sine.knee)
assert np.isclose(expected_knees, detected_knees).all()
def test_list_input():
"""Indirectly test that flip works on lists as input"""
x, y = dg.figure2()
kl = KneeLocator(
x.tolist(), y.tolist(), S=1.0, curve="concave", interp_method="polynomial"
)
assert math.isclose(kl.knee, 0.22, rel_tol=0.05)
def test_flat_maxima():
"""The global maxima has a sequentially equal value in the difference curve"""
x = [
0,
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
13.0,
14.0,
15.0,
16.0,
17.0,
]
y = [
1,
0.787701317715959,
0.7437774524158126,
0.6559297218155198,
0.5065885797950219,
0.36749633967789164,
0.2547584187408492,
0.16251830161054173,
0.10395314787701318,
0.06734992679355783,
0.043923865300146414,
0.027818448023426062,
0.01903367496339678,
0.013177159590043924,
0.010248901903367497,
0.007320644216691069,
0.005856515373352855,
0.004392386530014641,
]
# When S=0.0 the first local maximum is found.
kl = KneeLocator(x, y, curve="convex", direction="decreasing", S=0.0)
assert math.isclose(kl.knee, 1.0, rel_tol=0.05)
# When S=1.0 the global maximum is found.
kl = KneeLocator(x, y, curve="convex", direction="decreasing", S=1.0)
assert math.isclose(kl.knee, 8.0, rel_tol=0.05)
def test_all_knees():
x, y = dg.bumpy()
kl = KneeLocator(x, y, curve="convex", direction="decreasing", online=True)
assert np.isclose(sorted(kl.all_elbows), [26, 31, 41, 46, 53]).all()
assert np.isclose(
sorted(kl.all_norm_elbows),
[
0.2921348314606742,
0.348314606741573,
0.4606741573033708,
0.5168539325842696,
0.5955056179775281,
],
).all()
def test_y():
"""Test the y value"""
x, y = dg.figure2()
kl = KneeLocator(x, y, S=1.0, curve="concave", interp_method="interp1d")
assert math.isclose(kl.knee_y, 1.897, rel_tol=0.03)
assert math.isclose(kl.all_knees_y[0], 1.897, rel_tol=0.03)
assert math.isclose(kl.norm_knee_y, 0.758, rel_tol=0.03)
assert math.isclose(kl.all_norm_knees_y[0], 0.758, rel_tol=0.03)
assert math.isclose(kl.elbow_y, 1.897, rel_tol=0.03)
assert math.isclose(kl.all_elbows_y[0], 1.897, rel_tol=0.03)
assert math.isclose(kl.norm_elbow_y, 0.758, rel_tol=0.03)
assert math.isclose(kl.all_norm_elbows_y[0], 0.758, rel_tol=0.03)
def test_y_no_knee():
"""Test the y value, if there is no knee found."""
kl = KneeLocator(
np.array([1, 2, 3]),
np.array([0.90483742, 0.81873075, 0.74081822]),
S=1.0,
curve="convex",
direction="decreasing",
interp_method="interp1d",
online=False,
)
assert kl.knee_y is None
assert kl.norm_knee_y is None
def test_interp_method():
"""Test that the interp_method argument is valid."""
x, y = dg.figure2()
with pytest.raises(ValueError):
kl = KneeLocator(x, y, interp_method="not_a_method")
def test_x_equals_y():
"""Test that a runtime warning is raised when no maxima are found"""
x = range(10)
y = [1] * len(x)
with pytest.warns(RuntimeWarning):
kl = KneeLocator(x, y)
def test_plot_knee_normalized():
"""Test that plotting is functional"""
x, y = dg.figure2()
kl = KneeLocator(x, y, S=1.0, curve="concave", interp_method="interp1d")
num_figures_before = plt.gcf().number
kl.plot_knee_normalized()
num_figures_after = plt.gcf().number
assert num_figures_before < num_figures_after
def test_plot_knee():
"""Test that plotting is functional"""
x, y = dg.figure2()
kl = KneeLocator(x, y, S=1.0, curve="concave", interp_method="interp1d")
num_figures_before = plt.gcf().number
kl.plot_knee()
num_figures_after = plt.gcf().number
assert num_figures_before < num_figures_after
def test_logistic():
y = np.array(
[
2.00855493e-45,
1.10299045e-43,
4.48168384e-42,
1.22376580e-41,
5.10688883e-40,
1.18778110e-38,
5.88777891e-35,
4.25317895e-34,
4.06507035e-33,
6.88084518e-32,
2.99321831e-31,
1.13291723e-30,
1.05244482e-28,
2.67578448e-27,
1.22522190e-26,
2.36517846e-26,
8.30369408e-26,
1.24303033e-25,
2.27726918e-25,
1.06330422e-24,
5.55017673e-24,
1.92068553e-23,
3.31361011e-23,
1.13575247e-22,
1.75386416e-22,
6.52680518e-22,
2.05106011e-21,
6.37285545e-21,
4.16125535e-20,
1.12709507e-19,
5.75853420e-19,
1.73333796e-18,
2.70099890e-18,
7.53254646e-18,
1.38139433e-17,
3.60081965e-17,
8.08419977e-17,
1.86378584e-16,
5.36224556e-16,
8.89404640e-16,
2.34045104e-15,
4.72168880e-15,
6.84378992e-15,
2.26898430e-14,
3.10087652e-14,
2.78081199e-13,
1.06479577e-12,
2.81002203e-12,
4.22067092e-12,
9.27095863e-12,
1.54519738e-11,
4.53347819e-11,
1.35564441e-10,
2.35242087e-10,
4.45253545e-10,
9.78613696e-10,
1.53140922e-09,
2.81648560e-09,
6.70890436e-09,
1.49724785e-08,
5.59553565e-08,
1.39510811e-07,
7.64761811e-07,
1.40723957e-06,
4.97638863e-06,
2.12817943e-05,
3.26471410e-05,
1.02599591e-04,
3.18774179e-04,
5.67297630e-04,
9.22732716e-04,
1.17445643e-03,
3.59279384e-03,
3.61936491e-02,
6.39493416e-02,
1.29304829e-01,
1.72272215e-01,
3.46945901e-01,
5.02826602e-01,
6.24800042e-01,
7.38412957e-01,
7.59931663e-01,
7.73374421e-01,
7.91421897e-01,
8.29325597e-01,
8.57718637e-01,
8.73286061e-01,
8.77056835e-01,
8.93173768e-01,
9.05435646e-01,
9.17217910e-01,
9.19119179e-01,
9.24810910e-01,
9.26306908e-01,
9.28621233e-01,
9.33855835e-01,
9.37263027e-01,
9.41651642e-01,
]
)
x = np.array(
[
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
13.0,
14.0,
15.0,
16.0,
17.0,
18.0,
19.0,
20.0,
21.0,
22.0,
23.0,
24.0,
25.0,
26.0,
27.0,
28.0,
29.0,
30.0,
31.0,
32.0,
33.0,
34.0,
35.0,
36.0,
37.0,
38.0,
39.0,
40.0,
41.0,
42.0,
43.0,
44.0,
45.0,
46.0,
47.0,
48.0,
49.0,
50.0,
51.0,
52.0,
53.0,
54.0,
55.0,
56.0,
57.0,
58.0,
59.0,
60.0,
61.0,
62.0,
63.0,
64.0,
65.0,
66.0,
67.0,
68.0,
69.0,
70.0,
71.0,
72.0,
73.0,
74.0,
75.0,
76.0,
77.0,
78.0,
79.0,
80.0,
81.0,
82.0,
83.0,
84.0,
85.0,
86.0,
87.0,
88.0,
89.0,
90.0,
91.0,
92.0,
93.0,
94.0,
95.0,
96.0,
97.0,
98.0,
]
)
kl = KneeLocator(x, y, curve="convex", direction="increasing", online=True,)
assert kl.knee == 73
def test_valid_curve_direction():
"""Test that arguments to curve and direction are valid"""
with pytest.raises(ValueError):
kl = KneeLocator(range(3), [1, 3, 5], curve="bad curve")
with pytest.raises(ValueError):
kl = KneeLocator(range(3), [1, 3, 5], direction="bad direction")
|
bsd-3-clause
|
jmmease/pandas
|
pandas/io/parquet.py
|
4
|
6895
|
""" parquet compat """
from warnings import catch_warnings
from distutils.version import LooseVersion
from pandas import DataFrame, RangeIndex, Int64Index, get_option
from pandas.compat import range
from pandas.io.common import get_filepath_or_buffer
def get_engine(engine):
""" return our implementation """
if engine == 'auto':
engine = get_option('io.parquet.engine')
if engine == 'auto':
# try engines in this order
try:
return PyArrowImpl()
except ImportError:
pass
try:
return FastParquetImpl()
except ImportError:
pass
if engine not in ['pyarrow', 'fastparquet']:
raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
if engine == 'pyarrow':
return PyArrowImpl()
elif engine == 'fastparquet':
return FastParquetImpl()
class PyArrowImpl(object):
def __init__(self):
# since pandas is a dependency of pyarrow
# we need to import on first use
try:
import pyarrow
import pyarrow.parquet
except ImportError:
raise ImportError("pyarrow is required for parquet support\n\n"
"you can install via conda\n"
"conda install pyarrow -c conda-forge\n"
"\nor via pip\n"
"pip install -U pyarrow\n")
if LooseVersion(pyarrow.__version__) < '0.4.1':
raise ImportError("pyarrow >= 0.4.1 is required for parquet"
"support\n\n"
"you can install via conda\n"
"conda install pyarrow -c conda-forge\n"
"\nor via pip\n"
"pip install -U pyarrow\n")
self._pyarrow_lt_050 = LooseVersion(pyarrow.__version__) < '0.5.0'
self._pyarrow_lt_060 = LooseVersion(pyarrow.__version__) < '0.6.0'
self.api = pyarrow
def write(self, df, path, compression='snappy',
coerce_timestamps='ms', **kwargs):
path, _, _ = get_filepath_or_buffer(path)
if self._pyarrow_lt_060:
table = self.api.Table.from_pandas(df, timestamps_to_ms=True)
self.api.parquet.write_table(
table, path, compression=compression, **kwargs)
else:
table = self.api.Table.from_pandas(df)
self.api.parquet.write_table(
table, path, compression=compression,
coerce_timestamps=coerce_timestamps, **kwargs)
def read(self, path):
path, _, _ = get_filepath_or_buffer(path)
return self.api.parquet.read_table(path).to_pandas()
class FastParquetImpl(object):
def __init__(self):
# since pandas is a dependency of fastparquet
# we need to import on first use
try:
import fastparquet
except ImportError:
raise ImportError("fastparquet is required for parquet support\n\n"
"you can install via conda\n"
"conda install fastparquet -c conda-forge\n"
"\nor via pip\n"
"pip install -U fastparquet")
if LooseVersion(fastparquet.__version__) < '0.1.0':
raise ImportError("fastparquet >= 0.1.0 is required for parquet "
"support\n\n"
"you can install via conda\n"
"conda install fastparquet -c conda-forge\n"
"\nor via pip\n"
"pip install -U fastparquet")
self.api = fastparquet
def write(self, df, path, compression='snappy', **kwargs):
# thriftpy/protocol/compact.py:339:
# DeprecationWarning: tostring() is deprecated.
# Use tobytes() instead.
path, _, _ = get_filepath_or_buffer(path)
with catch_warnings(record=True):
self.api.write(path, df,
compression=compression, **kwargs)
def read(self, path):
path, _, _ = get_filepath_or_buffer(path)
return self.api.ParquetFile(path).to_pandas()
def to_parquet(df, path, engine='auto', compression='snappy', **kwargs):
"""
Write a DataFrame to the parquet format.
Parameters
----------
df : DataFrame
path : string
File path
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
'io.parquet.engine' is used; if that is also 'auto', the first
available library (pyarrow, then fastparquet) is used.
compression : str, optional, default 'snappy'
compression method, includes {'gzip', 'snappy', 'brotli'}
kwargs
Additional keyword arguments passed to the engine
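Example (illustrative sketch; assumes pyarrow or fastparquet is installed, and the output file name is arbitrary):
>>> import pandas as pd
>>> from pandas.io.parquet import to_parquet
>>> to_parquet(pd.DataFrame({'a': [1, 2, 3]}), 'example.parquet', engine='auto')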
"""
impl = get_engine(engine)
if not isinstance(df, DataFrame):
raise ValueError("to_parquet only support IO with DataFrames")
valid_types = {'string', 'unicode'}
# validate index
# --------------
# validate that we have only a default index
# raise on anything else as we don't serialize the index
if not isinstance(df.index, Int64Index):
raise ValueError("parquet does not support serializing {} "
"for the index; you can .reset_index()"
"to make the index into column(s)".format(
type(df.index)))
if not df.index.equals(RangeIndex.from_range(range(len(df)))):
raise ValueError("parquet does not support serializing a "
"non-default index for the index; you "
"can .reset_index() to make the index "
"into column(s)")
if df.index.name is not None:
raise ValueError("parquet does not serialize index meta-data on a "
"default index")
# validate columns
# ----------------
# must have value column names (strings only)
if df.columns.inferred_type not in valid_types:
raise ValueError("parquet must have string column names")
return impl.write(df, path, compression=compression, **kwargs)
def read_parquet(path, engine='auto', **kwargs):
"""
Load a parquet object from the file path, returning a DataFrame.
.. versionadded:: 0.21.0
Parameters
----------
path : string
File path
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet reader library to use. If 'auto', then the option
'io.parquet.engine' is used; if that is also 'auto', the first
available library (pyarrow, then fastparquet) is used.
kwargs are passed to the engine
Returns
-------
DataFrame
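Example (illustrative sketch; 'example.parquet' is a hypothetical file written earlier):
>>> df = read_parquet('example.parquet', engine='auto')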
"""
impl = get_engine(engine)
return impl.read(path)
|
bsd-3-clause
|
kaichogami/scikit-learn
|
examples/tree/plot_tree_regression_multioutput.py
|
73
|
1854
|
"""
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
A :ref:`decision tree <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and fit the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
|
bsd-3-clause
|
dpshelio/sunpy
|
examples/plotting/sunpy_matplotlib_colormap.py
|
1
|
1323
|
# coding: utf-8
"""
=========================================
Using the SunPy Colormaps with matplotlib
=========================================
How you can use the SunPy colormaps with matplotlib.
"""
import numpy as np
import matplotlib.pyplot as plt
import sunpy.cm as cm
###############################################################################
# When sunpy.cm is imported, the SunPy colormaps are registered
# with matplotlib. It is then possible to access a colormap by name:
sdoaia171 = plt.get_cmap('sdoaia171')
###############################################################################
# You can get the list of all SunPy colormaps with:
print(cm.cmlist.keys())
###############################################################################
# Let's now create a data array.
delta = 0.025
x = y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)
Z = (Z1 - Z2) * 2
###############################################################################
# Let's now plot the results with the colormap.
fig, ax = plt.subplots()
im = ax.imshow(Z, interpolation='bilinear', cmap=sdoaia171,
origin='lower', extent=[-3, 3, -3, 3],
vmax=abs(Z).max(), vmin=-abs(Z).max())
plt.show()
|
bsd-2-clause
|
lukeiwanski/tensorflow-opencl
|
tensorflow/contrib/learn/python/learn/dataframe/dataframe.py
|
85
|
4704
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
**kwargs: assignments of the form key=value where key is a string
and value is an `inflow.Series`, a `pandas.Series` or a numpy array.
Raises:
TypeError: keys are not strings.
TypeError: values are not `inflow.Series`, `pandas.Series` or
`numpy.ndarray`.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
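Example (illustrative sketch; assumes an existing inflow.Series instance named feature_series, and the column name 'income' is arbitrary):
df = DataFrame()
df.assign(income=feature_series)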
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if v is None:
del self._columns[k]
elif isinstance(v, Series):
self._columns[k] = v
elif isinstance(v, Transform) and v.input_valency() == 0:
self._columns[k] = v()
else:
raise TypeError(
"Column in assignment must be an inflow.Series, inflow.Transform,"
" or None; got type '%s'." % type(v).__name__)
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def exclude_columns(self, exclude_keys):
"""Returns a new DataFrame with all columns not excluded via exclude_keys.
Args:
exclude_keys: A list of strings. Each should be the name of a column in
the DataFrame. These columns will be excluded from the result.
Returns:
A new DataFrame containing all columns except those specified.
"""
result = type(self)()
for key, value in self._columns.items():
if key not in exclude_keys:
result[key] = value
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def __delitem__(self, key):
if isinstance(key, str):
key = [key]
value = [None for _ in key]
self.assign(**dict(zip(key, value)))
def build(self, **kwargs):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache, **kwargs)
for name, c in self._columns.items()}
return tensors
|
apache-2.0
|
legacysurvey/pipeline
|
py/legacyanalysis/subtractor.py
|
2
|
6281
|
import tempfile
import os
import sys
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
import fitsio
from legacypipe.survey import LegacySurveyData
from legacyzpts.legacy_zeropoints import runit, DecamMeasurer
from legacypipe.decam import DecamImage
from legacypipe.forced_photom import get_catalog_in_wcs
from legacypipe.catalog import read_fits_catalog
from tractor import Tractor
from astrometry.util.file import trymakedirs
from astrometry.util.multiproc import multiproc
class GoldsteinDecamImage(DecamImage):
'''
A subclass of the legacypipe's DECam image-handling class to handle
different file structures / header cards.
'''
def compute_filenames(self):
self.dqfn = self.imgfn.replace('.fits', '.bpm.fits')
self.wtfn = self.imgfn.replace('.fits', '.weight.fits')
assert(self.dqfn != self.imgfn)
assert(self.wtfn != self.imgfn)
def funpack_files(self, imgfn, dqfn, hdu, todelete):
return imgfn, dqfn
def read_image_primary_header(self):
hdr = super(GoldsteinDecamImage, self).read_image_primary_header()
hdr['PLPROCID'] = 'xxx'
hdr['DATE'] = 'xxx'
return hdr
def remap_dq(self, dq, hdr):
return dq
class GoldsteinDecamMeasurer(DecamMeasurer):
'''
A subclass of the legacyzpt's DECam image-handling class to handle
different file structures / header cards.
'''
def __init__(self, *args, **kwargs):
super(GoldsteinDecamMeasurer, self).__init__(*args, **kwargs)
self.plver = 'V0.0'
self.procdate = 'xxx'
self.plprocid = 'xxx'
def read_primary_header(self):
hdr = super(GoldsteinDecamMeasurer, self).read_primary_header()
hdr['WCSCAL'] = 'success'
return hdr
def read_header(self, ext):
hdr = fitsio.read_header(self.fn, ext=ext)
hdr['FWHM'] = hdr['SEEING'] / self.pixscale
return hdr
def get_bitmask_fn(self, imgfn):
return imgfn.replace('.fits', '.bpm.fits')
def read_bitmask(self):
dqfn = self.get_bitmask_fn(self.fn)
#### No extension
if self.slc is not None:
mask = fitsio.FITS(dqfn)[self.slc]
else:
mask = fitsio.read(dqfn)
mask = self.remap_bitmask(mask)
return mask
def remap_bitmask(self, mask):
return mask
def get_weight_fn(self, imgfn):
return imgfn.replace('.fits', '.weight.fits')
def main():
fn = '/global/cscratch1/sd/dstn/c4d_190730_024955_ori/c4d_190730_024955_ori.52.fits'
survey_dir = '/global/cscratch1/sd/dstn/subtractor-survey-dir'
imagedir = os.path.join(survey_dir, 'images')
trymakedirs(imagedir)
calibdir = os.path.join(survey_dir, 'calib')
psfexdir = os.path.join(calibdir, 'decam', 'psfex-merged')
trymakedirs(psfexdir)
skydir = os.path.join(calibdir, 'decam', 'splinesky-merged')
trymakedirs(skydir)
basename = os.path.basename(fn)
basename = basename.replace('.fits', '')
# Output filenames for legacyzpts calibration/zeropoint files
f,photfn = tempfile.mkstemp()
os.close(f)
surveyfn = os.path.join(survey_dir, 'survey-ccds-%s.fits.gz' % basename)
annfn = os.path.join(survey_dir, 'annotated-%s.fits' % basename)
mp = multiproc()
survey = LegacySurveyData(survey_dir)
# Use the subclass above to handle DECam images!
survey.image_typemap.update(decam=GoldsteinDecamImage)
# Grab the exposure number and CCD name
hdr = fitsio.read_header(fn)
expnum = hdr['EXPNUM']
ccdname = hdr['EXTNAME'].strip()
print('Exposure', expnum, 'CCD', ccdname)
import logging
lvl = logging.INFO
logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)
# tractor logging is *soooo* chatty
logging.getLogger('tractor.engine').setLevel(lvl + 10)
if not os.path.exists(surveyfn):
# Run calibrations and zeropoints
runit(fn, photfn, surveyfn, annfn, mp, survey=survey, camera='decam', debug=False,
choose_ccd=ccdname, splinesky=True, calibdir=calibdir,
measureclass=GoldsteinDecamMeasurer)
# Find catalog sources touching this CCD
ccds = survey.find_ccds(expnum=expnum, ccdname=ccdname)
assert(len(ccds) == 1)
ccd = ccds[0]
print('Got CCD', ccd)
# Create Tractor image
im = survey.get_image_object(ccd)
print('Got image:', im)
# Look at this sub-image, or the whole chip?
#zoomslice=None
zoomslice = (slice(0, 1000), slice(0, 1000))
tim = im.get_tractor_image(slc=zoomslice, pixPsf=True, splinesky=True,
hybridPsf=True,
normalizePsf=True,
old_calibs_ok=True)
print('Got tim:', tim)
# Read catalog files touching this CCD
catsurvey = LegacySurveyData('/global/project/projectdirs/cosmo/work/legacysurvey/dr8/south')
T = get_catalog_in_wcs(tim.subwcs, catsurvey)
print('Got', len(T), 'DR8 catalog sources within CCD')
# Gaia stars: move RA,Dec to the epoch of this image.
I = np.flatnonzero(T.ref_epoch > 0)
if len(I):
from legacypipe.survey import radec_at_mjd
print('Moving', len(I), 'Gaia stars to MJD', tim.time.toMjd())
ra,dec = radec_at_mjd(T.ra[I], T.dec[I], T.ref_epoch[I].astype(float),
T.pmra[I], T.pmdec[I], T.parallax[I],
tim.time.toMjd())
T.ra [I] = ra
T.dec[I] = dec
# Create Tractor Source objects from the catalog
cat = read_fits_catalog(T, bands=tim.band)
print('Created', len(cat), 'source objects')
# Render model image!
tr = Tractor([tim], cat)
mod = tr.getModelImage(0)
# plots
ima = dict(interpolation='nearest', origin='lower', vmin=-2*tim.sig1, vmax=10*tim.sig1,
cmap='gray')
plt.clf()
plt.imshow(tim.getImage(), **ima)
plt.title('Image')
plt.savefig('img.jpg')
plt.clf()
plt.imshow(mod, **ima)
plt.title('Model')
plt.savefig('mod.jpg')
plt.clf()
plt.imshow(tim.getImage() - mod, **ima)
plt.title('Residual')
plt.savefig('res.jpg')
if __name__ == '__main__':
main()
|
gpl-2.0
|
mattilyra/scikit-learn
|
examples/cluster/plot_mean_shift.py
|
351
|
1793
|
"""
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The bandwidth can be automatically estimated using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
bsd-3-clause
|
danaukes/popupcad
|
popupcad/widgets/operationnetwork.py
|
2
|
5918
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
import qt
import qt.QtCore as qc
import qt.QtGui as qg
import matplotlib
import matplotlib.pyplot as plt
plt.ion()
matplotlib.rcParams['backend.qt5']=qt.loaded
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import numpy
spacing = .25
arrow_style = {}
arrow_style['color'] = 'black'
arrow_style['alpha'] = 1
arrow_style['zorder']=1
arrow_style['arrowstyle']=matplotlib.patches.ArrowStyle.Simple(head_length=10, head_width=10,tail_width = 1)
arrow_style['mutation_scale'] = 1
arrow_style['linewidth'] = 1
arrow_style['connectionstyle'] = matplotlib.patches.ConnectionStyle.Arc3(rad=-0.5)
#arrow_style['connectionstyle'] = matplotlib.patches.ConnectionStyle.Bar()
arrow_style['shrinkA'] = 10
arrow_style['shrinkB'] = 10
text_style = {}
text_style['bbox']=None
text_style['clip_on']=True
text_style['size']=16
text_style['alpha'] = 1.0
text_style['color']='k'
text_style['family']='sans-serif'
text_style['fontweight'] = 'normal'
text_style['zorder'] = 2
text_style['horizontalalignment'] = 'right'
text_style['verticalalignment'] = 'center'
circle_style = {}
circle_style['s']=100
circle_style['c']='r'
circle_style['marker']='o'
circle_style['cmap']=None
circle_style['vmin']=None
circle_style['vmax']=None
circle_style['alpha']=1
circle_style['linewidths']=None
circle_style['label']=None
circle_style['zorder']= 0
class GraphView(qg.QWidget):
def __init__(self, name='Name', title='Title', graph_title='Graph Title', parent = None):
super(GraphView, self).__init__(parent)
self.name = name
self.graph_title = graph_title
self.dpi = 100
self.fig = Figure((5.0, 3.0), dpi = self.dpi, facecolor = (1,1,1), edgecolor = (0,0,0))
self.axes = self.fig.add_subplot(111)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.toolbar = NavigationToolbar(self.canvas,self)
self.layout = qg.QVBoxLayout()
self.layout.addWidget(self.toolbar)
self.layout.addWidget(self.canvas)
self.layout.setStretchFactor(self.canvas, 1)
self.setLayout(self.layout)
self.canvas.show()
def clear(self):
self.axes.clear()
def plot(self,*args,**kwargs):
self.axes.plot(*args,**kwargs)
def draw(self):
self.canvas.draw()
def add_patch(self,patch):
self.axes.add_patch(patch)
def scatter(self,*args,**kwargs):
self.axes.scatter(*args,**kwargs)
def text(self,*args,**kwargs):
self.axes.text(*args,**kwargs)
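# Minimal usage sketch for GraphView (illustrative and commented out; it
# assumes the qt shim exposes QApplication alongside the widget classes used
# above, which is not verified here):
#   app = qg.QApplication([])
#   view = GraphView(name='demo', graph_title='Demo')
#   view.plot([0, 1, 2], [0, 1, 4])
#   view.draw()
#   view.show()
#   app.exec_()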
def action_method(parentwidget):
from matplotlib.patches import FancyArrowPatch,PathPatch
from matplotlib.path import Path
import dev_tools.hierarchy
plt.ion()
operations = parentwidget.design.operations
ids = [operation.id for operation in operations]
children = {}
for item in ids:
children[item] = []
# labels = dict([(operation.id,str(operation)) for operation in operations])
# links = []
for child in operations:
# parentrefs = child.parentrefs()
for parentref in child.parentrefs():
# links.append([parentref,child.id])
children[parentref].append(child.id)
# edges = [(item[0],item[1]) for item in links]
connections = dev_tools.hierarchy.create_sorted_connections(ids,lambda item:children[item])
# num_levels = dev_tools.hierarchy.num_levels(connections)
arrow_points = {}
arrow_points2 = {}
# codes = [Path.MOVETO,Path.CURVE3,Path.CURVE3]
codes = []
codes.append(Path.MOVETO)
codes.append(Path.LINETO)
# codes.append(Path.CURVE3)
# codes.append(Path.CURVE3)
# codes.append(Path.LINETO)
# codes.append(Path.CURVE3)
# codes.append(Path.CURVE3)
# codes.append(Path.LINETO)
codes.append(Path.CURVE4)
codes.append(Path.CURVE4)
codes.append(Path.CURVE4)
codes.append(Path.LINETO)
for c in connections:
vertices = []
vertices.append((0,-c.ii))
vertices.append((.25,-c.ii))
# vertices.append((c.level+.75,c.ii))
vertices.append((c.level+1,-c.ii))
# vertices.append((c.level+1,c.ii+.25))
# vertices.append((c.level+1,c.jj-.25))
vertices.append((c.level+1,-c.jj))
# vertices.append((c.level+.75,c.jj))
vertices.append((.25,-c.jj))
vertices.append((0,-c.jj))
arrow_points[c]=Path(vertices,codes)
arrow_points2[c]=Path([(.25,-c.jj),(0,-c.jj),(0,-c.jj)],[Path.MOVETO,Path.CURVE3,Path.CURVE3])
# y = numpy.r_[spacing*len(ids):0:-1*spacing]
# xy = numpy.c_[y*0,y]
# pos = dict([(item,pos) for item,pos in zip(ids,xy)])
w = GraphView(parentwidget)
w.axes.autoscale(True)
# labelpos = dict((key,value+[-0.1,0]) for key,value in pos.items())
w.clear()
# for link,(x,y) in labelpos.items():
# w.text(x, y,labels[link],**text_style)
for ii,operation in enumerate(operations):
w.text(0,-ii,str(operation),**text_style)
# for edge in edges:
for c in connections:
w.add_patch(PathPatch(path=arrow_points[c],facecolor='none', lw=2))
w.add_patch(FancyArrowPatch(path=arrow_points2[c],**arrow_style))
# circlepos = numpy.array([(0,-item) for item in range(len(operations))])
# w.scatter(circlepos[:,0],circlepos[:,1],**circle_style)
w.axes.axis('equal')
w.axes.axis('off')
w.draw()
return w
if __name__ == '__main__':
mbox = [['Dan','Sara'],['Dan','Bill'],['Sara','Bill']]
labels= {}
labels['Sara']='Sara'
labels['Dan']='Dan'
labels['Bill']='Bill'
order = ['Dan','Sara','Bill']
    # Note: action_method() now expects a single parentwidget argument with a
    # .design.operations attribute, so this legacy three-argument call is
    # disabled; the sample data above is kept for reference.
    # action_method(mbox,labels,order)
|
mit
|
neuroidss/nupic.research
|
projects/sdr_paper/chart.py
|
17
|
1606
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (C) 2015, Numenta Inc. All rights reserved.
#
# The information and source code contained herein is the
# exclusive property of Numenta Inc. No part of this software
# may be used, reproduced, stored or distributed in any form,
# without explicit written authorization from Numenta Inc.
# ----------------------------------------------------------------------
"""TODO"""
import csv
import sys
from matplotlib import pyplot as plt
if __name__ == "__main__":
  if len(sys.argv) != 2:
    print "Must specify path argument"
    sys.exit(1)
path = sys.argv[1]
with open(path) as f:
reader = csv.reader(f)
n = None
plotParamsList = []
for i, row in enumerate(reader):
if False and n and n != int(row[0]):
for thetas, errors, label in plotParamsList:
p = plt.plot(thetas, errors, label=label)
plt.legend()
plt.show()
plotParamsList = []
n = int(row[0])
w = int(row[1])
w_p = int(row[2])
M = int(row[3])
k = int(row[4])
nTrials = int(row[5])
errors = [float(e) for e in row[6:86]]
thetas = [x+1 for x in xrange(len(errors))]
label = "n=%i, w=%i, w'=%i" % (n, w, w_p)
plotParamsList.append((thetas, errors, label))
for thetas, errors, label in plotParamsList:
#if "n=10000," in label:
p = plt.plot(thetas, errors, label=label)
plt.title("Calculated False Match Curves")
plt.xlabel("Theta")
plt.ylabel("False positive rate")
plt.legend()
plt.show()
|
agpl-3.0
|
pprett/statsmodels
|
statsmodels/distributions/empirical_distribution.py
|
4
|
5015
|
"""
Empirical CDF Functions
"""
import numpy as np
from scipy.interpolate import interp1d
def _conf_set(F, alpha=.05):
r"""
Constructs a Dvoretzky-Kiefer-Wolfowitz confidence band for the eCDF.
Parameters
----------
F : array-like
The empirical distributions
alpha : float
Set alpha for a (1 - alpha) % confidence band.
Notes
-----
Based on the DKW inequality.
    .. math:: P \left( \sup_x \left| F(x) - \hat{F}_n(x) \right| > \epsilon \right) \leq 2e^{-2n\epsilon^2}
References
----------
Wasserman, L. 2006. `All of Nonparametric Statistics`. Springer.
"""
nobs = len(F)
epsilon = np.sqrt(np.log(2./alpha) / (2 * nobs))
lower = np.clip(F - epsilon, 0, 1)
upper = np.clip(F + epsilon, 0, 1)
return lower, upper
class StepFunction(object):
"""
A basic step function.
Values at the ends are handled in the simplest way possible:
everything to the left of x[0] is set to ival; everything
to the right of x[-1] is set to y[-1].
Parameters
----------
x : array-like
y : array-like
ival : float
ival is the value given to the values to the left of x[0]. Default
is 0.
sorted : bool
Default is False.
side : {'left', 'right'}, optional
Default is 'left'. Defines the shape of the intervals constituting the
    steps. 'right' corresponds to [a, b) intervals and 'left' to (a, b].
Examples
--------
>>> import numpy as np
>>> from statsmodels.distributions.empirical_distribution import StepFunction
>>>
>>> x = np.arange(20)
>>> y = np.arange(20)
>>> f = StepFunction(x, y)
>>>
>>> print f(3.2)
3.0
>>> print f([[3.2,4.5],[24,-3.1]])
[[ 3. 4.]
[ 19. 0.]]
>>> f2 = StepFunction(x, y, side='right')
>>>
>>> print f(3.0)
2.0
>>> print f2(3.0)
3.0
"""
def __init__(self, x, y, ival=0., sorted=False, side='left'):
if side.lower() not in ['right', 'left']:
msg = "side can take the values 'right' or 'left'"
raise ValueError(msg)
self.side = side
_x = np.asarray(x)
_y = np.asarray(y)
if _x.shape != _y.shape:
msg = "x and y do not have the same shape"
raise ValueError(msg)
if len(_x.shape) != 1:
msg = 'x and y must be 1-dimensional'
raise ValueError(msg)
self.x = np.r_[-np.inf, _x]
self.y = np.r_[ival, _y]
if not sorted:
asort = np.argsort(self.x)
self.x = np.take(self.x, asort, 0)
self.y = np.take(self.y, asort, 0)
self.n = self.x.shape[0]
def __call__(self, time):
tind = np.searchsorted(self.x, time, self.side) - 1
return self.y[tind]
class ECDF(StepFunction):
"""
Return the Empirical CDF of an array as a step function.
Parameters
----------
x : array-like
Observations
side : {'left', 'right'}, optional
Default is 'right'. Defines the shape of the intervals constituting the
    steps. 'right' corresponds to [a, b) intervals and 'left' to (a, b].
Returns
-------
Empirical CDF as a step function.
Examples
--------
>>> import numpy as np
>>> from statsmodels.distributions.empirical_distribution import ECDF
>>>
>>> ecdf = ECDF([3, 3, 1, 4])
>>>
>>> ecdf([3, 55, 0.5, 1.5])
array([ 0.75, 1. , 0. , 0.25])
"""
def __init__(self, x, side='right'):
step = True
if step: #TODO: make this an arg and have a linear interpolation option?
x = np.array(x, copy=True)
x.sort()
nobs = len(x)
y = np.linspace(1./nobs,1,nobs)
super(ECDF, self).__init__(x, y, side=side, sorted=True)
        else:
            # Unreachable legacy branch: `step` is hard-coded to True above,
            # and this call references undefined `y`/`ival` as well as
            # keyword arguments that interp1d does not accept.
            return interp1d(x,y,drop_errors=False,fill_values=ival)
def monotone_fn_inverter(fn, x, vectorized=True, **keywords):
"""
    Given a monotone function fn (no checking is done to verify monotonicity)
    and a set of x values, return a linearly interpolated approximation
    to its inverse from its values on x.
"""
x = np.asarray(x)
if vectorized:
y = fn(x, **keywords)
else:
y = []
for _x in x:
y.append(fn(_x, **keywords))
y = np.array(y)
a = np.argsort(y)
return interp1d(y[a], x[a])
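# Minimal usage sketch for monotone_fn_inverter (illustrative, not part of the
# original module): invert a monotone function sampled on a grid.
#   >>> grid = np.linspace(0.1, 3.0, 100)
#   >>> exp_inv = monotone_fn_inverter(np.exp, grid)
#   >>> float(exp_inv(np.e))   # approximately 1.0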
if __name__ == "__main__":
#TODO: Make sure everything is correctly aligned and make a plotting
# function
import matplotlib.pyplot as plt
import urllib
nerve_data = urllib.urlopen('http://www.statsci.org/data/general/nerve.txt')
nerve_data = np.loadtxt(nerve_data)
x = nerve_data / 50. # was in 1/50 seconds
cdf = ECDF(x)
x.sort()
F = cdf(x)
plt.step(x, F)
lower, upper = _conf_set(F)
plt.step(x, lower, 'r')
plt.step(x, upper, 'r')
plt.xlim(0, 1.5)
plt.ylim(0, 1.05)
plt.vlines(x, 0, .05)
plt.show()
|
bsd-3-clause
|
gbrammer/sgas-lens
|
sgas/reprocess_wfc3.py
|
1
|
23705
|
"""
Scripts to reprocess WFC3 exposures with time-variable backgrounds
or satellite trails.
"""
import os
import glob
import shutil
import numpy as np
import numpy.ma
import matplotlib.pyplot as plt
try:
import astropy.io.fits as pyfits
except:
import pyfits
import logging
logger = logging.getLogger('reprocess_wfc3')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S')
ch.setFormatter(formatter)
if len(logger.handlers) == 0:
logger.addHandler(ch)
def get_flat(hdulist):
"""
Get the flat-field file specified in the header
"""
flat_file = hdulist[0].header['PFLTFILE'].replace('iref$', os.getenv('iref')+'/')
flat_im = pyfits.open(flat_file)
flat = flat_im[1].data
return flat_im, flat
def fetch_calibs(ima_file, ftpdir='https://hst-crds.stsci.edu/unchecked_get/references/hst/', verbose=True):
"""
Fetch necessary calibration files needed for running calwf3 from STScI FTP
Old FTP dir: ftp://ftp.stsci.edu/cdbs/iref/"""
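    # Usage sketch (illustrative): download any missing reference files into $iref.
    #   fetch_calibs('ibhj31grq_ima.fits')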
import os
if not os.getenv('iref'):
print('No $iref set! Put it in ~/.bashrc or ~/.cshrc.')
return False
im = pyfits.open(ima_file)
for ctype in ['BPIXTAB', 'CCDTAB', 'OSCNTAB', 'CRREJTAB', 'DARKFILE', 'NLINFILE', 'PFLTFILE', 'IMPHTTAB', 'IDCTAB']:
if verbose:
print('Calib: %s=%s' %(ctype, im[0].header[ctype]))
if im[0].header[ctype] == 'N/A':
continue
cimg = im[0].header[ctype].split('iref$')[1]
iref_file = os.path.join(os.getenv('iref'), cimg)
if not os.path.exists(iref_file):
os.system('curl -o %s %s/%s' %(iref_file, ftpdir, cimg))
return True
def split_multiaccum(ima, scale_flat=True, get_err=False):
"""
Pull out the MultiAccum reads of a RAW or IMA file into a single 3D
matrix.
Returns cube[NSAMP,1024,1014], time, NSAMP
"""
skip_ima = ('ima' in ima.filename()) & (ima[0].header['FLATCORR'] == 'COMPLETE')
if scale_flat & ~skip_ima:
#FLAT_F140W = pyfits.open(os.path.join(os.getenv('iref'), 'uc721143i_pfl.fits'))[1].data
flat_im, flat = get_flat(ima)
else:
#FLAT_F140W = 1
flat_im, flat = None, 1.
is_dark = 'drk' in ima.filename()
if is_dark:
flat = 1
NSAMP = ima[0].header['NSAMP']
sh = ima['SCI',1].shape
cube = np.zeros((NSAMP, sh[0], sh[1]))
if 'ima' in ima.filename():
dq = np.zeros((NSAMP, sh[0], sh[1]), dtype=np.int)
else:
dq = 0
if get_err:
cube_err = cube*0
time = np.zeros(NSAMP)
for i in range(NSAMP):
if (ima[0].header['UNITCORR'] == 'COMPLETE') & (~is_dark):
cube[NSAMP-1-i, :, :] = ima['SCI',i+1].data*ima['TIME',i+1].header['PIXVALUE']/flat
else:
#print 'Dark'
cube[NSAMP-1-i, :, :] = ima['SCI',i+1].data/flat
if get_err:
if ima[0].header['UNITCORR'] == 'COMPLETE':
cube_err[NSAMP-1-i, :, :] = ima['ERR',i+1].data*ima['TIME',i+1].header['PIXVALUE']/flat
else:
cube_err[NSAMP-1-i, :, :] = ima['ERR',i+1].data/flat
if 'ima' in ima.filename():
dq[NSAMP-1-i, :, :] = ima['DQ',i+1].data
time[NSAMP-1-i] = ima['TIME',i+1].header['PIXVALUE']
if get_err:
return cube, cube_err, dq, time, NSAMP
else:
return cube, dq, time, NSAMP
def make_IMA_FLT(raw='ibhj31grq_raw.fits', pop_reads=[], remove_ima=True, fix_saturated=True, flatten_ramp=True, stats_region=[[300,714], [300,714]]):
"""
Run calwf3, if necessary, to generate ima & flt files. Then put the last
read of the ima in the FLT SCI extension and let Multidrizzle flag
the CRs.
Optionally pop out reads affected by satellite trails or earthshine. The
parameter `pop_reads` is a `list` containing the reads to remove, where
a value of 1 corresponds to the first real read after the 2.9s flush.
Requires IRAFX for wfc3tools
"""
import wfc3tools
#### Remove existing products or calwf3 will die
for ext in ['flt','ima']:
if os.path.exists(raw.replace('raw', ext)):
os.remove(raw.replace('raw', ext))
#### Turn off CR rejection
raw_im = pyfits.open(raw, mode='update')
if raw_im[0].header['DETECTOR'] == 'UVIS':
return True
status = fetch_calibs(raw) #, ftpdir='ftp://ftp.stsci.edu/cdbs/iref/')
if not status:
return False
if not pop_reads:
raw_im[0].header['CRCORR'] = 'OMIT'
raw_im.flush()
#### Run calwf3
wfc3tools.calwf3(raw)
flt = pyfits.open(raw.replace('raw', 'flt'), mode='update')
ima = pyfits.open(raw.replace('raw', 'ima'))
#### Pull out the data cube, order in the more natural sense
#### of first reads first
cube, dq, time, NSAMP = split_multiaccum(ima, scale_flat=False)
#### Readnoise in 4 amps
readnoise_2D = np.zeros((1024,1024))
readnoise_2D[512: ,0:512] += ima[0].header['READNSEA']
readnoise_2D[0:512,0:512] += ima[0].header['READNSEB']
readnoise_2D[0:512, 512:] += ima[0].header['READNSEC']
readnoise_2D[512: , 512:] += ima[0].header['READNSED']
readnoise_2D = readnoise_2D**2
#### Gain in 4 amps
gain_2D = np.zeros((1024,1024))
gain_2D[512: ,0:512] += ima[0].header['ATODGNA']
gain_2D[0:512,0:512] += ima[0].header['ATODGNB']
gain_2D[0:512, 512:] += ima[0].header['ATODGNC']
gain_2D[512: , 512:] += ima[0].header['ATODGND']
### Pop out reads affected by satellite trails or earthshine
masks = glob.glob(raw.replace('.fits', '*mask.reg'))
if (len(pop_reads) > 0) | (len(masks) > 0):
print('\n****\nPop reads %s from %s\n****\n' %(pop_reads, ima.filename()))
#### Need to put dark back in for Poisson
dark_file = ima[0].header['DARKFILE'].replace('iref$', os.getenv('iref')+'/')
dark = pyfits.open(dark_file)
dark_cube, dark_dq, dark_time, dark_NSAMP = split_multiaccum(dark, scale_flat=False)
#### Need flat for Poisson
flat_im, flat = get_flat(ima)
#### Subtract diffs of flagged reads
diff = np.diff(cube, axis=0)
dark_diff = np.diff(dark_cube, axis=0)
dt = np.diff(time)
final_exptime = np.ones((1024, 1024))*time[-1]
final_sci = cube[-1,:,:]*1
final_dark = dark_cube[NSAMP-1,:,:]*1
for read in pop_reads:
final_sci -= diff[read,:,:]
final_dark -= dark_diff[read,:,:]
final_exptime -= dt[read]
if False:
### Experimenting with automated flagging
sh = (1024,1024)
drate = (diff.reshape((14,-1)).T/dt).T
med = np.median(drate, axis=0)
fmed = np.median(med)
nmad = 1.48*np.median(np.abs(drate-med), axis=0)
drate_ma = np.ma.masked_array(drate, mask=~np.isfinite(drate))
wht_ma = drate_ma*0
excess = med*0.
for read in range(1,NSAMP-1):
med_i = np.percentile(drate[read,:]-med, 20)
excess_electrons = (drate[read,:]-med-med_i)*dt[read]
rms = np.sqrt((fmed + med_i)*dt[read])
hot = (excess_electrons / rms) > 10
#sm = nd.median_filter(excess_electrons.reshape(sh), 10).flatten()
#hot |= (sm / rms) > 3
med_i = np.percentile((drate[read,:]-med)[~hot], 50)
print(med_i)
drate_ma.mask[read, hot] |= True
drate_ma.data[read,:] -= med_i
wht_ma.mask[read, hot] |= True
wht_ma.data[read,:] = dt[read]
wht_ma.mask[0,:] = True
avg = (drate_ma*wht_ma).sum(axis=0)/wht_ma.sum(axis=0)
pyfits.writeto('%s_avg.fits' %(raw.split('_raw')[0]), data=avg.data.reshape(sh)[5:-5,5:-5], clobber=True)
#### Removed masked regions of individual reads
if len(masks) > 0:
import pyregion
for mask in masks:
mask_read = int(mask.split('.')[-3])
if mask_read in pop_reads:
continue
print('Mask pixels in read %d (%s)' %(mask_read, mask))
refhdu = ima['SCI', 1]
r = pyregion.open(mask).as_imagecoord(header=refhdu.header)
mask_array = r.get_mask(hdu=refhdu)
final_exptime -= mask_array*dt[mask_read]
final_sci -= diff[mask_read,:,:]*mask_array
final_dark -= dark_diff[mask_read,:,:]*mask_array
#### Variance terms
## read noise
final_var = readnoise_2D*1
## poisson term
final_var += (final_sci*flat + final_dark*gain_2D)*(gain_2D/2.368)
## flat errors
final_var += (final_sci*flat*flat_im['ERR'].data)**2
final_err = np.sqrt(final_var)/flat/(gain_2D/2.368)/1.003448/final_exptime
final_sci /= final_exptime
flt[0].header['EXPTIME'] = np.max(final_exptime)
else:
if flatten_ramp:
#### Subtract out the median of each read to make background flat
fix_saturated = False
print('\n*** Flatten ramp ***')
ima = pyfits.open(raw.replace('raw', 'ima'), mode='update')
#### Grism exposures aren't flat-corrected
filter = ima[0].header['FILTER']
if 'G1' in filter:
flats = {'G102': 'uc72113oi_pfl.fits',
'G141': 'uc721143i_pfl.fits'}
flat = pyfits.open('%s/%s' %(os.getenv('iref'), flats[filter]))[1].data
else:
flat = 1.
#### Remove the variable ramp
slx = slice(stats_region[0][0], stats_region[0][1])
sly = slice(stats_region[1][0], stats_region[1][1])
total_countrate = np.median((ima['SCI',1].data/flat)[sly, slx])
for i in range(ima[0].header['NSAMP']-2):
ima['SCI',i+1].data /= flat
med = np.median(ima['SCI',i+1].data[sly, slx])
print('Read #%d, background:%.2f' %(i+1, med))
ima['SCI',i+1].data += total_countrate - med
if 'G1' in filter:
for i in range(ima[0].header['NSAMP']-2):
ima['SCI',i+1].data *= flat
ima[0].header['CRCORR'] = 'PERFORM'
ima[0].header['DRIZCORR'] = 'OMIT'
### May need to generate a simple dummy ASN file for a single exposure
### Not clear why calwf3 needs an ASN if DRIZCORR=OMIT, but it does
need_asn = False
if ima[0].header['ASN_ID'] == 'NONE':
need_asn=True
else:
if not os.path.exists(ima[0].header['ASN_TAB']):
need_asn=True
if need_asn:
import stsci.tools
exp = ima.filename().split('_ima')[0]
params = stsci.tools.asnutil.ASNMember()
asn = stsci.tools.asnutil.ASNTable(output=exp)
asn['members'] = {exp:params}
asn['order'] = [exp]
asn.write()
ima[0].header['ASN_ID'] = exp.upper()
ima[0].header['ASN_TAB'] = '%s_asn.fits' %(exp)
ima.flush()
#### Initial cleanup
files=glob.glob(raw.replace('raw', 'ima_*'))
for file in files:
print('#cleanup: rm %s' %(file))
os.remove(file)
#### Run calwf3 on cleaned IMA
wfc3tools.calwf3(raw.replace('raw', 'ima'))
#### Put results into an FLT-like file
ima = pyfits.open(raw.replace('raw', 'ima_ima'))
flt_new = pyfits.open(raw.replace('raw', 'ima_flt'))
flt['DQ'].data = flt_new['DQ'].data*1
flt['TIME'] = flt_new['TIME']
flt['SAMP'] = flt_new['SAMP']
final_sci = ima['SCI', 1].data*1
final_sci[5:-5,5:-5] = flt_new['SCI'].data*1
#final_err = ima['ERR', 1].data*1
### Need original ERR, something gets messed up
final_err = ima['ERR', 1].data*1
final_err[5:-5,5:-5] = flt['ERR'].data*1
### Clean up
files=glob.glob(raw.replace('raw', 'ima_*'))
for file in files:
print('#cleanup: rm %s' %(file))
os.remove(file)
else:
final_sci = ima['SCI', 1].data*1
final_err = ima['ERR', 1].data*1
final_dq = ima['DQ', 1].data*1
#### For saturated pixels, look for last read that was unsaturated
#### Background will be different under saturated pixels but maybe won't
#### matter so much for such bright objects.
if (fix_saturated):
print('Fix Saturated pixels:')
#### Saturated pixels
zi, yi, xi = np.indices(dq.shape)
saturated = (dq & 256) > 0
# 1024x1024 index array of reads where pixels not saturated
zi_flag = zi*1
zi_flag[saturated] = 0
### 2D array of the last un-saturated read
last_ok_read = np.max(zi_flag, axis=0)
sat_zero = last_ok_read == 0
pyfits.writeto(raw.replace('_raw','_lastread'), data=last_ok_read[5:-5,5:-5], header=flt[1].header, clobber=True)
### keep pixels from first read even if saturated
last_ok_read[sat_zero] = 1
zi_idx = zi < 0
for i in range(1, NSAMP-1):
zi_idx[i,:,:] = zi[i,:,:] == last_ok_read
time_array = time[zi]
time_array[0,:,:] = 1.e-3 # avoid divide-by-zero
# pixels that saturated before the last read
fix = (last_ok_read < (ima[0].header['NSAMP'] - 1)) & (last_ok_read > 0)
#err = np.sqrt(ima[0].header['READNSEA']**2 + cube)/time_array
err = np.sqrt(readnoise_2D + cube)/time_array
final_sci[fix] = np.sum((cube/time_array)*zi_idx, axis=0)[fix]
final_err[fix] = np.sum(err*zi_idx, axis=0)[fix]
fixed_sat = (zi_idx.sum(axis=0) > 0) & ((final_dq & 256) > 0)
final_dq[fixed_sat] -= 256
final_dq[sat_zero] |= 256
print(' Nsat = %d' %(fixed_sat.sum()))
flt['DQ'].data |= final_dq[5:-5,5:-5] & 256
else:
#### Saturated pixels
flt['DQ'].data |= ima['DQ',1].data[5:-5,5:-5] & 256
flt['SCI'].data = final_sci[5:-5,5:-5]
flt['ERR'].data = final_err[5:-5,5:-5]
#### Some earthshine flares DQ masked as 32: "unstable pixels"
mask = (flt['DQ'].data & 32) > 0
if mask.sum() > 1.e4:
print('\n****\nTake out excessive DQ=32 flags (N=%e)\n****\n' %(mask.sum()))
#flt['DQ'].data[mask] -= 32
mask = flt['DQ'].data & 32
### Leave flagged 32 pixels around the edges
flt['DQ'].data[5:-5,5:-5] -= mask[5:-5,5:-5]
### Update the FLT header
flt[0].header['IMA2FLT'] = (1, 'FLT extracted from IMA file')
flt[0].header['IMASAT'] = (fix_saturated*1, 'Manually fixed saturation')
flt[0].header['NPOP'] = (len(pop_reads), 'Number of reads popped from the sequence')
for iread, read in enumerate(pop_reads):
flt[0].header['POPRD%02d' %(iread+1)] = (read, 'Read kicked out of the MULTIACCUM sequence')
flt.flush()
### Remove the IMA file
if remove_ima:
os.remove(raw.replace('raw', 'ima'))
def reprocess_parallel(files, cpu_count=0, skip=True):
"""
"""
import multiprocessing as mp
import time
t0_pool = time.time()
if cpu_count <= 0:
cpu_count = mp.cpu_count()
pool = mp.Pool(processes=cpu_count)
if skip:
for i in range(len(files))[::-1]:
if os.path.exists(files[i].replace('_raw.fits', '_flt.fits')):
p = files.pop(i)
results = [pool.apply_async(make_IMA_FLT, (file, [], True, True, True, [[300,700],[300,700]])) for file in files]
pool.close()
pool.join()
t1_pool = time.time()
def show_ramps_parallel(files, cpu_count=0, skip=True):
"""
"""
import multiprocessing as mp
import time
t0_pool = time.time()
if cpu_count <= 0:
cpu_count = mp.cpu_count()
pool = mp.Pool(processes=cpu_count)
if skip:
for i in range(len(files))[::-1]:
if os.path.exists(files[i].replace('_raw.fits', '_ramp.png')):
p = files.pop(i)
results = [pool.apply_async(show_MultiAccum_reads, (file, False, False, [[300,700],[300,700]])) for file in files]
pool.close()
pool.join()
t1_pool = time.time()
def show_MultiAccum_reads(raw='ibp329isq_raw.fits', flatten_ramp=False, verbose=True, stats_region=[[0,1014], [0,1014]]):
"""
Make a figure (.ramp.png) showing the individual reads of an
IMA or RAW file.
"""
import scipy.ndimage as nd
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
if verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.WARN)
status = fetch_calibs(raw) #, ftpdir='ftp://ftp.stsci.edu/cdbs/iref/')
if not status:
return False
img = pyfits.open(raw)
if 'raw' in raw:
gains = [2.3399999, 2.3699999, 2.3099999, 2.3800001]
gain = np.zeros((1024,1024))
gain[512: ,0:512] += gains[0]
gain[0:512,0:512] += gains[1]
gain[0:512, 512:] += gains[2]
gain[512: , 512:] += gains[3]
else:
gain=1
logger.info('Make MULTIACCUM cube')
#### Split the multiaccum file into individual reads
cube, dq, time, NSAMP = split_multiaccum(img, scale_flat=False)
if 'raw' in raw:
dark_file = img[0].header['DARKFILE'].replace('iref$', os.getenv('iref')+'/')
dark = pyfits.open(dark_file)
dark_cube, dark_dq, dark_time, dark_NSAMP = split_multiaccum(dark, scale_flat=False)
diff = np.diff(cube-dark_cube[:NSAMP,:,:], axis=0)*gain
dt = np.diff(time)
#### Need flat for Poisson
flat_im, flat = get_flat(img)
diff /= flat
else:
diff = np.diff(cube, axis=0)
dt = np.diff(time)
#### Average ramp
slx = slice(stats_region[0][0], stats_region[0][1])
sly = slice(stats_region[1][0], stats_region[1][1])
ramp_cps = np.median(diff[:, sly, slx], axis=1)
avg_ramp = np.median(ramp_cps, axis=1)
#### Initialize the figure
logger.info('Make plot')
plt.ioff()
#fig = plt.figure(figsize=[10,10])
fig = Figure(figsize=[10,10])
## Smoothing
smooth = 1
kernel = np.ones((smooth,smooth))/smooth**2
## Plot the individual reads
for j in range(1,NSAMP-1):
ax = fig.add_subplot(4,4,j)
smooth_read = nd.convolve(diff[j,:,:],kernel)
ax.imshow(smooth_read[5:-5:smooth, 5:-5:smooth]/dt[j],
vmin=0, vmax=4, origin='lower', cmap=plt.get_cmap('cubehelix'))
ax.set_xticklabels([]); ax.set_yticklabels([])
ax.text(20,5,'%d' %(j), ha='left', va='bottom', backgroundcolor='white')
## Show the ramp
fig.tight_layout(h_pad=0.3, w_pad=0.3, pad=0.5)
ax = fig.add_axes((0.6, 0.05, 0.37, 0.18))
#ax = fig.add_subplot(428)
ax.plot(time[2:], (ramp_cps[1:,16:-16:4].T/np.diff(time)[1:]).T, alpha=0.1, color='black')
ax.plot(time[2:], avg_ramp[1:]/np.diff(time)[1:], alpha=0.8, color='red', linewidth=2)
ax.set_xlabel('time'); ax.set_ylabel('background [e/s]')
#fig.tight_layout(h_pad=0.3, w_pad=0.3, pad=0.5)
root=raw.split('_')[0]
#plt.savefig(root+'_ramp.png')
canvas = FigureCanvasAgg(fig)
canvas.print_figure(root+'_ramp.png', dpi=200, transparent=False)
#### Same ramp data file
np.savetxt('%s_ramp.dat' %(root), np.array([time[1:], avg_ramp/np.diff(time)]).T, fmt='%.3f')
if flatten_ramp:
#### Flatten the ramp by setting background countrate to the average.
#### Output saved to "*x_flt.fits" rather than the usual *q_flt.fits.
import wfc3tools
flux = avg_ramp/np.diff(time)
avg = avg_ramp.sum()/time[-1]
min = flux[1:].min()
subval = np.cumsum((flux-avg)*np.diff(time))
imraw = pyfits.open(raw.replace('ima','raw'))
for i in range(1, NSAMP):
logger.info('Remove excess %.2f e/s from read #%d (t=%.1f)' %(flux[-i]-min, NSAMP-i+1, time[-i]))
imraw['SCI',i].data = imraw['SCI',i].data - np.cast[int](subval[-i]/2.36*flat)
files=glob.glob(raw.split('q_')[0]+'x_*')
for file in files:
os.remove(file)
imraw[0].header['CRCORR'] = 'PERFORM'
imraw.writeto(raw.replace('q_raw', 'x_raw'), clobber=True)
## Run calwf3
wfc3tools.calwf3(raw.replace('q_raw', 'x_raw'))
return fig
def in_shadow(file='ibhj07ynq_raw.fits'):
"""
Compute which reads in a RAW file were obtained within the Earth SHADOW.
Requires the associated JIF files that contain this information, for example
"ibhj07040_jif.fits" for the default data file. These can be obtained by requesting
the "observation log" files from MAST.
"""
import astropy.time
import astropy.io.fits as pyfits
import numpy as np
#### Open the raw file
raw = pyfits.open(file)
NSAMP = raw[0].header['NSAMP']
#### Find JIF file. Can either be association or single files
if raw[0].header['ASN_ID'] == 'NONE':
exp = raw[0].header['ROOTNAME']
jif = pyfits.open(exp[:-1]+'j_jif.fits')[1]
else:
exp = raw[0].header['ROOTNAME']
asn = raw[0].header['ASN_TAB']
jif = pyfits.open(asn.replace('asn', 'jif'))
for i in range(len(jif)-1):
if jif[i+1].header['EXPNAME'][:-1] == exp[:-1]:
jif = jif[i+1]
break
#### Shadow timing (last entry and exit)
shadow_in = astropy.time.Time(jif.header['SHADOENT'].replace('.',':'),
format='yday', in_subfmt='date_hms', scale='utc')
shadow_out = astropy.time.Time(jif.header['SHADOEXT'].replace('.',':'),
format='yday', in_subfmt='date_hms', scale='utc')
#### Array of read timings
t0 = []
for i in range(NSAMP):
h = raw['sci',i+1].header
ti = astropy.time.Time(h['ROUTTIME'], format='mjd', scale='utc')
t0.append(ti)
t0 = astropy.time.Time(t0)
#### Test if reads were taken during shadow
test_in_shadow = ((t0-shadow_in).sec < (t0-shadow_out).sec) | ((t0-shadow_out).sec < 0)
return t0, test_in_shadow
|
mit
|
TinyOS-Camp/DDEA-DEV
|
Archive/[14_10_11] Dr_Jung_Update/df_data_analysis_ver3_old.py
|
3
|
111874
|
# coding: utf-8
"""
==============================================
Visualizing the energy-sensor-weather structure
===============================================
This example employs several unsupervised learning techniques to extract
the energy data structure from variations in Building Automation System (BAS)
and historical weather data.
The fundamental timelet for analysis is 15 min, referred to as Q.
** currently use H (Hour) as a fundamental timelet, need to change later **
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which measurements are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
measurement, the measurements it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together measurements that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full system.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different measurement
points on a 2D canvas. For this we use :ref:`manifold` techniques to retrieve
a 2D embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes represent
the measurement points and edges the relations between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
#print(__doc__)
# Author: Deokwooo Jung [email protected]
from __future__ import division # To force floating point division
import os
import sys
import numpy as np
from numpy.linalg import inv
from numpy.linalg import norm
import uuid
import pylab as pl
from scipy import signal
from scipy import stats
from scipy import fftpack
from scipy.fftpack import rfft, irfft, fftfreq
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from multiprocessing import Pool
#from datetime import datetime
import datetime as dt
from dateutil import tz
import shlex, subprocess
import mytool as mt
import time
import retrieve_weather as rw
import itertools
import mpl_toolkits.mplot3d.axes3d as p3
import calendar
import random
from stackedBarGraph import StackedBarGrapher
import pprint
import radar_chart
##################################################################
# ETE tree module
#from ete2 import Tree
##################################################################
##################################################################
# Machine Learning Modules
from sklearn.gaussian_process import GaussianProcess
from sklearn import cluster, covariance, manifold # Machine Learning Package
from sklearn.decomposition import FastICA
from sklearn.decomposition import PCA
from sklearn import metrics
from sklearn import mixture
from sklearn.cluster import Ward
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift
from sklearn import mixture
##################################################################
# Custom library
##################################################################
from data_tools import *
from data_retrieval import *
from pack_cluster import *
from data_preprocess import *
from shared_constants import *
from pre_bn_state_processing import *
##################################################################
from matplotlib.collections import LineCollection
#from classify_sensors import get_sim_mat
##################################################################
# Processing Configuration Settings
##################################################################
# This option lets you reuse the data_dict object saved to disk by the most recent execution
IS_USING_SAVED_DICT=-1
# File selection method
Using_LoopUp=0
# Analysis period
ANS_START_T=dt.datetime(2013,7,1,0)
ANS_END_T=dt.datetime(2013,9,30,0)
# Interval of timelet, currently set to 1 Hour
#TIMELET_INV=dt.timedelta(hours=1)
TIMELET_INV=dt.timedelta(minutes=15)
# Interactive mode for plotting
plt.ion()
##################################################################
"""
##################################################################
# Variable Declare and initialization
##################################################################
time_slots=[]
start=ANS_START_T
while start < ANS_END_T:
#print start
time_slots.append(start)
start = start + TIMELET_INV
# Data dictionary
# All sensor and weather data is processed and structred into
# a consistent single data format -- Dictionary
data_dict={}
"""
input_files=[]
###############################################################################
# This directly searches files from bin file name
if (Using_LoopUp==0) and (IS_USING_SAVED_DICT==0):
temp4 = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep _ACTIVE_POWER_|grep GW2", shell=True)
#temp = subprocess.check_output("ls "+data_dir+"*.bin |grep '_POWER_\|TK.*VAK'", shell=True)
ha_ = subprocess.check_output("ls "+DATA_DIR+"*.bin |grep '\.HA.._'", shell=True)
ha1_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep '\.HA1_'", shell=True)
ha2_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep '\.HA2_'", shell=True)
power_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep _POWER_", shell=True)
#ventilation
iv_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep IV_", shell=True)
# Solar
aurinko_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep AURINKO_", shell=True)
# weather
saa_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep '\.SAA'", shell=True)
# cooling
jaah_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep JAAH", shell=True)
# ground heat
mlp_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep MLP", shell=True)
# GW1 GEO Thermal
gw1geo_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep GW1.GEO", shell=True)
# GW2 GEO Thermal
gw2geo_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep GW2.GEO", shell=True)
# VAK1 GEO Thermal
vak1geo_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep VAK1.GEO", shell=True)
# VAK2 GEO Thermal
vak2geo_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep VAK2.GEO", shell=True)
temp=power_+iv_+aurinko_+mlp_+gw1geo_ +gw2geo_+vak1geo_+vak2geo_+ha1_+ha2_
#temp=temp4
input_files =shlex.split(temp)
# Get rid of duplicated files
input_files=list(set(input_files))
print 'The total number of sensors selected for analysis is ', len(input_files),'......'
###############################################################################
# This looks up the id description tables and finds relevant bin files.
elif (Using_LoopUp==1) and (IS_USING_SAVED_DICT==0):
id_dict=get_id_dict('grep kW')
for id_name in id_dict.keys():
binfile_name=id_name+'.bin'
input_files.append(binfile_name)
else:
print 'Search data_dict.bin....'
#import pdb;pdb.set_trace()
###############################################################################
# Analysis script starts here ....
###############################################################################
if IS_USING_SAVED_DICT==0:
start__dictproc_t=time.time()
# IS_USING_PARALLEL_OPT
data_dict=construct_data_dict(input_files,ANS_START_T,ANS_END_T,TIMELET_INV,binfilename='data_dict', IS_USING_PARALLEL=IS_USING_PARALLEL_OPT)
end__dictproc_t=time.time()
print 'the time of construct data dict.bin is ', end__dictproc_t-start__dictproc_t, ' sec'
print '--------------------------------------'
elif IS_USING_SAVED_DICT==1:
print 'Loading data dictionary......'
start__dictproc_t=time.time()
data_dict = mt.loadObjectBinaryFast('data_dict.bin')
end__dictproc_t=time.time()
print 'the time of loading data dict.bin is ', end__dictproc_t-start__dictproc_t, ' sec'
print '--------------------------------------'
else:
print 'Skip data dict'
if IS_USING_SAVED_DICT>0:
# Copy related variables
time_slots=data_dict['time_slots'][:]
Conditions_dict=data_dict['Conditions_dict'].copy()
Events_dict=data_dict['Events_dict'].copy()
sensor_list=data_dict['sensor_list'][:]
weather_list=data_dict['weather_list'][:]
weather_list_used = [data_dict['weather_list'][i] for i in [1,2,3,10,11]]
    # data_used is the list of reference names for all measurements from now on.
data_used=sensor_list+weather_list_used
# This is a global ID for data_used measurement
data_used_idx=range(len(data_used))
sensor_idx=range(len(sensor_list))
weather_idx=range(len(sensor_list),len(data_used))
# [data_used[i] for i in sensor_idx]
# [data_used[i] for i in weather_idx]
# Verify there is no [] or N/A in the list
CHECK_DATA_FORMAT=0
if CHECK_DATA_FORMAT==1:
list_of_wrong_data_format=verify_data_format(data_used,data_dict,time_slots)
if len(list_of_wrong_data_format)>0:
print 'Measurement list below'
print '----------------------------------------'
print list_of_wrong_data_format
raise NameError('Errors in data format')
"""
print '=================================================================='
print ' Data retrieval summary for regualr events'
print '=================================================================='
print '# of weather points: ',len(weather_list_used)
print '# of sensor points: ', len(sensor_list)
print '-------------------------------------------------------------'
print '# of total measurement points: ',len(data_used)
print '-------------------------------------------------------------'
print '# of zero variance measurement points: ',len(X_zero_var_list)
print '-------------------------------------------------------------'
print '# of valid measurement points: ',len(X_names)
print '-------------------------------------------------------------'
print '# of representative measurement points (FLOAT TYPE): ', X_Feature_sfe.shape[1], '/',len(sf_idx)
print '-------------------------------------------------------------'
print '# of representative measurement points (INT TYPE): ', X_Feature_sie.shape[1], '/',len(si_idx)
print '-------------------------------------------------------------'
print '# of total analysis points / total measurement points: ', \
X_Feature_sfe.shape[1]+X_Feature_sie.shape[1]+len(wf_name)+len(wi_name), '/',len(data_used)
print '-------------------------------------------------------------'
"""
EVENT_RETRIEVAL=0
if EVENT_RETRIEVAL==1:
# sensor_list --> float or int --> clustering for float and int --> exemplar
# exemplar of floats --> states , int is states,
# weather_list --> float or int
####################################
# Regular Event Extraction
####################################
    # Build feature matrix with data interpolation for both sensor and weather data
X_Feature,X_Time,X_names\
,X_zero_var_list, X_zero_var_val\
,X_int_type_list,X_int_type_idx\
,X_float_type_list,X_float_type_idx\
,X_weather_type_idx,X_sensor_type_idx\
=build_feature_matrix(data_dict,sensor_list,weather_list_used\
,time_slots,DO_INTERPOLATE=1\
,max_num_succ_idx_for_itpl=int(len(time_slots)*0.05))
if len(X_names+X_zero_var_list)!=len(data_used):
raise NameError('Missing name is found in X_names or X_zero_var_list')
else:
zero_var_idx=[data_used.index(name_str) for name_str in X_zero_var_list]
nzero_var_idx=list(set(data_used_idx)-set(zero_var_idx))
# From below all index are reference to X_Feature
sf_idx=list(set(X_sensor_type_idx)&set(X_float_type_idx));
# Equivalent to np.array(data_used)[np.array(nzero_var_idx)[sf_idx]]
sf_name=list(np.array(X_names)[sf_idx])
si_idx=list(set(X_sensor_type_idx)&set(X_int_type_idx));
si_name=list(np.array(X_names)[si_idx])
wf_idx=list(set(X_weather_type_idx)&set(X_float_type_idx));
wf_name=list(np.array(X_names)[wf_idx])
wi_idx=list(set(X_weather_type_idx)&set(X_int_type_idx));
wi_name=list(np.array(X_names)[wi_idx])
#Euclidian Distance Matrix of Floating type of data only wf+o
float_idx=list(set(sf_idx)| set(wf_idx))
int_idx=list(set(si_idx)| set(wi_idx))
# Float Type Measurement Clustering
X_Feature_sfe,sf_exemplars_dict,exemplars_,labels_\
=cluster_measurement_points(X_Feature[:,sf_idx],sf_name,corr_bnd=[0.1,0.9],alg='pack')
sfe_idx=list(np.array(sf_idx)[exemplars_])
# InT Type Measurement Clustering
X_Feature_sie,si_exemplars_dict,exemplars_,labels_\
=cluster_measurement_points(X_Feature[:,si_idx],si_name,corr_bnd=[0.0,0.9],alg='pack')
sie_idx=list(np.array(si_idx)[exemplars_])
#plot_label(X_Feature[:,si_idx],si_name,labels_,exemplars_,[27])
sfe_state,sfe_corr_val=X_INPUT_to_states(X_Feature_sfe,CORR_VAL_OUT=1) # sensor -float type
sie_state=X_Feature_sie # sensor -integer type
wf_state,wf_corr_val=X_INPUT_to_states(X_Feature[:,wf_idx],CORR_VAL_OUT=1) # weather -float type
wi_state=X_Feature[:,wi_idx] # weather -integer type
empty_states=np.array([[] for i in range(len(X_Time))])
if len(sfe_state)==0: sfe_state=empty_states
if len(sie_state)==0: sie_state=empty_states
if len(wf_state)==0: wf_state=empty_states
if len(wi_state)==0: wi_state=empty_states
# Exemplar sensor only
X_Sensor_STATE=np.append(sfe_state,sie_state, axis=1)
X_Sensor_STATE=X_Sensor_STATE.astype(int)
X_Sensor_NAMES=list(np.array(X_names)[sfe_idx])+list(np.array(X_names)[sie_idx])
X_Weather_STATE=np.append(wf_state,wi_state, axis=1)
X_Weather_STATE=X_Weather_STATE.astype(int)
X_Weather_NAMES=list(np.array(X_names)[wf_idx])+list(np.array(X_names)[wi_idx])
# months of a year,days of a week, and hours of a day
    # (Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday) = (0,1,2,3,4,5,6)
X_Time_STATE_temp=build_time_states(X_Time)
X_Time_NAMES_temp=['MTH','WD','HR']
X_Time_STATE=[]
X_Time_NAMES=[]
for xt_col,xt_name in zip(X_Time_STATE_temp.T,X_Time_NAMES_temp):
if len(set(xt_col))>1:
X_Time_STATE.append(xt_col)
X_Time_NAMES.append(xt_name)
X_Time_STATE=np.array(X_Time_STATE).T
DO_PLOTTING=0
if DO_PLOTTING==1:
sensor_name_temp=['VAK1.HA1_SM_EP_KM','VAK1.HA1_SM_KAM','GW1.HA1_TE16_AH2_M']
plot_compare_sensors(sensor_name_temp,X_Time,X_Feature,X_names)
plot_compare_states(sensor_name_temp[0],data_dict,X_Time,X_Feature,X_names)
#################################################
    # FORMATTED DATA FOR REGULAR EVENT
#################################################
#DO_PROB_EST=1 ** Save this variables***
#avgdata_mat = np.hstack([X_Sensor_STATE,X_Weather_STATE,X_Time_STATE])
#avgdata_names = X_Sensor_NAMES+X_Weather_NAMES+X_Time_NAMES
avgdata_exemplar=dict(sf_exemplars_dict.items()+si_exemplars_dict.items())
avgdata_zvar=X_zero_var_list
avgdata_dict={}
#avgdata_dict.update({'avgdata_mat':avgdata_mat})
avgdata_dict.update({'avgdata_state_mat':X_Sensor_STATE})
avgdata_dict.update({'avgdata_weather_mat':X_Weather_STATE})
avgdata_dict.update({'avgdata_time_mat':X_Time_STATE})
avgdata_dict.update({'avg_time_slot':X_Time})
#avgdata_dict.update({'avgdata_names':avgdata_names})
avgdata_dict.update({'avgdata_exemplar':avgdata_exemplar})
avgdata_dict.update({'avgdata_zvar':avgdata_zvar})
avgdata_dict.update({'sensor_names':X_Sensor_NAMES})
avgdata_dict.update({'weather_names':X_Weather_NAMES})
avgdata_dict.update({'time_names':X_Time_NAMES})
mt.saveObjectBinary(avgdata_dict,'avgdata_dict.bin')
####################################
# Irregular Event Extraction
####################################
    # Interpolation with outlier removal. Here we exclude weather data from irregular event analysis
    # since weather data normally shows slow changes in time, so we don't expect any meaningful diff values
measurement_point_set,num_type_set\
=interpolation_measurement(data_dict,sensor_list,err_rate=1,sgm_bnd=20)
    # Irregular matrix
Xdiff_Mat,Xdiff_Time,Xdiff_Names\
,Xdiff_zero_var_list, Xdiff_zero_var_val\
,Xdiff_int_type_list,Xdiff_int_type_idx\
,Xdiff_float_type_list,Xdiff_float_type_idx\
=build_diff_matrix(measurement_point_set,time_slots,num_type_set,sensor_list,PARALLEL=IS_USING_PARALLEL_OPT)
#==============================================================================
# This code is to fix the dimension difference in diff sensor and weather
# WARNING: this is just a quick fix. A more elegant solution should be implemented
#==============================================================================
time_slots_array = np.sort(np.array(list(set(Xdiff_Time) & set(X_Time))))
# Extract subset of X_Weather_STATE
removed_idx_list = []
for ridx,slot in enumerate(X_Time):
slot_idx = np.where(time_slots_array==slot)[0]
if len(slot_idx) == 0: # slot not in common time slots
removed_idx_list.append(ridx)
XDIFF_Weather_STATE = np.delete(X_Weather_STATE, removed_idx_list,axis=0)
# Extract subset of Xdiff_Mat
removed_idx_list = []
for ridx,slot in enumerate(Xdiff_Time):
slot_idx = np.where(time_slots_array==slot)[0]
if len(slot_idx) == 0: # slot not in common time slots
removed_idx_list.append(ridx)
Xdiff_Mat = np.delete(Xdiff_Mat,removed_idx_list,axis=0)
# Update Xdiff_Time
Xdiff_Time = time_slots_array
XDIFF_Weather_STATE = np.array(XDIFF_Weather_STATE)
#==============================================================================
# End of fix
#==============================================================================
# From below all index are reference to X_Feature
xdiff_sf_idx=Xdiff_float_type_idx;
xdiff_sf_name=Xdiff_float_type_list;
xdiff_si_idx=Xdiff_int_type_idx;
xdiff_si_name=Xdiff_int_type_list
# Float Type Measurement Clustering
X_Diff_sfe,sf_diff_exemplars_dict,exemplars_,labels_\
=cluster_measurement_points(Xdiff_Mat[:,xdiff_sf_idx],xdiff_sf_name,corr_bnd=[0.1,0.9])
xdiff_sfe_idx=list(np.array(xdiff_sf_idx)[exemplars_])
# InT Type Measurement Clustering
X_Diff_sie,si_diff_exemplars_dict,exemplars_,labels_\
=cluster_measurement_points(Xdiff_Mat[:,xdiff_si_idx],xdiff_si_name,corr_bnd=[0.1,0.9])
xdiff_sie_idx=list(np.array(xdiff_si_idx)[exemplars_])
xdiff_sfe_state,xdiff_sfe_corr_val\
=X_INPUT_to_states(X_Diff_sfe,CORR_VAL_OUT=1,PARALLEL =IS_USING_PARALLEL_OPT) # sensor -float type
xdiff_sie_state=X_Diff_sie # sensor -integer type
empty_states=np.array([[] for i in range(len(Xdiff_Time))])
if len(xdiff_sfe_state)==0: xdiff_sfe_state=empty_states
if len(xdiff_sie_state)==0: xdiff_sie_state=empty_states
if len(wf_state)==0: wf_state=empty_states
if len(wi_state)==0: wi_state=empty_states
# Exemplar sensor only
XDIFF_Sensor_STATE=np.append(xdiff_sfe_state,xdiff_sie_state, axis=1)
XDIFF_Sensor_STATE=XDIFF_Sensor_STATE.astype(int)
XDIFF_Sensor_NAMES=list(np.array(Xdiff_Names)[xdiff_sfe_idx])+list(np.array(Xdiff_Names)[xdiff_sie_idx])
# months of a year,days of a week, and hours of a day
    # (Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday) = (0,1,2,3,4,5,6)
XDIFF_Time_STATE_temp=build_time_states(Xdiff_Time)
XDIFF_Time_NAMES_temp=['MTH','WD','HR']
XDIFF_Time_STATE=[]
XDIFF_Time_NAMES=[]
for xt_col,xt_name in zip(XDIFF_Time_STATE_temp.T,XDIFF_Time_NAMES_temp):
if len(set(xt_col))>1:
XDIFF_Time_STATE.append(xt_col)
XDIFF_Time_NAMES.append(xt_name)
XDIFF_Time_STATE=np.array(XDIFF_Time_STATE).T
#################################################
    # FORMATTED DATA FOR IRREGULAR EVENT
#################################################
#** Save this variables***
#diffdata_mat = np.hstack([XDIFF_Sensor_STATE,X_Weather_STATE,XDIFF_Time_STATE])
#diffdata_names = XDIFF_Sensor_NAMES+X_Weather_NAMES+XDIFF_Time_NAMES
diffdata_exemplar=dict(sf_diff_exemplars_dict.items()+si_diff_exemplars_dict.items())
diffdata_zvar=Xdiff_zero_var_list
diffdata_dict={}
#diffdata_dict.update({'diffdata_mat':diffdata_mat})
diffdata_dict.update({'diffdata_state_mat':XDIFF_Sensor_STATE})
#diffdata_dict.update({'diffdata_weather_mat':X_Weather_STATE})
diffdata_dict.update({'diffdata_weather_mat':XDIFF_Weather_STATE})
diffdata_dict.update({'diffdata_time_mat':XDIFF_Time_STATE})
diffdata_dict.update({'diff_time_slot':Xdiff_Time})
#diffdata_dict.update({'diffdata_names':diffdata_names})
diffdata_dict.update({'diffdata_exemplar':diffdata_exemplar})
diffdata_dict.update({'diffdata_zvar':diffdata_zvar})
diffdata_dict.update({'sensor_names':XDIFF_Sensor_NAMES})
diffdata_dict.update({'weather_names':X_Weather_NAMES})
diffdata_dict.update({'time_names':X_Time_NAMES})
mt.saveObjectBinary(diffdata_dict,'diffdata_dict.bin')
EVENT_ANALYSIS=0
if EVENT_ANALYSIS==1:
# 0-nb distance analysis
####################################################
    # Probability Computation
#---------------------------------------------------
# - Ranking output.
# - Effect Prob Analysis
# - Causal Prob Analysis
####################################################
diffdata_dict = mt.loadObjectBinary('diffdata_dict.bin')
avgdata_dict = mt.loadObjectBinary('avgdata_dict.bin')
    # Irregular Events
diffdata_state_mat=diffdata_dict['diffdata_state_mat']
diffdata_weather_mat=diffdata_dict['diffdata_weather_mat']
diffdata_time_mat=diffdata_dict['diffdata_time_mat']
diff_time_slot=diffdata_dict['diff_time_slot']
diffdata_exemplar=diffdata_dict['diffdata_exemplar']
diffdata_zvar=diffdata_dict['diffdata_zvar']
diffsensor_names=diffdata_dict['sensor_names']
diffweather_names=diffdata_dict['weather_names']
difftime_names=diffdata_dict['time_names']
    # Regular Events
avgdata_state_mat=avgdata_dict['avgdata_state_mat']
avgdata_weather_mat=avgdata_dict['avgdata_weather_mat']
avgdata_time_mat=avgdata_dict['avgdata_time_mat']
avg_time_slot=avgdata_dict['avg_time_slot']
avgdata_exemplar=avgdata_dict['avgdata_exemplar']
avgdata_zvar=avgdata_dict['avgdata_zvar']
avgsensor_names=avgdata_dict['sensor_names']
avgweather_names=avgdata_dict['weather_names']
avgtime_names=avgdata_dict['time_names']
###############################################################################################
    # Regular Event Analysis
#avgdata_state_mat,avgdata_weather_mat, avgdata_time_mat, avg_time_slot
#avgdata_exemplar, avgdata_zvar, avgsensor_names, avgweather_names, avgtime_names
###############################################################################################
#****** Complete Analysis Script***** #
######################################################################
#1. effect prob - time dependecy analysis
######################################################################
# Temporary for correcting month change
######################################################################
# Use this for special cases
######################################################################
"""
monthly_variability,monthly_structure_score\
=time_effect_analysis_all(data_mat,data_name,avgtime_names,avgsensor_names)
start_t=time.time()
s_name=avgsensor_names[0]
state_list,s_prob_log,time_effect_mat_dist,score_in_structure,valid_mon_list,state_list=\
time_effect_analysis(data_mat,data_name,avgtime_names,avgsensor_names[0],DO_PLOT=True)
end_t=time.time()
print 'Total-- ',end_t-start_t, 'secs'
plot_time_effect(s_name,state_list,valid_mon_list,s_prob_log)
wf_tuple=wf_tuple_t
plot_weather_sensitivity(wf_tuple[0],wf_tuple[1],wf_tuple[2],wf_tuple[3],wf_tuple[4],\
avgsensor_names,Conditions_dict,Events_dict,sort_opt='desc',num_of_picks=9)
"""
Conditions_dict=data_dict['Conditions_dict'].copy()
Events_dict=data_dict['Events_dict'].copy()
data_state_mat=avgdata_state_mat
data_time_mat=avgdata_time_mat
data_weather_mat=avgdata_weather_mat
sensor_names=avgsensor_names
time_names=avgtime_names
weather_names=avgweather_names
bldg_tag='VAK_' # building tag
trf_tag='avg_' # transformation tag
dst_t='h'
vak_avg_wtf_tuple,vak_avg_weather_dict=wt_sensitivity_analysis(data_state_mat,data_time_mat,data_weather_mat,sensor_names,time_names,\
Conditions_dict,Events_dict,bldg_tag,trf_tag,weather_names,dst_t='h')
Conditions_dict=data_dict['Conditions_dict'].copy()
Events_dict=data_dict['Events_dict'].copy()
data_state_mat=diffdata_state_mat
data_time_mat=diffdata_time_mat
data_weather_mat=diffdata_weather_mat
sensor_names=diffsensor_names
time_names=difftime_names
weather_names=diffweather_names
bldg_tag='VAK_' # building tag
trf_tag='diff_' # transformation tag
dst_t='h'
vak_diff_wtf_tuple,vak_diff_weather_dict=wt_sensitivity_analysis(data_state_mat,data_time_mat,data_weather_mat,sensor_names,time_names,\
Conditions_dict,Events_dict,bldg_tag,trf_tag,weather_names,dst_t='h')
###############################################################################################
    # Irregular Event Analysis
#avgdata_state_mat,avgdata_weather_mat, avgdata_time_mat, avg_time_slot
#avgdata_exemplar, avgdata_zvar, avgsensor_names, avgweather_names, avgtime_names
###############################################################################################
#########################################################################
    # Computes the maximum skewness of the distribution of sensors
# max_{i,j} abs(p_i-p_j)/p_i*p_j such that p_i, p_j ~=0
#########################################################################
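    # Worked example of the metric above (illustrative): for a state-probability
    # vector p = [0.9, 0.1] the score is abs(0.9-0.1)/(0.9*0.1) ~= 8.9, so a
    # heavily skewed state distribution yields a large value.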
#plot(irr_state_mat[:,skewness_metric_sort_idx[12]],'-s')
num_of_picks=10
rare_event_sensors=list(np.array(diffsensor_names)[skewness_metric_sort_idx[0:num_of_picks]])
rare_event_sensors_scores=list(skewness_metric_sort[0:num_of_picks])
pprint.pprint(np.array([rare_event_sensors, rare_event_sensors_scores]).T)
data_mat = np.hstack([diffdata_state_mat,diffdata_time_mat])
# Temporary for correcting month change
#data_mat[:,-3]=data_mat[:,-3]-1
data_name = diffsensor_names+difftime_names
dst_t='h'
mth_prob_map,mth_state_map, mth_sensitivity,mth_list\
= param_sensitivity(data_mat,data_name,diffsensor_names,'MTH',dst_type=dst_t)
wday_prob_map,wday_state_map,wday_sensitivity,wday_list\
= param_sensitivity(data_mat,data_name,diffsensor_names,'WD',dst_type=dst_t)
dhr_prob_map,dhr_state_map,dhr_sensitivity,dhr_list\
= param_sensitivity(data_mat,data_name,diffsensor_names,'HR',dst_type=dst_t)
tf_tuple_mth=('MTH',mth_prob_map,mth_state_map,mth_sensitivity,mth_list)
tf_tuple_wday=('WD',wday_prob_map,wday_state_map,wday_sensitivity,wday_list)
tf_tuple_dhr=('HR',dhr_prob_map,dhr_state_map,dhr_sensitivity,dhr_list)
#tf_tuple=tf_tuple_mth
##########################################################################################
    # Generalize this plotting
#plot_xxx_sensitivity(tf_tuple[0],tf_tuple[1],tf_tuple[2],tf_tuple[3],tf_tuple[4],\
# avgsensor_names,Conditions_dict,Events_dict,sort_opt='desc',num_of_picks=9)
##########################################################################################
tf_sstv_tuple=np.array([tf_tuple_mth[3],tf_tuple_wday[3],tf_tuple_dhr[3]])
max_tf_sstv=tf_sstv_tuple[tf_sstv_tuple<np.inf].max()*2
tf_sstv_tuple[tf_sstv_tuple==np.inf]=max_tf_sstv
tf_sstv_total=np.sum(tf_sstv_tuple,0)
arg_idx_s=argsort(tf_sstv_total)[::-1]
arg_idx_is=argsort(tf_sstv_total)
num_of_picks=9
print 'Most time sensitive sensors'
print '---------------------------------------------'
Time_Sensitive_Sensors=list(np.array(diffsensor_names)[arg_idx_s[0:num_of_picks]])
pprint.pprint(Time_Sensitive_Sensors)
print 'Least time sensitive sensors'
print '---------------------------------------------'
Time_Insensitive_Sensors=list(np.array(diffsensor_names)[arg_idx_is[0:num_of_picks]])
pprint.pprint(Time_Insensitive_Sensors)
####################################################################
    ## Radar Plotting for Time_Sensitive_Sensors
####################################################################
sensor_no = len(diffsensor_names)
# convert 'inf' to 1
sen_mth = [max_tf_sstv if val == float("inf") else val for val in tf_tuple_mth[3]]
sen_wday = [max_tf_sstv if val == float("inf") else val for val in tf_tuple_wday[3]]
sen_dhr = [max_tf_sstv if val == float("inf") else val for val in tf_tuple_dhr[3]]
SEN = [[sen_mth[i], sen_wday[i], sen_dhr[i]] for i in range(sensor_no)]
TOTAL_SEN = np.array([sum(SEN[i]) for i in range(sensor_no)])
idx = np.argsort(TOTAL_SEN)[-6:] # Best 6 sensors
spoke_labels = ["Month", "Day", "Hour"]
data = [SEN[i] for i in idx]
sensor_labels = [diffsensor_names[i] for i in idx]
#import radar_chart
radar_chart.plot(data, spoke_labels, sensor_labels, saveto="time_radar.png")
import pdb;pdb.set_trace()
"""
diffdata_state_mat=diffdata_dict['diffdata_state_mat']
diffdata_weather_mat=diffdata_dict['diffdata_weather_mat']
diffdata_time_mat=diffdata_dict['diffdata_time_mat']
diff_time_slot=diffdata_dict['diff_time_slot']
diffdata_exemplar=diffdata_dict['diffdata_exemplar']
diffdata_zvar=diffdata_dict['diffdata_zvar']
diffsensor_names=diffdata_dict['sensor_names']
diffweather_names=diffdata_dict['weather_names']
difftime_names=diffdata_dict['time_names']
"""
do_sampling_interval_plot=1
if do_sampling_interval_plot==1:
num_of_picks=5
fig=figure('sampling interval')
for k in range(num_of_picks):
ax=subplot(num_of_picks,1,k+1)
m_idx=skewness_metric_sort_idx[k]
sensor_name_=diffdata_names[m_idx]
t_=unix_to_dtime(data_dict[sensor_name_][2][0])
plot(t_[1:],abs(diff(data_dict[sensor_name_][2][0])))
plt.title(sensor_name_,fontsize=14,y=0.8)
ylabel('Sampling Intervals')
fig.savefig(fig_dir+'sampling_intervals.png')
do_rare_event_compare_plot=1
if do_rare_event_compare_plot==1:
num_of_picks=3
for k in range(num_of_picks):
fig=figure('irregular event compare'+str(k))
m_idx=skewness_metric_sort_idx[k]
sensor_name_=diffdata_names[m_idx]
irr_idx=irr_data_name.index(sensor_name_)
t_=unix_to_dtime(data_dict[sensor_name_][2][0])
val_=data_dict[sensor_name_][2][1]
subplot(4,1,1)
plt.title(sensor_name_+' samples',fontsize=14,y=0.8)
plot(t_,val_)
subplot(4,1,2)
plt.title(sensor_name_+' differential',fontsize=14,y=0.8)
plot(t_[1:],abs(diff(val_)))
subplot(4,1,3)
plot(measurement_point_set[irr_idx][0],measurement_point_set[irr_idx][1])
subplot(4,1,4)
plt.title(sensor_name_+' irregular states',fontsize=14,y=0.8)
plot(diff_time_slot,irr_state_mat[:,m_idx])
plt.get_current_fig_manager().window.showMaximized()
fig.savefig(fig_dir+'irr_event_compare'+str(k)+'.png')
BN_ANALYSIS=1
if BN_ANALYSIS==1:
#########################################################################
# Case by Case Analysis.
#########################################################################
##############################
# VTT VTT_POWER data
##############################
print 'VTT_POWER data loading ...'
# VTT_POWER data loading ...
avgdata_dict = mt.loadObjectBinaryFast('./VTT_POWER/avgdata_dict.bin')
avgdata_dict=obj(avgdata_dict)
gw2_power=mt.loadObjectBinaryFast('./VTT_POWER/GW2.CG_PHASE1_ACTIVE_POWER_M.bin')
X_Feature=mt.loadObjectBinaryFast('./VTT_POWER/X_Feature.bin')
X_names=mt.loadObjectBinaryFast('./VTT_POWER/X_names.bin')
X_Time=mt.loadObjectBinaryFast('./VTT_POWER/X_Time.bin')
Xdiff_Mat=mt.loadObjectBinaryFast('./VTT_POWER/Xdiff_Mat.bin')
Xdiff_Names=mt.loadObjectBinaryFast('./VTT_POWER/Xdiff_Names.bin')
Xdiff_Time=mt.loadObjectBinaryFast('./VTT_POWER/Xdiff_Time.bin')
def plotting_vtt_power(start_t,end_t):
for name_ in all_psensors:
idx=grep(name_,X_names)
dt_=X_Time
val_=X_Feature[:,idx]
s_t_idx=np.nonzero((np.array(dt_)>start_t) & (np.array(dt_)<end_t) )[0]
dt_=np.array(dt_)[s_t_idx]
val_=np.array(val_)[s_t_idx]
fig=figure(figsize=(20.0,10.0))
subplot(2,1,1)
if len(idx)>0:
plt.plot(dt_,val_)
plt.ylabel('Power',fontsize=18)
plt.tick_params(labelsize='large')
mn_=min(val_);mx_=max(val_)
ylim([mn_-0.1*abs(mn_),mx_+0.1*abs(mx_)])
title(name_+' - 15 min average value',fontsize=18)
idx=grep(name_,Xdiff_Names)
val_=Xdiff_Mat[:,idx]
dt_=Xdiff_Time
s_t_idx=np.nonzero((np.array(dt_)>start_t) & (np.array(dt_)<end_t) )[0]
dt_=np.array(dt_)[s_t_idx]
val_=np.array(val_)[s_t_idx]
subplot(2,1,2)
if len(idx)>0:
plt.plot(dt_,val_)
plt.xlabel('Time',fontsize=18)
plt.ylabel('Power',fontsize=18)
plt.tick_params(labelsize='large')
mn_=min(val_);mx_=max(val_)
ylim([mn_-0.1*abs(mn_),mx_+0.1*abs(mx_)])
title(name_+'- 15 min differential value',fontsize=18)
fig.savefig(fig_dir+name_+'_'+start_t.strftime("%B %d, %Y") +' - '+end_t.strftime("%B %d, %Y")+'.png')
PLOT_VTT_POWER=0
if PLOT_VTT_POWER==1:
start_t=datetime.datetime(2013, 4, 1, 0, 0, 0)
end_t=datetime.datetime(2014, 4, 1, 0, 0, 0)
plotting_vtt_power(start_t,end_t)
start_t=datetime.datetime(2013, 6, 1, 0, 0, 0)
end_t=datetime.datetime(2013, 6, 7, 0, 0, 0)
plotting_vtt_power(start_t,end_t)
#########################################################################
#########################################################################
### Load all buildings bin files.
#########################################################################
print 'Load all buildings bin files....'
print '========================='
print 'GW1 BLDG'
print '========================='
dict_dir='./GW1_results/'
bldg_tag='GW1_' # building tag
GW1_data_dict = mt.loadObjectBinaryFast(dict_dir+'data_dict.bin')
GW1_diffdata_dict = mt.loadObjectBinaryFast(dict_dir+'diffdata_dict.bin')
GW1_avgdata_dict = mt.loadObjectBinaryFast(dict_dir+'avgdata_dict.bin')
print '========================='
print 'GW2 BLDG'
print '========================='
dict_dir='./GW2_results/'
bldg_tag='GW2_' # building tag
GW2_data_dict = mt.loadObjectBinaryFast(dict_dir+'data_dict.bin')
GW2_diffdata_dict = mt.loadObjectBinaryFast(dict_dir+'diffdata_dict.bin')
GW2_avgdata_dict = mt.loadObjectBinaryFast(dict_dir+'avgdata_dict.bin')
print '========================='
print 'VAK1 BLDG'
print '========================='
dict_dir='./VAK1_results/'
bldg_tag='VAK1_' # building tag
VAK1_data_dict = mt.loadObjectBinaryFast(dict_dir+'data_dict.bin')
VAK1_diffdata_dict = mt.loadObjectBinaryFast(dict_dir+'diffdata_dict.bin')
VAK1_avgdata_dict = mt.loadObjectBinaryFast(dict_dir+'avgdata_dict.bin')
print '========================='
print 'VAK2 BLDG'
print '========================='
dict_dir='./VAK2_results/'
bldg_tag='VAK2_' # building tag
VAK2_data_dict = mt.loadObjectBinaryFast(dict_dir+'data_dict.bin')
VAK2_diffdata_dict = mt.loadObjectBinaryFast(dict_dir+'diffdata_dict.bin')
VAK2_avgdata_dict = mt.loadObjectBinaryFast(dict_dir+'avgdata_dict.bin')
###########################################################################################
# This part is the old data representation, but keep it here for a while
###########################################################################################
# old representation start....
bldg_tag_set=['GW1_','GW2_','VAK1_','VAK2_']
sig_tag_set=['avg','diff']
for bldg_tag in bldg_tag_set:
for sig_tag in sig_tag_set:
cmd_str=[[]]*9
cmd_str[0]=bldg_tag+sig_tag+'data_state_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_state_mat\']'
cmd_str[1]=bldg_tag+sig_tag+'data_weather_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_weather_mat\']'
cmd_str[2]=bldg_tag+sig_tag+'data_time_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_time_mat\']'
cmd_str[3]=bldg_tag+sig_tag+'_time_slot='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'_time_slot\']'
cmd_str[4]=bldg_tag+sig_tag+'data_exemplar='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_exemplar\']'
cmd_str[5]=bldg_tag+sig_tag+'data_zvar='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_zvar\']'
cmd_str[6]=bldg_tag+sig_tag+'sensor_names='+bldg_tag+sig_tag+'data_dict[\'sensor_names\']'
cmd_str[7]=bldg_tag+sig_tag+'weather_names='+bldg_tag+sig_tag+'data_dict[\'weather_names\']'
cmd_str[8]=bldg_tag+sig_tag+'time_names='+bldg_tag+sig_tag+'data_dict[\'time_names\']'
for cmd_ in cmd_str:
exec(cmd_)
for i,bldg_tag_ in enumerate(bldg_tag_set):
cmd_str_tmp=bldg_tag_+'avgp_idx=grep(\'POWER\','+bldg_tag_+'avgsensor_names)'
exec(cmd_str_tmp)
cmd_str_tmp=bldg_tag_+'avgp_names=list(np.array('+bldg_tag_+'avgsensor_names)['+bldg_tag_+'avgp_idx])'
exec(cmd_str_tmp)
cmd_str_tmp=bldg_tag_+'diffp_idx=grep(\'POWER\','+bldg_tag_+'diffsensor_names)'
exec(cmd_str_tmp)
cmd_str_tmp=bldg_tag_+'diffp_names=list(np.array('+bldg_tag_+'diffsensor_names)['+bldg_tag_+'diffp_idx])'
exec(cmd_str_tmp)
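# A non-exec sketch of what each generated command above expands to for one building
# (illustrative only; the exec-based loop remains the code that actually runs), e.g.:
#   GW1_avgp_idx   = grep('POWER', GW1_avgsensor_names)
#   GW1_avgp_names = list(np.array(GW1_avgsensor_names)[GW1_avgp_idx])
# i.e. the POWER-related sensor indices and names are extracted per building and per
# signal type (avg / diff).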
"""
pprint.pprint(all_psensors)=
['VAK1.CG_SYSTEM_REACTIVE_POWER_M', 'VAK1.CG_PHASE2_REACTIVE_POWER_M',
'GW2.CG_PHASE2_ACTIVE_POWER_M', 'VAK1.CG_PHASE2_POWER_FACTOR_M',
'VAK1.CG_PHASE3_POWER_FACTOR_M', 'GW2.CG_SYSTEM_REACTIVE_POWER_M',
'VAK1.CG_PHASE1_POWER_FACTOR_M', 'VAK1.CG_SYSTEM_POWER_FACTOR_M',
'GW2.CG_PHASE2_POWER_FACTOR_M', 'GW2.CG_SYSTEM_ACTIVE_POWER_M']
"""
all_psensors=list(set(GW1_avgp_names+GW2_avgp_names+VAK1_avgp_names+VAK2_avgp_names+\
GW1_diffp_names+GW2_diffp_names+VAK1_diffp_names+VAK2_diffp_names))
print '--------------------------------------------------------'
print ' Power sensor selected'
print '--------------------------------------------------------'
pprint.pprint([GW1_avgp_idx,GW1_avgp_names,GW1_diffp_idx,GW1_diffp_names])
pprint.pprint([GW2_avgp_idx,GW2_avgp_names,GW2_diffp_idx,GW2_diffp_names])
pprint.pprint([VAK1_avgp_idx,VAK1_avgp_names,VAK1_diffp_idx,VAK1_diffp_names])
pprint.pprint([VAK2_avgp_idx,VAK2_avgp_names,VAK2_diffp_idx,VAK2_diffp_names])
print '--------------------------------------------------------'
# old representation end
###########################################################################################
###########################################################################################
###########################################################################################
# New data representation starts here...
###########################################################################################
# All data is stored in per-building objects, one per tag in bldg_tag_set
###########################################################################################
bldg_tag_set=['GW1_','GW2_','VAK1_','VAK2_']
sig_tag_set=['avg','diff']
for bldg_tag in bldg_tag_set:
print '----------------------------------------'
print 'creating '+ bldg_tag+' obj....'
print '----------------------------------------'
cmd_str_=bldg_tag+'={}'
exec(cmd_str_)
cmd_str_=bldg_tag+'=obj('+bldg_tag+')'
exec(cmd_str_)
for sig_tag in sig_tag_set:
print 'generating '+ sig_tag+' members....'
cmd_str=[[]]*13
cmd_str[0]=bldg_tag+'.'+sig_tag+'data_state_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_state_mat\']'
cmd_str[1]=bldg_tag+'.'+sig_tag+'data_weather_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_weather_mat\']'
cmd_str[2]=bldg_tag+'.'+sig_tag+'data_time_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_time_mat\']'
cmd_str[3]=bldg_tag+'.'+sig_tag+'_time_slot='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'_time_slot\']'
cmd_str[4]=bldg_tag+'.'+sig_tag+'data_exemplar='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_exemplar\']'
cmd_str[5]=bldg_tag+'.'+sig_tag+'data_zvar='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_zvar\']'
cmd_str[6]=bldg_tag+'.'+sig_tag+'sensor_names='+bldg_tag+sig_tag+'data_dict[\'sensor_names\']'
cmd_str[7]=bldg_tag+'.'+sig_tag+'weather_names='+bldg_tag+sig_tag+'data_dict[\'weather_names\']'
cmd_str[8]=bldg_tag+'.'+sig_tag+'time_names='+bldg_tag+sig_tag+'data_dict[\'time_names\']'
cmd_str[9]=bldg_tag+'.'+sig_tag+'p_idx='+bldg_tag+sig_tag+'p_idx'
cmd_str[10]=bldg_tag+'.'+sig_tag+'p_names='+bldg_tag+sig_tag+'p_names'
cmd_str[11]=bldg_tag+'.'+'Conditions_dict='+bldg_tag+'data_dict[\'Conditions_dict\']'
cmd_str[12]=bldg_tag+'.'+'Events_dict='+bldg_tag+'data_dict[\'Events_dict\']'
for cmd_ in cmd_str: exec(cmd_)
cmd_str_='bldg_obj_weather_convert('+bldg_tag+')'
exec(cmd_str_)
# Create class structure for data analysis
for bldg_tag in bldg_tag_set:
analysis={}
for sig_tag in sig_tag_set:
cmd_str_='p_names='+bldg_tag+'.'+sig_tag+'p_names'
exec(cmd_str_)
temp1={}
for name_ in p_names:
temp_s=obj({'optprob_set':[],'optstate_set':[]})
temp_t=obj({'optprob_set':[],'optstate_set':[]})
temp_w=obj({'optprob_set':[],'optstate_set':[]})
temp2=obj({'Sensor':temp_s,'Time':temp_t,'Weather':temp_w})
temp1.update({remove_dot(name_):temp2})
analysis.update({sig_tag:obj(temp1)})
analysis=obj(analysis)
cmd_str_=bldg_tag+'.analysis=analysis'
exec(cmd_str_)
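# After this loop, each building object exposes nested result slots of the form
# (a sketch; the attribute name comes from remove_dot() of the power sensor name):
#   GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Sensor.optprob_set
#   GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Time.optprob_set
#   GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Weather.optprob_set
# Each Sensor/Time/Weather member holds an (optprob_set, optstate_set) pair that is
# filled in by the conditional-likelihood computation below.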
def find_cond_lh_set(data_state_mat,cause_idx_set,effect_idx,obs_state):
optprob_set=np.zeros(len(cause_idx_set))
optstate_set=np.zeros(len(cause_idx_set))
for i,cause_idx in enumerate(cause_idx_set):
# Compute the conditional likelihood of the effect state given this cause sensor
avg_state_temp, avg_prob_temp\
=compute_cause_likelihood(data_state_mat,[cause_idx],[[effect_idx]],[[obs_state]])
# masking its own effect
if cause_idx==effect_idx:
# and its state
max_opt_state=np.nan
# and its probability
max_opt_prob=-np.inf
else:
# find sensor index giving the maximum likelihood
max_idx=argmax(avg_prob_temp)
# and its state
max_opt_state=avg_state_temp[max_idx]
# and its probability
max_opt_prob=avg_prob_temp[max_idx]
optprob_set[i]=max_opt_prob
optstate_set[i]=max_opt_state
return optstate_set, optprob_set
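# Example usage (a sketch; calls of this form are made in the loop below):
#   optstate_set, optprob_set = find_cond_lh_set(all_data_state_mat,
#                                                sensor_cause_idx_set, effect_idx, PEAK)
# optprob_set[i] is the best-case likelihood of the observed effect state given the
# i-th candidate cause, and optstate_set[i] is the cause state that attains it.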
###
#
###
obs_state=PEAK
for bldg_tag in bldg_tag_set:
#bldg_tag='VAK1_'
#bldg_tag='GW2_'
#sig_tag='avg'
print '-------------------------'
print bldg_tag
print '-------------------------'
for sig_tag in sig_tag_set:
print sig_tag+'.....'
cmd_str_='all_data_state_mat=np.vstack(('+bldg_tag+'.'+sig_tag+'data_state_mat.T, '\
+bldg_tag+'.'+sig_tag+'data_time_mat.T,'+bldg_tag+'.'+sig_tag+'data_weather_mat_.T)).T'
exec(cmd_str_)
cmd_str_='p_idx='+bldg_tag+'.'+sig_tag+'p_idx'
exec(cmd_str_)
cmd_str_='p_names='+bldg_tag+'.'+sig_tag+'p_names'
exec(cmd_str_)
cmd_str_='len_sensor='+bldg_tag+'.'+sig_tag+'data_state_mat.shape[1]'
exec(cmd_str_)
cmd_str_='len_time='+bldg_tag+'.'+sig_tag+'data_time_mat.shape[1]'
exec(cmd_str_)
cmd_str_='len_weather='+bldg_tag+'.'+sig_tag+'data_weather_mat.shape[1]'
exec(cmd_str_)
cmd_str_='sensor_cause_idx_set=range(len_sensor)'
exec(cmd_str_)
cmd_str_='time_cause_idx_set=range(len_sensor,len_sensor+len_time)'
exec(cmd_str_)
cmd_str_='weather_cause_idx_set=range(len_sensor+len_time,len_sensor+len_time+len_weather)'
exec(cmd_str_)
for k,effect_idx in enumerate(p_idx):
print 'compute cond. prob of ' + p_names[k]
cmd_str_='p_name_='+bldg_tag+'.'+sig_tag+'p_names[k]'
exec(cmd_str_)
# Sensors
s_optstate_set_temp,s_optprob_set_temp=\
find_cond_lh_set(all_data_state_mat,sensor_cause_idx_set,effect_idx,obs_state)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.Sensor.optprob_set=s_optprob_set_temp'
exec(cmd_str_)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.Sensor.optstate_set=s_optstate_set_temp'
exec(cmd_str_)
# Time
#t_state_map, t_prob_map=\
#compute_cause_likelihood(all_data_state_mat,time_cause_idx_set,[[effect_idx]],[[obs_state]])
#cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.Time.state_map=t_state_map'
#exec(cmd_str_)
#cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.Time.prob_map=t_prob_map'
#exec(cmd_str_)
# Weather
w_optstate_set_temp,w_optprob_set_temp=\
find_cond_lh_set(all_data_state_mat,weather_cause_idx_set,effect_idx,obs_state)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.Weather.optprob_set=w_optprob_set_temp'
exec(cmd_str_)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.Weather.optstate_set=w_optstate_set_temp'
exec(cmd_str_)
cmd_str_='mt.saveObjectBinaryFast('+bldg_tag+','+'bldg_tag+\'.bin\')'
exec(cmd_str_)
#import pdb;pdb.set_trace()
"""
dhr_prob_map,dhr_state_map,dhr_sensitivity,dhr_list\
= param_sensitivity(data_mat,data_name,sensor_names,'HR',dst_type=dst_t)
effect_idx=GW2_.avgp_idx[0]
avg_state_temp, avg_prob_temp\
=compute_cause_likelihood(all_data_state_mat,time_cause_idx_set,[[effect_idx]],[[PEAK]])
w_avg_state_temp, w_avg_prob_temp\
=compute_cause_likelihood(all_data_state_mat,weather_cause_idx_set,[[effect_idx]],[[PEAK]])
state_set=np.array(avg_state_temp)
prob_set=np.array(avg_prob_temp)
m_idx=np.nonzero(state_set[:,0]==Aug)
Aug_state=state_set[m_idx,1:][0]
Aug_prob=prob_set[m_idx,1:][0]
m_idx=np.nonzero(state_set[:,0]==Jul)
Jul_state=state_set[m_idx,1:][0]
Jul_prob=prob_set[m_idx,1:][0]
m_idx=np.nonzero(state_set[:,0]==Jun)
Jun_state=state_set[m_idx,1:][0]
Jun_prob=prob_set[m_idx,1:][0]
m_idx=np.nonzero(state_set[:,0]==Dec)
Dec_state=state_set[m_idx,1:][0]
Dec_prob=prob_set[m_idx,1:][0]
m_idx=np.nonzero(state_set[:,0]==Jan)
Jan_state=state_set[m_idx,1:][0]
Jan_prob=prob_set[m_idx,1:][0]
prob_set=sort(VAK1_.analysis.diff.VAK1_CG_SYSTEM_REACTIVE_POWER_M.Sensor.optprob_set)[::-1][0:30]
prob_set1=sort(VAK1_.analysis.avg.VAK1_CG_PHASE2_REACTIVE_POWER_M.Sensor.optprob_set)[::-1][0:30]
prob_set2=sort(GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Sensor.optprob_set)[::-1][0:30]
prob_set_t=GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Time.optprob_set
prob_set_w=GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Weather.optprob_set
plot(prob_set_w,'-s')
"""
#mt.saveObjectBinaryFast(data_dict,binfilename+'.bin')
PLOTTING_LH=0
if PLOTTING_LH==1:
plt.ioff()
for bldg_tag in bldg_tag_set:
print '-------------------------'
print bldg_tag
print '-------------------------'
for sig_tag in sig_tag_set:
print sig_tag+'.....'
cmd_str_='p_names='+bldg_tag+'.'+sig_tag+'p_names'
exec(cmd_str_)
for pname_ in p_names:
try:
blank_idx=pname_.index('.')
pname_=pname_.replace('.','_')
except:
pass
cmd_str_='optprob_set='+bldg_tag+'.analysis.'+sig_tag+'.'+pname_+'.optprob_set'
exec(cmd_str_)
cmd_str_= 's_names='+bldg_tag+'.'+sig_tag+'sensor_names'
exec(cmd_str_)
cmd_str_= 'optstate_set='+bldg_tag+'.analysis.'+sig_tag+'.'+pname_+'.optstate_set'
exec(cmd_str_)
num_picks=30
sort_idx=argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx[:num_picks]].T
sort_state=optstate_set[sort_idx[:num_picks]].T
fig=figure(figsize=(20.0,15.0))
subplot(2,1,1)
plt.plot(sort_lh,'-*')
x_label= list(np.array(s_names)[sort_idx[:num_picks]])
x_ticks=range(len(x_label))
plt.xticks(x_ticks,x_label,rotation=270, fontsize="small")
if sig_tag=='avg':
plt.title('Most relevant '+bldg_tag +'sensors to the peak (demand) of '+pname_,fontsize=20)
else:
plt.title('Most relevant '+bldg_tag +'sensors to the peak variations of '+pname_,fontsize=20)
plt.tick_params(labelsize='large')
plt.ylabel('Likelihood (From 0 to 1)',fontsize=18)
#plt.get_current_fig_manager().window.showMaximized()
plt.savefig(fig_dir+pname_+'_'+sig_tag+'_lh_sensors.png', bbox_inches='tight')
plt.close()
plt.ion()
############################################################################
# Analysis of results
############################################################################
import pdb;pdb.set_trace()
###############################################################
# 1. Regular Events for GW2.CG_SYSTEM_ACTIVE_POWER_M
###############################################################
num_picks=30
sig_tag='avg'
optprob_set=GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Sensor.optprob_set
optstate_set=GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Sensor.optstate_set
s_names=GW2_.avgsensor_names
p_name='GW2.CG_SYSTEM_ACTIVE_POWER_M'
sort_idx=argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx[:num_picks]].T
sort_state=optstate_set[sort_idx[:num_picks]].T
sort_label= list(np.array(s_names)[sort_idx[:num_picks]])
data_state_mat=GW2_.avgdata_state_mat
lh_threshold=0.9
cause_idx=list(np.nonzero(optprob_set>lh_threshold)[0])
cause_label=[GW2_.avgsensor_names[idx] for idx in cause_idx]
effect_idx=GW2_.avgsensor_names.index(p_name)
effect_label=[p_name]
# For PEAK Demand
obs_state=PEAK
peak_state_temp, peak_prob_temp=compute_cause_likelihood(data_state_mat,cause_idx,[[effect_idx]],[[obs_state]])
# For LOW PEAK Demand
obs_state=LOW_PEAK
lowpeak_state_temp,lowpeak_prob_temp=compute_cause_likelihood(data_state_mat,cause_idx,[[effect_idx]],[[obs_state]])
# Check the probability
plt.plot(peak_state_temp,peak_prob_temp,'-^')
plt.plot(lowpeak_state_temp,lowpeak_prob_temp,'-v')
plt.title(cause_label)
plt.xlabel('Measurements')
plt.ylabel('Probability of State of Power Demand')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
plt.savefig(fig_dir+p_name+'_'+sig_tag+'_cause_prob.png', bbox_inches='tight')
data_1=get_data_set(cause_label+effect_label)
avg_png_name=plot_data_x(data_1)
import lib_bnlearn as rbn
num_picks=10
p_idx=GW2_.avgsensor_names.index(p_name)
idx_select=[p_idx]+ list(sort_idx[:num_picks])
bndata_mat=GW2_.avgdata_state_mat[:,idx_select]
# File name format - allowing dot
cols_fnames=[GW2_.avgsensor_names[k] for k in idx_select]
# Variable name format - replacing dot with underscore
cols=[remove_dot(GW2_.avgsensor_names[k]) for k in idx_select]
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
fig=rbn.nx_plot(hc_b,cols)
amat = rbn.py_get_amat(hc_b)
plt.savefig(fig_dir+p_name+'_'+sig_tag+'bn_sensors.png', bbox_inches='tight')
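# amat is the learned adjacency matrix: amat[i,j]==1 is read here as a directed arc
# cols[i] -> cols[j], so the nonzero rows of column 0 give the learned parents (direct
# causes) of the power sensor placed at cols[0].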
s_cause_label=list(np.array(cols)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols.index(label_) for label_ in s_cause_label]
#fig=figure(figsize=(10,10))
fig=figure()
for k in range(len(cause_idx)):
effect_idx=cols_fnames.index(p_name)
peak_state_0, peak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[k]],[[effect_idx]],[[PEAK]])
lowpeak_state_0, lowpeak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[k]],[[effect_idx]],[[LOW_PEAK]])
sort_idx1=argsort(peak_state_0)
sort_idx2=argsort(lowpeak_state_0)
subplot(1,len(cause_idx),k+1)
plot(sort(peak_state_0), np.array(peak_prob_0)[sort_idx1],'-^')
plot(sort(lowpeak_state_0), np.array(lowpeak_prob_0)[sort_idx2],'-v')
plt.legend(('measurements','classified states'))
if k==0:
plt.ylabel('Probability of Peak Power Demand')
plt.grid()
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.xlabel(s_cause_label[k])
if len(peak_state_0)==len(stateDict.keys()):
if sum(abs(sort(stateDict.keys())-sort(peak_state_0)))==0:
plt.xticks(stateDict.keys(),stateDict.values(),rotation=0, fontsize=12)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fcause_label=list(np.array(cols_fnames)[np.nonzero(amat[:,0]==1)[0]])
start_t=datetime.datetime(2014, 1, 19, 0, 0, 0)
end_t=datetime.datetime(2014, 1, 25, 0, 0, 0)
data_2=get_data_set(fcause_label+effect_label,start_t,end_t)
# data_x=get_data_set([cause_label[1]]+[cause_label[3]]+effect_label,start_t,end_t)
png_namex=plot_data_x(data_2,stype='raw',smark='-')
#png_namex=plot_data_x(data_x,stype='diff',smark='-^')
###############################################################
# 2. Irregular Events for GW2.CG_SYSTEM_ACTIVE_POWER_M
###############################################################
bldg_tag='GW2_'
sig_tag='diff'
p_name='GW2.CG_PHASE2_ACTIVE_POWER_M'
cmd_str_='optprob_set='+bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name)+'.Sensor.optprob_set'
exec(cmd_str_)
cmd_str_='optstate_set='+bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name)+'.Sensor.optstate_set'
exec(cmd_str_)
cmd_str_='s_names='+bldg_tag+sig_tag+'sensor_names'
exec(cmd_str_)
sort_idx=argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx[:num_picks]].T
sort_state=optstate_set[sort_idx[:num_picks]].T
sort_label= list(np.array(s_names)[sort_idx[:num_picks]])
# BN Network Learning
import lib_bnlearn as rbn
num_picks=15
p_idx=GW2_.diffsensor_names.index(p_name)
idx_select=[p_idx]+ list(sort_idx[:num_picks])
bndata_mat=GW2_.diffdata_state_mat[:,idx_select]
# File name format - allowing dot
cols_fnames=[GW2_.diffsensor_names[k] for k in idx_select]
# Variable name format - replacing dot with underscore
cols=[remove_dot(GW2_.diffsensor_names[k]) for k in idx_select]
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
#fit = rbn.py_bn_fit(hc_b,data_frame)
#index_temp=2
#prob_dimnames,prob_factors,prob_mat = rbn.py_get_node_cond_mat(fit,index_temp)
data_state_mat=GW2_.diffdata_state_mat
cause_label=list(np.array(cols_fnames)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[GW2_.diffsensor_names.index(label_) for label_ in cause_label]
effect_idx=GW2_.diffsensor_names.index(p_name)
effect_label=[p_name]
obs_state=PEAK
peak_state_temp, peak_prob_temp=compute_cause_likelihood(data_state_mat,cause_idx,[[effect_idx]],[[obs_state]])
obs_state=LOW_PEAK
lowpeak_state_temp, lowpeak_prob_temp=compute_cause_likelihood(data_state_mat,cause_idx,[[effect_idx]],[[obs_state]])
plt.plot(peak_state_temp,peak_prob_temp,'-^')
plt.plot(lowpeak_state_temp,lowpeak_prob_temp,'-v')
plt.title(cause_label,fontsize='large')
plt.xlabel('Measurements',fontsize='large')
plt.ylabel('Probability of State of Power Demand Variation',fontsize='large')
plt.xticks(fontsize='large')
plt.yticks(fontsize='large')
plt.grid()
plt.legend(('High Variation', 'No Variation'),prop={'size':18})
plt.savefig(fig_dir+p_name+'_'+sig_tag+'_variation_cause_prob.png', bbox_inches='tight')
data_2=get_data_set(cause_label+effect_label)
diff_png_name=plot_data_x(data_2,stype='diff')
#sensors_=list(np.array(cols_fnames)[np.nonzero(amat[:,0]==1)[0]])
###############################################################
# 3. Time and Weather Dependency Analysis
# Weather data dependency
# BN Network Learning
###############################################################
fig1=figure()
plot(GW2_.avg_time_slot,GW2_.avgdata_weather_mat[:,-1])
plot(GW2_.avg_time_slot,GW2_.avgdata_weather_mat_[:,-1],'*r')
ylabel(GW2_.avgweather_names[-1])
plt.legend(('measurements','classified states'))
mn_=min(GW2_.avgdata_weather_mat[:,-1])
mx_=max(GW2_.avgdata_weather_mat[:,-1])
ylim([mn_-0.1*abs(mn_),mx_+0.1*abs(mx_)])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig1.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig2=figure()
plot(GW2_.avg_time_slot,GW2_.avgdata_weather_mat[:,-2])
plot(GW2_.avg_time_slot,GW2_.avgdata_weather_mat_[:,-2],'*r')
plt.legend(('measurements','classified states'))
ylabel(GW2_.avgweather_names[-2])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig2.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig3=figure()
plot(GW2_.avg_time_slot,GW2_.avgdata_weather_mat[:,-3])
plot(GW2_.avg_time_slot,GW2_.avgdata_weather_mat_[:,-3],'*r')
plt.legend(('measurements','classified states'))
ylabel(GW2_.avgweather_names[-3])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig3.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# Likelihood of weather factors
optprob_set=GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Weather.optprob_set
w_names=GW2_.avgweather_names
sort_idx=argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx].T
sort_state=optstate_set[sort_idx].T
figw=figure(figsize=(15.0,10.0))
#figw=figure()
plt.subplot(2,1,1)
plt.plot(sort_lh,'-s')
x_label= list(np.array(w_names)[sort_idx])
x_ticks=range(len(x_label))
#plt.xticks(x_ticks,x_label, fontsize="small")
plt.xticks(x_ticks,x_label,rotation=30, fontsize=12)
plt.tick_params(labelsize='large')
plt.ylabel('Likelihood (From 0 to 1)',fontsize=18)
#plt.get_current_fig_manager().window.showMaximized()
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
figw.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# regular event
sig_tag='avg'
p_name='GW2.CG_SYSTEM_ACTIVE_POWER_M'
p_idx=GW2_.avgsensor_names.index(p_name)
bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat_.T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[p_name]+[w_name for w_name in GW2_.avgweather_names]
cols=[remove_dot(p_name)]+[remove_dot(w_name) for w_name in GW2_.avgweather_names]
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
wcause_label=list(np.array(cols)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols.index(label_) for label_ in wcause_label]
effect_idx=cols_fnames.index(p_name)
peak_state_0, peak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[0]],[[effect_idx]],[[PEAK]])
lowpeak_state_0, lowpeak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[0]],[[effect_idx]],[[LOW_PEAK]])
sort_idx1=argsort(peak_state_0)
sort_idx2=argsort(lowpeak_state_0)
fig0=figure()
plot(sort(peak_state_0), np.array(peak_prob_0)[sort_idx1],'-^')
plot(sort(lowpeak_state_0), np.array(lowpeak_prob_0)[sort_idx2],'-v')
plt.legend(('measurements','classified states'))
plt.ylabel('Probability of State of Power Demand')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
plt.xlabel(wcause_label[0])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig0.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# Irregular event
sig_tag='diff'
p_name='GW2.CG_PHASE2_ACTIVE_POWER_M'
p_idx=GW2_.diffsensor_names.index(p_name)
bndata_mat=np.vstack((GW2_.diffdata_state_mat[:,p_idx].T,GW2_.diffdata_weather_mat_.T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[p_name]+[w_name for w_name in GW2_.diffweather_names]
cols=[remove_dot(p_name)]+[remove_dot(w_name) for w_name in GW2_.diffweather_names]
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
wcause_label=list(np.array(cols)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols.index(label_) for label_ in wcause_label]
effect_idx=cols_fnames.index(p_name)
peak_state_0, peak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[0]],[[effect_idx]],[[PEAK]])
lowpeak_state_0, lowpeak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[0]],[[effect_idx]],[[LOW_PEAK]])
sort_idx1=argsort(peak_state_0)
sort_idx2=argsort(lowpeak_state_0)
fig0=figure()
plot(sort(peak_state_0), np.array(peak_prob_0)[sort_idx1],'-^')
plot(sort(lowpeak_state_0), np.array(lowpeak_prob_0)[sort_idx2],'-v')
plt.legend(('measurements','classified states'))
plt.ylabel('Probability of State of Power Demand')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
plt.xlabel(wcause_label[0])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig0.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
"""
peak_state_1, peak_prob_1=compute_cause_likelihood(bndata_mat,[cause_idx[1]],[[effect_idx]],[[PEAK]])
lowpeak_state_1, lowpeak_prob_1=compute_cause_likelihood(bndata_mat,[cause_idx[1]],[[effect_idx]],[[LOW_PEAK]])
sort_idx1=argsort(peak_state_1)
sort_idx2=argsort(lowpeak_state_1)
fig1=figure()
plot(sort(peak_state_1), np.array(peak_prob_1)[sort_idx1],'-^')
plot(sort(lowpeak_state_1), np.array(lowpeak_prob_1)[sort_idx2],'-v')
plt.legend(('measurements','classified states'))
plt.ylabel('Probability of State of Power Demand')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
plt.xlabel(wcause_label[1])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig1.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
"""
# Time data dependency - Likelihood of time factors
# BN Network Learning
# Regular event
state_map=np.array(GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Time.state_map)
prob_map=np.array(GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Time.prob_map)
t_name_set=GW2_.avgtime_names
# ['MTH', 'WD', 'HR']
sig_tag='avg'
p_name='GW2.CG_SYSTEM_ACTIVE_POWER_M'
p_idx=GW2_.avgsensor_names.index(p_name)
bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_time_mat.T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[p_name]+[w_name for w_name in GW2_.avgtime_names]
cols=[remove_dot(p_name)]+[remove_dot(w_name) for w_name in GW2_.avgtime_names]
effect_idx=cols_fnames.index(p_name)
time_high_peak_liklihood_set=[]
time_low_peak_liklihood_set=[]
for t_name in t_name_set:
idx_t=cols.index(t_name)
peak_state, peak_prob=compute_cause_likelihood(bndata_mat,[idx_t],[[effect_idx]],[[PEAK]])
time_high_peak_liklihood_set.append(np.array([peak_state,peak_prob]))
peak_state, peak_prob=compute_cause_likelihood(bndata_mat,[idx_t],[[effect_idx]],[[LOW_PEAK]])
time_low_peak_liklihood_set.append(np.array([peak_state,peak_prob]))
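# time_high_peak_liklihood_set / time_low_peak_liklihood_set follow the order of
# t_name_set (expected ['MTH', 'WD', 'HR']); each entry is a 2-row array of
# [time states, likelihoods] and is plotted in the corresponding subplot below.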
fig=figure()
subplot(3,1,1)
plot(time_high_peak_liklihood_set[0][0],time_high_peak_liklihood_set[0][1],'-^')
plot(time_low_peak_liklihood_set[0][0],time_low_peak_liklihood_set[0][1],'-v')
plt.xticks(monthDict.keys(),monthDict.values())
plt.xlabel('Months of a year',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
subplot(3,1,2)
plot(time_high_peak_liklihood_set[1][0],time_high_peak_liklihood_set[1][1],'-^')
plot(time_low_peak_liklihood_set[1][0],time_low_peak_liklihood_set[1][1],'-v')
plt.xticks(weekDict.keys(),weekDict.values())
plt.xlabel('Days of a Week',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
plt.tick_params(labelsize='large')
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
subplot(3,1,3)
plot(time_high_peak_liklihood_set[2][0],time_high_peak_liklihood_set[2][1],'-^')
plot(time_low_peak_liklihood_set[2][0],time_low_peak_liklihood_set[2][1],'-v')
plt.xticks(hourDict.keys(),hourDict.values())
plt.xlabel('Hours of a day',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
plt.tick_params(labelsize='large')
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# BN Network representation
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
#hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# Irregular event
state_map=np.array(GW2_.analysis.diff.GW2_CG_PHASE2_ACTIVE_POWER_M.Time.state_map)
prob_map=np.array(GW2_.analysis.diff.GW2_CG_PHASE2_ACTIVE_POWER_M.Time.prob_map)
t_name_set=GW2_.avgtime_names
# ['MTH', 'WD', 'HR']
sig_tag='diff'
p_name='GW2.CG_PHASE2_ACTIVE_POWER_M'
p_idx=GW2_.diffsensor_names.index(p_name)
bndata_mat=np.vstack((GW2_.diffdata_state_mat[:,p_idx].T,GW2_.diffdata_time_mat.T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[p_name]+[w_name for w_name in GW2_.difftime_names]
cols=[remove_dot(p_name)]+[remove_dot(w_name) for w_name in GW2_.difftime_names]
effect_idx=cols_fnames.index(p_name)
time_high_peak_liklihood_set=[]
time_low_peak_liklihood_set=[]
for t_name in t_name_set:
idx_t=cols.index(t_name)
peak_state, peak_prob=compute_cause_likelihood(bndata_mat,[idx_t],[[effect_idx]],[[PEAK]])
time_high_peak_liklihood_set.append(np.array([peak_state,peak_prob]))
peak_state, peak_prob=compute_cause_likelihood(bndata_mat,[idx_t],[[effect_idx]],[[LOW_PEAK]])
time_low_peak_liklihood_set.append(np.array([peak_state,peak_prob]))
fig=figure()
subplot(3,1,1)
plot(time_high_peak_liklihood_set[0][0],time_high_peak_liklihood_set[0][1],'-^')
plot(time_low_peak_liklihood_set[0][0],time_low_peak_liklihood_set[0][1],'-v')
plt.xticks(monthDict.keys(),monthDict.values())
plt.xlabel('Months of a year',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Variation', 'Low Variation'))
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
subplot(3,1,2)
plot(time_high_peak_liklihood_set[1][0],time_high_peak_liklihood_set[1][1],'-^')
plot(time_low_peak_liklihood_set[1][0],time_low_peak_liklihood_set[1][1],'-v')
plt.xticks(weekDict.keys(),weekDict.values())
plt.xlabel('Days of a Week',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Variation', 'Low Variation'))
plt.tick_params(labelsize='large')
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
subplot(3,1,3)
plot(time_high_peak_liklihood_set[2][0],time_high_peak_liklihood_set[2][1],'-^')
plot(time_low_peak_liklihood_set[2][0],time_low_peak_liklihood_set[2][1],'-v')
plt.xticks(hourDict.keys(),hourDict.values())
plt.xlabel('Hours of a day',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Variation', 'Low Variation'))
plt.tick_params(labelsize='large')
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# BN Network representation
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
#hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
###############################################################
# 4. Sensor, Weather, and Time Dependency Analysis
# BN Network Learning
###############################################################
# For regular event.
state_map=np.array(GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Time.state_map)
prob_map=np.array(GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Time.prob_map)
t_name_set=GW2_.avgtime_names
# ['MTH', 'WD', 'HR']
sig_tag='avg'
p_name=['GW2.CG_SYSTEM_ACTIVE_POWER_M']
sensor_cause_label=['GW2.SAA_UV_INDEX_M','GW2.HA49_AS_TE_KH_FM']
weather_cause_label=['Humidity']
time_cause_label=['MTH', 'HR']
p_idx=[GW2_.avgsensor_names.index(temp) for temp in p_name]
s_idx=[GW2_.avgsensor_names.index(temp) for temp in sensor_cause_label]
w_idx=[GW2_.avgweather_names.index(temp) for temp in weather_cause_label]
t_idx=[GW2_.avgtime_names.index(temp) for temp in time_cause_label]
bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,\
GW2_.avgdata_state_mat[:,s_idx].T, \
GW2_.avgdata_weather_mat_[:,w_idx].T, \
GW2_.avgdata_time_mat[:,t_idx].T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[name_ for name_ in p_name+sensor_cause_label+weather_cause_label+time_cause_label]
cols=[remove_dot(name_) for name_ in p_name+sensor_cause_label+weather_cause_label+time_cause_label]
# BN Network representation
b_arc_list = pair_in_idx([cols[0]],cols[1:])+pair_in_idx([cols[1]],cols[2:])+pair_in_idx([cols[2]],cols[3:])+pair_in_idx([cols[3]],cols[4:])
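# The chained pair_in_idx() calls above blacklist every arc that points from one of the
# first four columns (the power sensor and the candidate sensor/weather causes) to a
# column listed after it, so learned arcs among these variables can only run back toward
# the power sensor.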
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
cause_label=list(np.array(cols_fnames)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols_fnames.index(label_) for label_ in cause_label]
effect_idx=[cols_fnames.index(label_) for label_ in p_name]
effect_label=p_name
obs_state=PEAK
peak_state_temp, peak_prob_temp=compute_cause_likelihood(bndata_mat,cause_idx,[effect_idx],[[obs_state]])
obs_state=LOW_PEAK
lowpeak_state_temp, lowpeak_prob_temp=compute_cause_likelihood(bndata_mat,cause_idx,[effect_idx],[[obs_state]])
peak_state=np.array(peak_state_temp)
peak_prob=np.array(peak_prob_temp)
lowpeak_state=np.array(lowpeak_state_temp)
lowpeak_prob=np.array(lowpeak_prob_temp)
# Probability
fig=figure(figsize=(25.0,20.0))
for i,mon in enumerate(yearMonths):
subplot(3,4,mon+1)
idx=np.nonzero(peak_state[:,1]==mon)[0]
plot(peak_state[idx,0],peak_prob[idx],'-^')
idx=np.nonzero(lowpeak_state[:,1]==mon)[0]
plot(lowpeak_state[idx,0],lowpeak_prob[idx],'-v')
plt.ylabel('Likelihood',fontsize='small')
if i>7:
plt.xlabel(cause_label[0]+' Measurements',fontsize='small')
title(monthDict[mon]);plt.ylim([-0.05,1.05])
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.tick_params(labelsize='small')
plt.grid()
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
print '----------------------------------------'
print 'Likelihoods '
print '----------------------------------------'
print cause_label+['Low Peak','High Peak']
print '----------------------------------------'
print np.vstack((np.int0(peak_state).T,np.int0(100*lowpeak_prob).T,np.int0(100*peak_prob).T)).T
print '----------------------------------------'
s_val_set=set(peak_state[:,0])
m_val_set=set(peak_state[:,1])
Z_peak=np.ones((len(s_val_set),len(m_val_set)))*np.inf
for i,s_val in enumerate(s_val_set):
for j,m_val in enumerate(m_val_set):
idx=np.nonzero((peak_state[:,0]==s_val)&(peak_state[:,1]==m_val))[0][0]
Z_peak[i,j]=peak_prob[idx]
s_val_set=set(lowpeak_state[:,0])
m_val_set=set(lowpeak_state[:,1])
Z_lowpeak=np.ones((len(s_val_set),len(m_val_set)))*np.inf
for i,s_val in enumerate(s_val_set):
for j,m_val in enumerate(m_val_set):
idx=np.nonzero((lowpeak_state[:,0]==s_val)&(lowpeak_state[:,1]==m_val))[0][0]
Z_lowpeak[i,j]=lowpeak_prob[idx]
Z_lowpeak=lowpeak_prob.reshape((len(s_val_set),len(m_val_set)))
Z_peak=peak_prob.reshape((len(s_val_set),len(m_val_set)))
fig1=figure()
im = plt.imshow(Z_peak, cmap='hot',vmin=0, vmax=1,aspect='auto')
plt.colorbar(im, orientation='horizontal')
plt.xticks(monthDict.keys(),monthDict.values(),fontsize='large')
plt.yticks(range(len(s_val_set)),list(s_val_set),fontsize='large')
plt.xlabel(cause_label[1],fontsize='large')
plt.ylabel(cause_label[0],fontsize='large')
plt.title('Likelihood of High-Peak')
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig1.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig2=figure()
im = plt.imshow(Z_lowpeak, cmap='hot',vmin=0, vmax=1,aspect='auto')
plt.colorbar(im, orientation='horizontal')
plt.xticks(monthDict.keys(),monthDict.values(),fontsize='large')
plt.yticks(range(len(s_val_set)),list(s_val_set),fontsize='large')
plt.xlabel(cause_label[1],fontsize='large')
plt.ylabel(cause_label[0],fontsize='large')
plt.title('Likelihood of Low-Peak')
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig2.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
###############################################################
# 3. Irregular Events for VAK1_CG_SYSTEM_REACTIVE_POWER_M
###############################################################
bldg_tag='VAK1_'
sig_tag='diff'
p_name='VAK1.CG_SYSTEM_REACTIVE_POWER_M'
cmd_str_='optprob_set='+bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name)+'.Sensor.optprob_set'
exec(cmd_str_)
cmd_str_='optstate_set='+bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name)+'.Sensor.optstate_set'
exec(cmd_str_)
cmd_str_='s_names='+bldg_tag+sig_tag+'sensor_names'
exec(cmd_str_)
optprob_set=VAK1_.analysis.diff.VAK1_CG_SYSTEM_REACTIVE_POWER_M.Sensor.optprob_set
optstate_set=VAK1_.analysis.diff.VAK1_CG_SYSTEM_REACTIVE_POWER_M.Sensor.optstate_set
s_names=VAK1_.diffsensor_names
sort_idx=argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx[:num_picks]].T
sort_state=optstate_set[sort_idx[:num_picks]].T
sort_label= list(np.array(s_names)[sort_idx[:num_picks]])
# BN Network Learning
import lib_bnlearn as rbn
num_picks=30
p_idx=VAK1_.diffsensor_names.index(p_name)
idx_select=[p_idx]+ list(sort_idx[:num_picks])
bndata_mat=VAK1_.diffdata_state_mat[:,idx_select]
# File name format - allowing dot
cols_fnames=[VAK1_.diffsensor_names[k] for k in idx_select]
# Variable name format - replacing dot with underscore
cols=[remove_dot(VAK1_.diffsensor_names[k]) for k in idx_select]
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
plt.savefig(fig_dir+p_name+'_'+sig_tag+'bn_sensors.png', bbox_inches='tight')
#fit = rbn.py_bn_fit(hc_b,data_frame)
#index_temp=2
#prob_dimnames,prob_factors,prob_mat = rbn.py_get_node_cond_mat(fit,index_temp)
data_state_mat=VAK1_.diffdata_state_mat
cause_label=list(np.array(cols_fnames)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[VAK1_.diffsensor_names.index(label_) for label_ in cause_label]
effect_idx=VAK1_.diffsensor_names.index(p_name)
effect_label=[p_name]
obs_state=PEAK
peak_state_13, peak_prob_13=compute_cause_likelihood(data_state_mat,[cause_idx[1],cause_idx[3]],[[effect_idx]],[[obs_state]])
print_cond_table(peak_state_13, peak_prob_13,[cause_label[1],cause_label[3]])
obs_state=LOW_PEAK
lowpeak_state_13, lowpeak_prob_13=compute_cause_likelihood(data_state_mat,[cause_idx[1],cause_idx[3]],[[effect_idx]],[[obs_state]])
print_cond_table(lowpeak_state_13, lowpeak_prob_13,[cause_label[1],cause_label[3]])
plt.plot(range(len(peak_state_13)), peak_prob_13,'-^')
plt.plot(range(len(lowpeak_state_13)), lowpeak_prob_13,'-v')
plt.title(cause_label[1]+cause_label[3],fontsize='large')
plt.xlabel('State',fontsize='large')
plt.ylabel('Probability of State of Reactive Power Variation',fontsize='large')
plt.xticks(fontsize='large')
plt.yticks(fontsize='large')
plt.grid()
plt.legend(('High Variation', 'No Variation'),prop={'size':18})
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
start_t=datetime.datetime(2014, 2, 20, 15, 44, 52)
end_t=datetime.datetime(2014, 2, 24, 16, 5, 12)
data_x=get_data_set([cause_label[1]]+[cause_label[3]]+effect_label,start_t,end_t)
png_namex=plot_data_x(data_x,stype='raw',smark='-^')
png_namex=plot_data_x(data_x,stype='diff',smark='-^')
############################################################################
############################################################################
#<--------------------------------------------------------------------
#<--------------------------------------------------------------------
#<--------------------------------------------------------------------
###############################################################
# 3. Time and Weather Dependency Analysis
# Weather data dependency
# BN Network Learning
###############################################################
bldg_tag='VAK1_'
sig_tag='diff'
p_name='VAK1.CG_SYSTEM_REACTIVE_POWER_M'
optprob_set=VAK1_.analysis.diff.VAK1_CG_SYSTEM_REACTIVE_POWER_M.Sensor.optprob_set
optstate_set=VAK1_.analysis.diff.VAK1_CG_SYSTEM_REACTIVE_POWER_M.Sensor.optstate_set
s_names=VAK1_.diffsensor_names
# Likelihood of weather factors
optprob_set=VAK1_.analysis.diff.VAK1_CG_SYSTEM_REACTIVE_POWER_M.Weather.optprob_set
optstate_set=VAK1_.analysis.diff.VAK1_CG_SYSTEM_REACTIVE_POWER_M.Weather.optstate_set
w_names=VAK1_.diffweather_names
sort_idx=argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx].T
sort_state=optstate_set[sort_idx].T
figw=figure(figsize=(15.0,10.0))
#figw=figure()
plt.subplot(2,1,1)
plt.plot(sort_lh,'-s')
x_label= list(np.array(w_names)[sort_idx])
x_ticks=range(len(x_label))
#plt.xticks(x_ticks,x_label, fontsize="small")
plt.xticks(x_ticks,x_label,rotation=30, fontsize=12)
plt.tick_params(labelsize='large')
plt.ylabel('Likelihood (From 0 to 1)',fontsize=18)
plt.title('Likelihood of peak differential measurement of '+p_name+' given weather factors')
#plt.get_current_fig_manager().window.showMaximized()
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
figw.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# regular event
import lib_bnlearn as rbn
p_idx=VAK1_.diffsensor_names.index(p_name)
bndata_mat=np.vstack((VAK1_.diffdata_state_mat[:,p_idx].T,VAK1_.diffdata_weather_mat_.T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[p_name]+[w_name for w_name in VAK1_.diffweather_names]
cols=[remove_dot(p_name)]+[remove_dot(w_name) for w_name in VAK1_.diffweather_names]
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
wcause_label=list(np.array(cols)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols.index(label_) for label_ in wcause_label]
#fig=figure(figsize=(10,10))
fig=figure()
for k in range(len(cause_idx)):
effect_idx=cols_fnames.index(p_name)
peak_state_0, peak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[k]],[[effect_idx]],[[PEAK]])
lowpeak_state_0, lowpeak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[k]],[[effect_idx]],[[LOW_PEAK]])
sort_idx1=argsort(peak_state_0)
sort_idx2=argsort(lowpeak_state_0)
subplot(1,len(cause_idx),k+1)
plot(sort(peak_state_0), np.array(peak_prob_0)[sort_idx1],'-^')
plot(sort(lowpeak_state_0), np.array(lowpeak_prob_0)[sort_idx2],'-v')
plt.legend(('measurements','classified states'))
if k==0:
plt.ylabel('Probability of Peak Reactive Power Variation')
plt.grid()
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.xlabel(wcause_label[k])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# Time data dependency - Likelihood of time factors
# BN Network Learning
# Regular event
t_name_set=VAK1_.difftime_names
# ['MTH', 'WD', 'HR']
p_idx=VAK1_.diffsensor_names.index(p_name)
bndata_mat=np.vstack((VAK1_.diffdata_state_mat[:,p_idx].T,VAK1_.diffdata_time_mat.T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[p_name]+[w_name for w_name in VAK1_.difftime_names]
cols=[remove_dot(p_name)]+[remove_dot(w_name) for w_name in VAK1_.difftime_names]
effect_idx=cols_fnames.index(p_name)
time_high_peak_liklihood_set=[]
time_low_peak_liklihood_set=[]
for t_name in t_name_set:
idx_t=cols.index(t_name)
peak_state, peak_prob=compute_cause_likelihood(bndata_mat,[idx_t],[[effect_idx]],[[PEAK]])
time_high_peak_liklihood_set.append(np.array([peak_state,peak_prob]))
peak_state, peak_prob=compute_cause_likelihood(bndata_mat,[idx_t],[[effect_idx]],[[LOW_PEAK]])
time_low_peak_liklihood_set.append(np.array([peak_state,peak_prob]))
fig=figure()
subplot(3,1,1)
plot(time_high_peak_liklihood_set[0][0],time_high_peak_liklihood_set[0][1],'-^')
plot(time_low_peak_liklihood_set[0][0],time_low_peak_liklihood_set[0][1],'-v')
plt.xticks(monthDict.keys(),monthDict.values())
plt.xlabel('Months of a year',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
subplot(3,1,2)
plot(time_high_peak_liklihood_set[1][0],time_high_peak_liklihood_set[1][1],'-^')
plot(time_low_peak_liklihood_set[1][0],time_low_peak_liklihood_set[1][1],'-v')
plt.xticks(weekDict.keys(),weekDict.values())
plt.xlabel('Days of a Week',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.tick_params(labelsize='large')
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
subplot(3,1,3)
plot(time_high_peak_liklihood_set[2][0],time_high_peak_liklihood_set[2][1],'-^')
plot(time_low_peak_liklihood_set[2][0],time_low_peak_liklihood_set[2][1],'-v')
plt.xticks(hourDict.keys(),hourDict.values())
plt.xlabel('Hours of a day',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.tick_params(labelsize='large')
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# BN Network representation
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
###############################################################
# 4. Sensor, Weather, and Time Dependency Analysis
# BN Network Learning
###############################################################
# For regular event.
t_name_set=VAK1_.difftime_names
# ['MTH', 'WD', 'HR']
sig_tag='diff'
p_name=['VAK1.CG_SYSTEM_REACTIVE_POWER_M']
sensor_cause_label=['VAK1.GEO_LM5_TE1_FM','VAK1.AK_TE50_4_M']
weather_cause_label=['Dew PointC','Humidity']
time_cause_label=['MTH', 'HR']
p_idx=[VAK1_.diffsensor_names.index(temp) for temp in p_name]
s_idx=[VAK1_.diffsensor_names.index(temp) for temp in sensor_cause_label]
w_idx=[VAK1_.diffweather_names.index(temp) for temp in weather_cause_label]
t_idx=[VAK1_.difftime_names.index(temp) for temp in time_cause_label]
bndata_mat=np.vstack((VAK1_.diffdata_state_mat[:,p_idx].T,\
VAK1_.diffdata_state_mat[:,s_idx].T, \
VAK1_.diffdata_weather_mat_[:,w_idx].T, \
VAK1_.diffdata_time_mat[:,t_idx].T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[name_ for name_ in p_name+sensor_cause_label+weather_cause_label+time_cause_label]
cols=[remove_dot(name_) for name_ in p_name+sensor_cause_label+weather_cause_label+time_cause_label]
# BN Network representation
b_arc_list = pair_in_idx([cols[0]],cols[1:])+pair_in_idx([cols[1]],cols[2:])+pair_in_idx([cols[2]],cols[3:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
cause_label=list(np.array(cols_fnames)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols_fnames.index(label_) for label_ in cause_label]
effect_idx=[cols_fnames.index(label_) for label_ in p_name]
effect_label=p_name
obs_state=PEAK
peak_state_temp, peak_prob_temp=compute_cause_likelihood(bndata_mat,cause_idx,[effect_idx],[[obs_state]])
obs_state=LOW_PEAK
lowpeak_state_temp, lowpeak_prob_temp=compute_cause_likelihood(bndata_mat,cause_idx,[effect_idx],[[obs_state]])
peak_state=np.array(peak_state_temp)
peak_prob=np.array(peak_prob_temp)
lowpeak_state=np.array(lowpeak_state_temp)
lowpeak_prob=np.array(lowpeak_prob_temp)
# Probability
fig=figure(figsize=(30.0,25.0))
for i,mon in enumerate(yearMonths):
subplot(3,4,mon+1)
idx=np.nonzero(peak_state[:,2]==mon)[0]
x_set=peak_state[idx,0:2]
plot(range(len(x_set)),peak_prob[idx],'-^')
idx=np.nonzero(lowpeak_state[:,2]==mon)[0]
plot(range(len(x_set)),lowpeak_prob[idx],'-v')
x_label=[(stateDict[peak_tpl[0]],stateDict[peak_tpl[1]]) for peak_tpl in x_set]
x_ticks=range(len(x_set))
plt.ylabel('Likelihood',fontsize='small')
if i>7:
#plt.xlabel(cause_label[0]+' Measurements',fontsize='small')
plt.xticks(x_ticks,x_label,rotation=270, fontsize=10)
plt.tick_params(labelsize='small')
title(monthDict[mon]);plt.ylim([-0.05,1.05])
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.tick_params(labelsize='small')
plt.grid()
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
print '----------------------------------------'
print 'Likelihoods '
print '----------------------------------------'
print cause_label+['Low Peak','High Peak']
print '----------------------------------------'
print np.vstack((np.int0(peak_state).T,np.int0(100*lowpeak_prob).T,np.int0(100*peak_prob).T)).T
print '----------------------------------------'
#<----------------------------------------------------------------------
#import pdb;pdb.set_trace()
DO_BN_LEARN=0
# This is BN Learn example
if DO_BN_LEARN==1:
import lib_bnlearn as rbn
irr_state_mat,irr_state_prob,skewness_metric_sort,skewness_metric_sort_idx=irr_state_mapping(diffdata_state_mat,weight_coeff=10)
bndata_dict = mt.loadObjectBinary('diffdata_dict.bin')
bn_col=bndata_dict['diffdata_names']
bn_sn=bndata_dict['sensor_names']
bn_wn=bndata_dict['weather_names']
bn_tn=bndata_dict['time_names']
bndata_mat=bndata_dict['diffdata_mat']
# If the variable is discrete, we should convert the data into R's factor data type
#cols = X_Sensor_NAMES+X_Time_NAMES
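# Spaces in the weather and column names are replaced with underscores below so
# that they survive as valid R data.frame column names.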
for k,name_temp in enumerate(bn_wn):
try:
blank_idx=name_temp.index(' ')
#print blank_idx,X_Weather_NAMES[k][blank_idx]
bn_wn[k]=bn_wn[k].replace(' ','_')
except:
pass
for k,name_temp in enumerate(bn_col):
try:
blank_idx=name_temp.index(' ')
#print blank_idx,X_Weather_NAMES[k][blank_idx]
bn_col[k]=bn_col[k].replace(' ','_')
except:
pass
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat[:,:len(bn_sn)])
#cols = X_Sensor_NAMES+X_Weather_NAMES+X_Time_NAMES
cols =bn_col[:len(bn_sn)]
# Construct data frame, given data matrix (np.array) and column names
# if column names are not given, we use column index [0,1,..] as the column names
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
#arc_list = pair_in_idx(X_Sensor_NAMES,X_Time_NAMES)
# Black list
b_arc_list = pair_in_idx(bn_sn,bn_tn)\
+pair_in_idx(bn_sn,bn_wn)\
+pair_in_idx(bn_wn,bn_tn)\
+pair_in_idx(bn_wn,bn_wn)\
+pair_in_idx(bn_tn,bn_tn)
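# In bnlearn, blacklisted arcs can never appear in the learned structure while
# whitelisted arcs are always included, so sensor/weather variables are kept
# from pointing at time (or weather) nodes, and the time -> sensor/weather
# arcs below are forced to be present.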
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
# White list
w_arc_list = pair_in_idx(bn_tn,bn_sn)\
+pair_in_idx(bn_tn,bn_wn)
white_arc_frame = rbn.construct_arcs_frame(w_arc_list)
"""
Step2: Using bnlearn to learn graph structure from data frame
"""
# Use hill-climbing learning algorithm
# With blacklisting arcs
hc = rbn.bnlearn.hc(data_frame,score='bic')
hc_score=rbn.bnlearn.score(hc,data_frame,type="bic")
hc_bw = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,whitelist=white_arc_frame,score='bic')
hc_bw_score=rbn.bnlearn.score(hc_bw,data_frame,type="bic")
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
hc_b_score=rbn.bnlearn.score(hc_b,data_frame,type="bic")
print 'hc_score: ',hc_score,'hc_b_score: ',hc_b_score,'hc_bw_score: ',hc_bw_score
# Print some output from the learning process
#print str(hc_b)
# Get the adjacent matrix from the graph structure
# the return is numpy array
amat = rbn.py_get_amat(hc_b)
"""
There are other learning algorithms available too
E.g.:
gs = rbn.bnlearn.gs(data_frame)
"""
"""
Step 3: Plotting the graph, given the graph structure
and the names of nodes
"""
#hc = rbn.bnlearn.hc(data_frame,score='k2')
figure(2)
rbn.nx_plot(hc_b,cols)
rbn.nx_plot(hc,cols)
#rbn.nx_plot(hc,rbn.bnlearn.nodes(hc))
"""
Step4: Fitting the data into graph structure
to estimate the conditional probability
NOTE: in order for fitting to happen, the graph must be completely directed
"""
fit = rbn.py_bn_fit(hc_b,data_frame)
#print str(fit)
#index_temp=cols.index('GW1.HA1_SM_K')
index_temp=1
prob_dimnames,prob_factors,prob_mat = rbn.py_get_node_cond_mat(fit,index_temp)
#rbn.write_to_file('fit.dat',str(fit))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
###############################################################################
DATA_EMBEDDING_ANALYSIS=0
if DATA_EMBEDDING_ANALYSIS==1:
# Covariance Estimation
edge_model = covariance.GraphLassoCV()
edge_model.fit(X_INPUT)
cov_mat=edge_model.covariance_
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=X_INPUT.shape[1]-1)
embedding = node_position_model.fit_transform(X_INPUT.T).T
plt.figure('Data structure map', facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
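# Rescale the precision matrix into (unsigned) partial correlations,
# |rho_ij| = |P_ij| / sqrt(P_ii * P_jj); only the magnitudes are used below to
# decide which edges to draw and how thick to draw them.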
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.01)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100*d**2,c=labels, cmap=pl.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=pl.cm.hot_r,
norm=pl.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(zip(input_names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=12,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
DATA_NAME_ANALYSIS=0
if DATA_NAME_ANALYSIS==1:
#################################################################################
# Graph structure analysis of sensor naming
#################################################################################
print '--------------------------------------------------'
print 'Graph structure analysis of sensor naming'
print '--------------------------------------------------'
print 'get similarity matrix of sensor naming'
#sim_mat, uuid_list, phrases, key_description, phrase_count = get_sim_mat()
sim_mat = mt.loadObjectBinary('../data_year/sim_mat.bin')
uuid_list = mt.loadObjectBinary('../data_year/uuid_list.bin')
phrases = mt.loadObjectBinary('../data_year/phrases.bin')
key_description = mt.loadObjectBinary('../data_year/key_description.bin')
phrase_count = mt.loadObjectBinary('../data_year/phrase_count.bin')
print 'build tree.....'
for sensor_name in uuid_list:
print len(sensor_name)
print '**************************** End of Program ****************************'
"""
# Obsolete Lines
###########################################################################
# Float Type Measurement Clustering
###########################################################################
DIST_MAT_sf=find_norm_dist_matrix(X_Feature[:,sf_idx])
# Find representative set of sensor measurements
min_dist_=np.sqrt(2*(1-(0.9)))
max_dist_=np.sqrt(2*(1-(0.1)))
distmat_input=DIST_MAT_sf
DO_CLUSTERING_TEST=0
if DO_CLUSTERING_TEST==1:
CLUSTERING_TEST(distmat_input,min_corr=0.1,max_corr=0.9)
pack_exemplars_float,pack_labels_float=max_pack_cluster(distmat_input,min_dist=min_dist_,max_dist=max_dist_)
pack_num_clusters_float=int(pack_labels_float.max()+1)
print '-------------------------------------------------------------------------'
print pack_num_clusters_float, 'clusters out of ', len(pack_labels_float), ' float type measurements'
print '-------------------------------------------------------------------------'
validity,intra_dist,inter_dist=compute_cluster_err(distmat_input,pack_labels_float)
print 'validity:',round(validity,2),', intra_dist: ',np.round(intra_dist,2),', inter_dist: ',np.round(inter_dist,2)
print '-------------------------------------------------------------------------'
sf_exemplars_dict={}
sfe_name=list(np.array(sf_name)[pack_exemplars_float])
sfe_idx=np.array(sf_idx)[pack_exemplars_float]
for label_id,(m_idx,exemplar_label) in enumerate(zip(pack_exemplars_float,sfe_name)):
print exemplar_label
children_set=list(set(np.nonzero(pack_labels_float==label_id)[0])-set([m_idx]))
print 'Label ', label_id, ': ',m_idx,'<--', children_set
sf_exemplars_dict.update({exemplar_label:list(np.array(sf_name)[children_set])})
# exemplar index
###########################################################################
# InT Type Measurement Clustering
###########################################################################
DIST_MAT_si=find_norm_dist_matrix(X_Feature[:,si_idx])
# Find representative set of sensor measurements
min_dist_=np.sqrt(2*(1-(0.9)))
max_dist_=np.sqrt(2*(1-(0.1)))
distmat_input=DIST_MAT_si
DO_CLUSTERING_TEST=0
if DO_CLUSTERING_TEST==1:
CLUSTERING_TEST(distmat_input,min_corr=0.1,max_corr=0.9)
pack_exemplars_int,pack_labels_int=max_pack_cluster(distmat_input,min_dist=min_dist_,max_dist=max_dist_)
pack_num_clusters_int=int(pack_labels_int.max()+1)
print '-------------------------------------------------------------------------'
print pack_num_clusters_int, 'clusters out of ', len(pack_labels_int), ' int type measurements'
print '-------------------------------------------------------------------------'
validity,intra_dist,inter_dist=compute_cluster_err(distmat_input,pack_labels_int)
print 'validity:',round(validity,2),', intra_dist: ',np.round(intra_dist,2),', inter_dist: ',np.round(inter_dist,2)
print '-------------------------------------------------------------------------'
si_exemplars_dict={}
sie_name=list(np.array(si_name)[pack_exemplars_int])
sie_idx=np.array(si_idx)[pack_exemplars_int]
for label_id,(m_idx,exemplar_label_int) in enumerate(zip(pack_exemplars_int,sie_name)):
print exemplar_label_int
children_set=list(set(np.nonzero(pack_labels_int==label_id)[0])-set([m_idx]))
print 'Label ', label_id, ': ',m_idx,'<--', children_set
si_exemplars_dict.update({exemplar_label_int:list(np.array(si_name)[children_set])})
# If no data is available, then impute the data by a weighted mean
print 'Before imputation'
for i,key in enumerate(data_used):
plt.figure(1)
print key
print [k for k in np.nonzero(X[:,i]==np.infty)[0]]
plt.subplot(len(data_used),1,i+1)
plt.plot(time_slots,X[:,i],'.')
plt.title(key,fontsize=6)
plt.xticks(fontsize=6);plt.yticks(fontsize=6)
# If no data is available, then impute the data by a weighted mean
print 'Impute missing data'
for i,key in enumerate(data_used):
for inf_idx in np.nonzero(X[:,i]==np.infty)[0]:
whgt_bottom_sum=0;whgt_top_sum=0
for h_idx in np.nonzero(hr_set==hr_set[inf_idx])[0]:
#import pdb; pdb.set_trace()
sample_temp=X[h_idx,i]
if (sample_temp<np.infty and h_idx!=inf_idx):
wght=1/np.abs(daycount_set[h_idx]-daycount_set[inf_idx])
whgt_bottom_sum=whgt_bottom_sum+wght
whgt_top_sum=whgt_top_sum+wght*sample_temp
new_sample=whgt_top_sum/whgt_bottom_sum
X[inf_idx,i]=new_sample
# If no data is available, then impute the data by a weighted mean
print 'After imputation'
for i,key in enumerate(data_used):
plt.figure(1)
print key
print [k for k in np.nonzero(X[:,i]==np.infty)[0]]
plt.subplot(len(data_used),1,i+1)
plt.plot(time_slots,X[:,i])
plt.title(key,fontsize=6)
plt.xticks(fontsize=6);plt.yticks(fontsize=6)
gmm_labels=gmm.predict(obs)
labels=gmm_labels
#kmean=KMeans(n_clusters=2).fit(obs[:,newaxis])
#labels=kmean.labels_
subplot(3,1,1)
for i in range(num_cluster):
plot(t_new[labels==i]-t_new[0],val_new[labels==i],'s')
title(input_names[k])
subplot(3,1,2)
plot(t_new[1:]-t_new[0],abs(diff(val_new))/max(abs(diff(val_new))))
subplot(3,1,3)
a=diff(val_new)
plot(t_new[1:]-t_new[0],a/max(abs(a)))
#labels=kmean.labels_
subplot(2,1,1)
for i in range(opt_num_cluster):
plot(t_new[label==i]-t_new[0],val_new[label==i],'*')
title(input_names[k])
subplot(2,1,2)
plot(t_new[1:]-t_new[0],abs(diff(val_new))/max(abs(diff(val_new))))
plot(t_new[0:50],label[0:50],'s')
#plt.ioff()
# Only do state classification when the number of samples is large enough (see min_num_samples_for_analysis below)
k=0
dt=intpl_intv[k]
# Reference time unit is 5 min, 15 min, 30 min and 1 hour
num_samples_set=np.round(np.array([60*5,60*15,60*30, 60*60 ])*(1/dt))
min_num_samples_for_analysis=2**5
for i,nfft_temp in enumerate(num_samples_set):
if nfft_temp>min_num_samples_for_analysis:
NFFT=int(2**ceil(log2(nfft_temp)));break;
window_duration=NFFT*dt
Fs = (1.0/dt) # the sampling frequency
# Pxx is the segments x freqs array of instantaneous power, freqs is
# the frequency vector, bins are the centers of the time bins in which
# the power is computed, and im is the matplotlib.image.AxesImage
# instance
"""
|
gpl-2.0
|
fabiotanniguchi/mc857
|
kernel/libFL/pythonUtils/plotHist.py
|
4
|
1972
|
import csv
import numpy as np
import matplotlib.pyplot as plt
import sys
#1 - input File
#2 - row or col based
#3 - split figures (color image only)
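# Example invocation (the data file name is hypothetical):
#   python plotHist.py histogram.txt True False
# reads histogram.txt column-wise and, for a colour histogram, draws the three
# channels as subplots of a single figure.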
def plotGrayHistogram(x,y):
plt.bar(x,y)
plt.title("Histogram")
plt.xlabel("bins")
plt.ylabel("counts")
plt.show();
def plotColorHistogram(x,r,g,b,split):
if(split):
plt.figure(0)
plt.bar(x,r)
plt.title("Histogram Red Channel")
plt.xlabel("bins")
plt.ylabel("counts")
plt.figure(1)
plt.bar(x,g)
plt.title("Histogram Green Channel")
plt.xlabel("bins")
plt.ylabel("counts")
plt.figure(2)
plt.bar(x,b)
plt.title("Histogram Blue Channel")
plt.xlabel("bins")
plt.ylabel("counts")
plt.show();
else:
plt.figure(0)
plt.subplot(221)
plt.bar(x,r)
plt.title("Histogram Red Channel")
plt.xlabel("bins")
plt.ylabel("counts")
plt.subplot(222)
plt.bar(x,g)
plt.title("Histogram Green Channel")
plt.xlabel("bins")
plt.ylabel("counts")
plt.subplot(223)
plt.bar(x,b)
plt.title("Histogram Blue Channel")
plt.xlabel("bins")
plt.ylabel("counts")
plt.show();
inFile = sys.argv[1]
colBased = True;
split = False;
if(len(sys.argv) >= 3):
if (sys.argv[2] == 'False'):
colBased = False;
else:
colBased = True;
if(len(sys.argv) >= 4):
if (sys.argv[3] == 'False'):
split = False;
else:
split = True;
inputData = np.loadtxt(inFile,delimiter=' ')
[row,col] = inputData.shape
if(colBased):
if(col == 2):
x = inputData[:, 0]
y = inputData[:, 1]
plotGrayHistogram(x,y)
else:
x = inputData[:, 0]
r = inputData[:, 1]
g = inputData[:, 2]
b = inputData[:, 3]
plotColorHistogram(x, r, g, b,split)
else:
if(row == 2):
x = inputData[0, :]
y = inputData[1, :]
plotGrayHistogram(x, y)
else:
x = inputData[0, :]
r = inputData[1, :]
g = inputData[2, :]
b = inputData[3, :]
plotColorHistogram(x,r,g,b,split)
|
mit
|
trankmichael/scikit-learn
|
examples/cluster/plot_digits_linkage.py
|
369
|
2959
|
"""
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
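# Rescale the 2D embedding to the unit square so that the digit labels are
# positioned comparably across the different linkage runs.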
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
|
bsd-3-clause
|
sibis-platform/ncanda-data-integration
|
datadict/datadict_update.py
|
2
|
5598
|
#!/usr/bin/env python
# """
# Given a CSV with the current data dictionary and a list of patch files with
# updated / newly inserted variables, produce a full patched data dictionary.
# """
from __future__ import print_function
from __future__ import absolute_import
import sys
import pandas as pd
import csv
import argparse
from datadict_utils import load_datadict, insert_rows_at
parser = argparse.ArgumentParser(
description="Apply patches to the current data dictionary.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('-c', '--current',
help="CSV file with the current data dictionary",
action="store", required=True)
parser.add_argument('-o', '--output',
help="CSV file to write output in.",
action="store",
default=sys.stdout)
parser.add_argument('patch_files', help="CSV file(s) with patch for datadict",
nargs='+',
action="store")
parser.add_argument('-v', '--verbose',
help="Write to stdout what the script is doing",
action="store_true")
parser.add_argument('--update-only',
help="Do not add any new variables.",
action="store_true")
# TODO: Instead of "do not update", enhance logic to "do not overwrite"
parser.add_argument('--skip-branching',
help="Do not update branching logic information.",
action="store_true")
parser.add_argument('--skip-section-headers',
help="Do not update section headers.",
action="store_true")
parser.add_argument('--skip-field-notes',
help="Do not update field notes.",
action="store_true")
# TODO: Implement
parser.add_argument('--keep-options',
help=("Prevent the patch from downgrading Field Type to "
"text and/or removing options"),
action="store_true")
# TODO: Trimming options
args = parser.parse_args()
dd = load_datadict(args.current)
dd_columns = dd.columns.tolist() # To preserve order
# 0. For each patch file:
for patch_file in args.patch_files:
patch_df = load_datadict(patch_file, trim_all=True)
existing_rows = dd.index.intersection(patch_df.index)
new_rows = patch_df.index.difference(dd.index)
if args.verbose:
print("\nProcessing %s:" % patch_file.name)
print("Updating the following columns:")
print(existing_rows.tolist())
if args.update_only:
print("Ignoring the following new columns:")
else:
print("Inserting the following new columns:")
print(new_rows.tolist())
# 1. In the patch, find the entries that already exist and simply rewrite
# them
#
# TODO: Implement overwriting only a subset of values
overwrite_columns = set(dd.columns)
if args.skip_branching:
overwrite_columns = overwrite_columns - set(["Branching Logic (Show field only if...)"])
if args.skip_section_headers:
overwrite_columns = overwrite_columns - set(["Section Header"])
if args.skip_field_notes:
overwrite_columns = overwrite_columns - set(["Field Note"])
if len(existing_rows) > 0:
dd.loc[existing_rows, overwrite_columns] = patch_df.loc[existing_rows, overwrite_columns]
# 2. If there were new entries:
if (len(new_rows) > 0) and (not args.update_only):
# 2a. If there were existing entries, try smart placement of the new
# variables
if len(existing_rows) > 0: # Try smart placement of new entries
buffer_new = []
last_old = None
for colname, _ in patch_df.iterrows():
# Check if it's an existing row; if it is, mark it
if colname in existing_rows:
if len(buffer_new) > 0:
if last_old is None:
# We must insert before this variable
insert_before = True
else:
# We can insert after the last found variable
insert_before = False
# Insert buffer_new
dd = insert_rows_at(dd, colname,
patch_df.loc[buffer_new],
insert_before)
buffer_new = []
# Reset last_old
last_old = colname
else:
# It's a new one -> put it in the buffer
buffer_new.append(colname)
# 2b. If there were no already-existing entries, append the new entries
# to the end of the form (or whatever CLI says)
else: # No existing entries to append to
forms = patch_df['Form Name'].unique().tolist()
# Find the shared form name (if possible) and append to its end
for form in forms:
if dd['Form Name'].str.contains(form).any():
insertion_point = dd[dd['Form Name'] == form].index[-1]
else:
insertion_point = dd.index[-1]
dd = insert_rows_at(dd, insertion_point,
patch_df[patch_df['Form Name'] == form])
# Write out the updated data dictionary (with correctly ordered entries)
dd[dd_columns].to_csv(args.output, quoting=csv.QUOTE_NONNUMERIC)
|
bsd-3-clause
|
fbagirov/scikit-learn
|
sklearn/decomposition/nmf.py
|
100
|
19059
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted, check_non_negative
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
n_components : int
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
random_state = check_random_state(random_state)
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
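# (Perron-Frobenius applied to the non-negative Gram matrices X.T X and X X.T
# guarantees that the leading singular vectors can be chosen entry-wise
# non-negative, so taking absolute values only fixes the sign convention.)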
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow finding a better step size but lead to a longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
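# Since the objective is quadratic in H, f(Hn) - f(H) = gradd + 0.5 * dQd
# exactly, so the test below is the sufficient-decrease condition
# f(Hn) - f(H) <= sigma * gradd from Lin (2007).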
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
rng = check_random_state(self.random_state)
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_, random_state=rng)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a',
random_state=rng)
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar',
random_state=rng)
elif init == "random":
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
|
bsd-3-clause
|
fergalbyrne/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_wxagg.py
|
70
|
9051
|
from __future__ import division
"""
backend_wxagg.py
A wxPython backend for Agg. This uses the GUI widgets written by
Jeremy O'Donoghue ([email protected]) and the Agg backend by John
Hunter ([email protected])
Copyright (C) 2003-5 Jeremy O'Donoghue, John Hunter, Illinois Institute of
Technology
License: This work is licensed under the matplotlib license (PSF
compatible). A copy should be included with this source code.
"""
import wx
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
import backend_wx
from backend_wx import FigureManager, FigureManagerWx, FigureCanvasWx, \
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, error_msg_wx, \
draw_if_interactive, show, Toolbar, backend_version
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbarWx(self.canvas, True)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2WxAgg(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
DEBUG_MSG("draw()", 1, self)
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
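# bbox is given in matplotlib's figure coordinates (origin at the bottom-left
# corner), while wx device coordinates put the origin at the top-left, hence
# the vertical flip when computing y below.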
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.BeginDrawing()
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.EndDrawing()
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
filetypes = FigureCanvasAgg.filetypes
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
# artist contains methods that rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
def get_canvas(self, frame, fig):
return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
backend_wx._create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
frame = FigureFrameWxAgg(num, fig)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
#
# agg/wxPython image conversion functions (wxPython <= 2.6)
#
def _py_convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
if bbox is None:
# agg => rgb -> image
return image
else:
# agg => rgb -> image => bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_clipped_image_as_bitmap(image, bbox))
def _py_convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image => bitmap
return wx.BitmapFromImage(_py_convert_agg_to_wx_image(agg, None))
else:
# agg => rgb -> image => bitmap => clipped bitmap
return _clipped_image_as_bitmap(
_py_convert_agg_to_wx_image(agg, None),
bbox)
def _clipped_image_as_bitmap(image, bbox):
"""
Convert the region of a wx.Image bounded by bbox to a wx.Bitmap.
"""
l, b, width, height = bbox.get_bounds()
r = l + width
t = b + height
srcBmp = wx.BitmapFromImage(image)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(image.GetHeight() - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _py_WX28_convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
return image
else:
# agg => rgba buffer -> bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _py_WX28_convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba(0, 0))
else:
# agg => rgba buffer -> bitmap => clipped bitmap
return _WX28_clipped_agg_as_bitmap(agg, bbox)
def _WX28_clipped_agg_as_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
Note: agg must be a backend_agg.RendererAgg instance.
"""
l, b, width, height = bbox.get_bounds()
r = l + width
t = b + height
srcBmp = wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba(0, 0))
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
def _use_accelerator(state):
"""
Enable or disable the WXAgg accelerator, if it is present and is also
compatible with whatever version of wxPython is in use.
"""
global _convert_agg_to_wx_image
global _convert_agg_to_wx_bitmap
if getattr(wx, '__version__', '0.0')[0:3] < '2.8':
# wxPython < 2.8, so use the C++ accelerator or the Python routines
if state and _wxagg is not None:
_convert_agg_to_wx_image = _wxagg.convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _wxagg.convert_agg_to_wx_bitmap
else:
_convert_agg_to_wx_image = _py_convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _py_convert_agg_to_wx_bitmap
else:
# wxPython >= 2.8, so use the accelerated Python routines
_convert_agg_to_wx_image = _py_WX28_convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _py_WX28_convert_agg_to_wx_bitmap
# try to load the WXAgg accelerator
try:
import _wxagg
except ImportError:
_wxagg = None
# if it's present, use it
_use_accelerator(True)
|
agpl-3.0
|
kaichogami/scikit-learn
|
sklearn/ensemble/tests/test_gradient_boosting.py
|
43
|
39945
|
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
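# train_score_[i] is the training loss (deviance) at stage i, so successive
# differences should generally be non-negative as boosting proceeds.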
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
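# A worked example of the max_features mapping asserted above (illustrative
# sketch only, not part of the original test suite): with n_features = 10,
# 'sqrt' and 'log2' both resolve to 3, the float 0.3 resolves to
# int(10 * 0.3) == 3, and 'auto' means sqrt(n_features) for classifiers but
# all n_features for regressors.
def _example_max_features_mapping(n_features=10):
    return {'sqrt': int(np.sqrt(n_features)),      # -> 3
            'log2': int(np.log2(n_features)),      # -> 3
            '0.3': int(n_features * 0.3),          # -> 3
            'auto (regression)': n_features}       # -> 10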
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
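# Typical use of the staged_* generators exercised above (a minimal sketch,
# not part of the original test suite): score a held-out set at every boosting
# stage and keep the stage count that maximises accuracy.
def _example_pick_n_estimators(clf, X_train, y_train, X_valid, y_valid):
    clf.fit(X_train, y_train)
    stage_scores = [np.mean(y_stage == y_valid)
                    for y_stage in clf.staged_predict(X_valid)]
    return int(np.argmax(stage_scores)) + 1  # best number of stages (1-based)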
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
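# Why the alpha=0.5 case above matches 'lad' (a short numeric sketch, not part
# of the original test suite): the quantile (pinball) loss at alpha=0.5 equals
# 0.5 * |y - pred|, i.e. it is proportional to the absolute error, so both
# losses are minimised by the same predictions.
def _example_quantile_vs_lad(y=1.0, pred=3.0, alpha=0.5):
    diff = y - pred
    quantile_loss = alpha * diff if diff > 0 else (alpha - 1.0) * diff
    lad_loss = abs(diff)
    return quantile_loss, 0.5 * lad_loss  # equal whenever alpha == 0.5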
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
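# A common way to use oob_improvement_ beyond the assertions above (a sketch,
# assuming the estimator was fit with subsample < 1 so the attribute exists):
# oob_improvement_[i] is the drop in out-of-bag loss at stage i, so the stage
# with the largest cumulative sum is a cheap estimate of a good ensemble size.
def _example_best_oob_stage(fitted_clf):
    cumulative_oob = np.cumsum(fitted_clf.oob_improvement_)
    return int(np.argmax(cumulative_oob)) + 1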
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
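# The warm-start pattern exercised above, as a stand-alone sketch (illustrative
# only): fit a small ensemble once, then grow it in place by raising
# n_estimators with warm_start=True instead of refitting from scratch.
def _example_grow_ensemble(X, y, step=50, n_rounds=3):
    est = GradientBoostingClassifier(n_estimators=step, warm_start=True)
    for i in range(1, n_rounds + 1):
        est.set_params(n_estimators=step * i)
        est.fit(X, y)  # each call only trains the newly added trees
    return est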
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
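# The monitor callback receives (iteration, estimator, locals()) after every
# stage and stops boosting as soon as it returns True. A slightly more
# realistic sketch (illustrative only, assuming train_score_[i] already holds
# the stage-i training loss when the monitor runs): stop once the loss has not
# improved for `patience` consecutive stages.
def make_patience_monitor(patience=5, tol=1e-4):
    state = [np.inf, 0]  # [best loss so far, stages since last improvement]
    def monitor(i, est, locals):
        loss = est.train_score_[i]
        if loss < state[0] - tol:
            state[0], state[1] = loss, 0
        else:
            state[1] += 1
        return state[1] >= patience
    return monitor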
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
if issubclass(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
|
bsd-3-clause
|
cuemacro/chartpy
|
chartpy_examples/xkcd_example.py
|
1
|
2081
|
__author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
# support Quandl 3.x.x
try:
import quandl as Quandl
except:
# if import fails use Quandl 2.x.x
import Quandl
from chartpy import Chart, Style
# get your own free Quandl API key from https://www.quandl.com/
try:
from chartpy.chartcred import ChartCred
cred = ChartCred()
quandl_api_key = cred.quandl_api_key
except:
quandl_api_key = "x"
# choose run_example = 0 for everything
# run_example = 1 - xkcd example
# run_example = 2 - fun xkcd example
run_example = 0
if run_example == 1 or run_example == 0:
df = Quandl.get(["FRED/A191RL1Q225SBEA"], authtoken=quandl_api_key)
df.columns = ["Real QoQ"]
# set the style of the plot
style = Style(title="US GDP", source="Quandl/Fred", xkcd=True)
# Chart object is initialised with the dataframe and our chart style
chart = Chart(df=df, chart_type='line', style=style, engine='matplotlib')
chart.plot()
if run_example == 2 or run_example == 0:
import pandas, numpy
dt = pandas.date_range(start="1 Jan 1950", end="1 Apr 2017", freq='M')
data = numpy.arange(len(dt))
df = pandas.DataFrame(index=dt, data=data, columns=['Importance'])
# set the style of the plot
style = Style(title="Importance of puns", source="@saeedamenfx", xkcd=True, x_title="Puns", y_title="Importance")
# Chart object is initialised with the dataframe and our chart style
chart = Chart(df=df, chart_type='line', style=style, engine='matplotlib')
chart.plot()
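# A further sketch reusing only the calls shown above (illustrative, not part
# of the original example; it assumes chartpy draws one line per DataFrame
# column). Guarded by run_example == 3 so it never runs by default.
if run_example == 3:
    import pandas, numpy
    dt = pandas.date_range(start="1 Jan 2000", end="1 Jan 2017", freq='M')
    data = {'Linear': numpy.arange(len(dt), dtype=float),
            'Noisy': numpy.arange(len(dt)) + 10.0 * numpy.random.randn(len(dt))}
    df = pandas.DataFrame(index=dt, data=data, columns=['Linear', 'Noisy'])
    style = Style(title="Two series, one chart", source="@saeedamenfx", xkcd=True)
    chart = Chart(df=df, chart_type='line', style=style, engine='matplotlib')
    chart.plot()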
|
apache-2.0
|
tarasane/h2o-3
|
py2/h2o_gbm.py
|
30
|
16328
|
import re, random, math
import h2o_args
import h2o_nodes
import h2o_cmd
from h2o_test import verboseprint, dump_json, check_sandbox_for_errors
def plotLists(xList, xLabel=None, eListTitle=None, eList=None, eLabel=None, fListTitle=None, fList=None, fLabel=None, server=False):
if h2o_args.python_username!='kevin':
return
# Force matplotlib to not use any Xwindows backend.
if server:
import matplotlib
matplotlib.use('Agg')
import pylab as plt
print "xList", xList
print "eList", eList
print "fList", fList
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 26}
### plt.rc('font', **font)
plt.rcdefaults()
if eList:
if eListTitle:
plt.title(eListTitle)
plt.figure()
plt.plot (xList, eList)
plt.xlabel(xLabel)
plt.ylabel(eLabel)
plt.draw()
plt.savefig('eplot.jpg',format='jpg')
# Image.open('testplot.jpg').save('eplot.jpg','JPEG')
if fList:
if fListTitle:
plt.title(fListTitle)
plt.figure()
plt.plot (xList, fList)
plt.xlabel(xLabel)
plt.ylabel(fLabel)
plt.draw()
plt.savefig('fplot.jpg',format='jpg')
# Image.open('fplot.jpg').save('fplot.jpg','JPEG')
if eList or fList:
plt.show()
# pretty print a cm (confusion matrix) returned as JSON by h2o
def pp_cm(jcm, header=None):
# header = jcm['header']
# hack col index header for now..where do we get it?
header = ['"%s"'%i for i in range(len(jcm[0]))]
# cm = ' '.join(header)
cm = '{0:<8}'.format('')
for h in header:
cm = '{0}|{1:<8}'.format(cm, h)
cm = '{0}|{1:<8}'.format(cm, 'error')
c = 0
for line in jcm:
lineSum = sum(line)
if c < 0 or c >= len(line):
raise Exception("Error in h2o_gbm.pp_cm. c: %s line: %s len(line): %s jcm: %s" % (c, line, len(line), dump_json(jcm)))
print "c:", c, "line:", line
errorSum = lineSum - line[c]
if (lineSum>0):
err = float(errorSum) / lineSum
else:
err = 0.0
fl = '{0:<8}'.format(header[c])
for num in line: fl = '{0}|{1:<8}'.format(fl, num)
fl = '{0}|{1:<8.2f}'.format(fl, err)
cm = "{0}\n{1}".format(cm, fl)
c += 1
return cm
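# Small usage sketch for pp_cm and pp_cm_summary below (illustrative only):
# for the 2x2 confusion matrix [[5, 1], [2, 4]] the per-class errors are 1/6
# and 2/6, and the overall error is 100.0 * 3 / 12 = 25.0 percent wrong.
def _example_pp_cm_usage():
    example_cm = [[5, 1], [2, 4]]
    formatted = pp_cm(example_cm)          # returns the table as a formatted string
    pct_wrong = pp_cm_summary(example_cm)  # prints a summary, returns 25.0
    return formatted, pct_wrong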
def pp_cm_summary(cm):
# hack: cut and paste for now (should be in h2o_gbm.py?)
scoresList = cm
totalScores = 0
totalRight = 0
# individual scores can be all 0 if nothing for that output class
# due to sampling
classErrorPctList = []
predictedClassDict = {} # may be missing some? so need a dict?
for classIndex,s in enumerate(scoresList):
classSum = sum(s)
if classSum == 0 :
# why would the number of scores for a class be 0?
# in any case, tolerate. (it shows up in test.py on poker100)
print "classIndex:", classIndex, "classSum", classSum, "<- why 0?"
else:
if classIndex >= len(s):
print "Why is classindex:", classIndex, 'for s:"', s
else:
# H2O should really give me this since it's in the browser, but it doesn't
classRightPct = ((s[classIndex] + 0.0)/classSum) * 100
totalRight += s[classIndex]
classErrorPct = 100 - classRightPct
classErrorPctList.append(classErrorPct)
### print "s:", s, "classIndex:", classIndex
print "class:", classIndex, "classSum", classSum, "classErrorPct:", "%4.2f" % classErrorPct
# gather info for prediction summary
for pIndex,p in enumerate(s):
if pIndex not in predictedClassDict:
predictedClassDict[pIndex] = p
else:
predictedClassDict[pIndex] += p
totalScores += classSum
print "Predicted summary:"
# FIX! Not sure why we weren't working with a list..hack with dict for now
for predictedClass,p in predictedClassDict.items():
print str(predictedClass)+":", p
# this should equal the num rows in the dataset if full scoring? (minus any NAs)
print "totalScores:", totalScores
print "totalRight:", totalRight
if totalScores != 0: pctRight = 100.0 * totalRight/totalScores
else: pctRight = 0.0
print "pctRight:", "%5.2f" % pctRight
pctWrong = 100 - pctRight
print "pctWrong:", "%5.2f" % pctWrong
return pctWrong
# I just copied this from the GLM version and changed GLM to GBM. Have to update to match GBM params and responses
def pickRandGbmParams(paramDict, params):
colX = 0
randomGroupSize = random.randint(1,len(paramDict))
for i in range(randomGroupSize):
randomKey = random.choice(paramDict.keys())
randomV = paramDict[randomKey]
randomValue = random.choice(randomV)
params[randomKey] = randomValue
# compare this glm to last one. since the files are concatenations,
# the results should be similar? 10% of first is allowed delta
def compareToFirstGbm(self, key, glm, firstglm):
# if isinstance(firstglm[key], list):
# in case it's not a list already (err is a list)
verboseprint("compareToFirstGbm key:", key)
verboseprint("compareToFirstGbm glm[key]:", glm[key])
# key could be a list or not. if a list, don't want to create list of that list
# so use extend on an empty list. covers all cases?
if type(glm[key]) is list:
kList = glm[key]
firstkList = firstglm[key]
elif type(glm[key]) is dict:
raise Exception("compareToFirstGLm: Not expecting dict for " + key)
else:
kList = [glm[key]]
firstkList = [firstglm[key]]
for k, firstk in zip(kList, firstkList):
# delta must be a positive number ?
delta = .1 * abs(float(firstk))
msg = "Too large a delta (" + str(delta) + ") comparing current and first for: " + key
self.assertAlmostEqual(float(k), float(firstk), delta=delta, msg=msg)
self.assertGreaterEqual(abs(float(k)), 0.0, str(k) + " abs not >= 0.0 in current")
def goodXFromColumnInfo(y,
num_cols=None, missingValuesDict=None, constantValuesDict=None, enumSizeDict=None,
colTypeDict=None, colNameDict=None, keepPattern=None, key=None,
timeoutSecs=120, forRF=False, noPrint=False):
y = str(y)
# if we pass a key, means we want to get the info ourselves here
if key is not None:
(missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \
h2o_cmd.columnInfoFromInspect(key, exceptionOnMissingValues=False,
max_column_display=99999999, timeoutSecs=timeoutSecs)
num_cols = len(colNameDict)
# now remove any whose names don't match the required keepPattern
if keepPattern is not None:
keepX = re.compile(keepPattern)
else:
keepX = None
x = range(num_cols)
# need to walk over a copy, cause we change x
xOrig = x[:]
ignore_x = [] # for use by RF
for k in xOrig:
name = colNameDict[k]
# remove it if it has the same name as the y output
if str(k)== y: # if they pass the col index as y
if not noPrint:
print "Removing %d because name: %s matches output %s" % (k, str(k), y)
x.remove(k)
# rf doesn't want it in ignore list
# ignore_x.append(k)
elif name == y: # if they pass the name as y
if not noPrint:
print "Removing %d because name: %s matches output %s" % (k, name, y)
x.remove(k)
# rf doesn't want it in ignore list
# ignore_x.append(k)
elif keepX is not None and not keepX.match(name):
if not noPrint:
print "Removing %d because name: %s doesn't match desired keepPattern %s" % (k, name, keepPattern)
x.remove(k)
ignore_x.append(k)
# missing values reports as constant also. so do missing first.
# remove all cols with missing values
# could change it against num_rows for a ratio
elif k in missingValuesDict:
value = missingValuesDict[k]
if not noPrint:
print "Removing %d with name: %s because it has %d missing values" % (k, name, value)
x.remove(k)
ignore_x.append(k)
elif k in constantValuesDict:
value = constantValuesDict[k]
if not noPrint:
print "Removing %d with name: %s because it has constant value: %s " % (k, name, str(value))
x.remove(k)
ignore_x.append(k)
# this is extra pruning..
# remove all cols with enums, if not already removed
elif k in enumSizeDict:
value = enumSizeDict[k]
if not noPrint:
print "Removing %d %s because it has enums of size: %d" % (k, name, value)
x.remove(k)
ignore_x.append(k)
if not noPrint:
print "x has", len(x), "cols"
print "ignore_x has", len(ignore_x), "cols"
x = ",".join(map(str,x))
ignore_x = ",".join(map(str,ignore_x))
if not noPrint:
print "\nx:", x
print "\nignore_x:", ignore_x
if forRF:
return ignore_x
else:
return x
def showGBMGridResults(GBMResult, expectedErrorMax, classification=True):
# print "GBMResult:", dump_json(GBMResult)
jobs = GBMResult['jobs']
print "GBM jobs:", jobs
for jobnum, j in enumerate(jobs):
_distribution = j['_distribution']
model_key = j['destination_key']
job_key = j['job_key']
# inspect = h2o_cmd.runInspect(key=model_key)
# print "jobnum:", jobnum, dump_json(inspect)
gbmTrainView = h2o_cmd.runGBMView(model_key=model_key)
print "jobnum:", jobnum, dump_json(gbmTrainView)
if classification:
cms = gbmTrainView['gbm_model']['cms']
cm = cms[-1]['_arr'] # take the last one
print "GBM cms[-1]['_predErr']:", cms[-1]['_predErr']
print "GBM cms[-1]['_classErr']:", cms[-1]['_classErr']
pctWrongTrain = pp_cm_summary(cm);
if pctWrongTrain > expectedErrorMax:
raise Exception("Should have < %s error here. pctWrongTrain: %s" % (expectedErrorMax, pctWrongTrain))
errsLast = gbmTrainView['gbm_model']['errs'][-1]
print "\nTrain", jobnum, job_key, "\n==========\n", "pctWrongTrain:", pctWrongTrain, "errsLast:", errsLast
print "GBM 'errsLast'", errsLast
print pp_cm(cm)
else:
errsLast = gbmTrainView['gbm_model']['errs'][-1]
print "\nTrain", jobnum, job_key, "\n==========\n", "errsLast:", errsLast
print "GBMTrainView errs:", gbmTrainView['gbm_model']['errs']
def simpleCheckGBMView(node=None, gbmv=None, noPrint=False, **kwargs):
if not node:
node = h2o_nodes.nodes[0]
if 'warnings' in gbmv:
warnings = gbmv['warnings']
# catch the 'Failed to converge" for now
for w in warnings:
if not noPrint: print "\nwarning:", w
if ('Failed' in w) or ('failed' in w):
raise Exception(w)
if 'cm' in gbmv:
cm = gbmv['cm'] # only one
else:
if 'gbm_model' in gbmv:
gbm_model = gbmv['gbm_model']
else:
raise Exception("no gbm_model in gbmv? %s" % dump_json(gbmv))
cms = gbm_model['cms']
print "number of cms:", len(cms)
print "FIX! need to add reporting of h2o's _perr per class error"
# FIX! what if regression. is rf only classification?
print "cms[-1]['_arr']:", cms[-1]['_arr']
print "cms[-1]['_predErr']:", cms[-1]['_predErr']
print "cms[-1]['_classErr']:", cms[-1]['_classErr']
## print "cms[-1]:", dump_json(cms[-1])
## for i,c in enumerate(cms):
## print "cm %s: %s" % (i, c['_arr'])
cm = cms[-1]['_arr'] # take the last one
scoresList = cm
used_trees = gbm_model['N']
errs = gbm_model['errs']
print "errs[0]:", errs[0]
print "errs[-1]:", errs[-1]
print "errs:", errs
# if we got the ntree for comparison. Not always there in kwargs though!
param_ntrees = kwargs.get('ntrees',None)
if (param_ntrees is not None and used_trees != param_ntrees):
raise Exception("used_trees should == param_ntree. used_trees: %s" % used_trees)
if (used_trees+1)!=len(cms) or (used_trees+1)!=len(errs):
raise Exception("len(cms): %s and len(errs): %s should be one more than N %s trees" % (len(cms), len(errs), used_trees))
totalScores = 0
totalRight = 0
# individual scores can be all 0 if nothing for that output class
# due to sampling
classErrorPctList = []
predictedClassDict = {} # may be missing some? so need a dict?
for classIndex,s in enumerate(scoresList):
classSum = sum(s)
if classSum == 0 :
# why would the number of scores for a class be 0? does GBM CM have entries for non-existent classes
# in a range??..in any case, tolerate. (it shows up in test.py on poker100)
if not noPrint: print "class:", classIndex, "classSum", classSum, "<- why 0?"
else:
# H2O should really give me this since it's in the browser, but it doesn't
classRightPct = ((s[classIndex] + 0.0)/classSum) * 100
totalRight += s[classIndex]
classErrorPct = round(100 - classRightPct, 2)
classErrorPctList.append(classErrorPct)
### print "s:", s, "classIndex:", classIndex
if not noPrint: print "class:", classIndex, "classSum", classSum, "classErrorPct:", "%4.2f" % classErrorPct
# gather info for prediction summary
for pIndex,p in enumerate(s):
if pIndex not in predictedClassDict:
predictedClassDict[pIndex] = p
else:
predictedClassDict[pIndex] += p
totalScores += classSum
#****************************
if not noPrint:
print "Predicted summary:"
# FIX! Not sure why we weren't working with a list..hack with dict for now
for predictedClass,p in predictedClassDict.items():
print str(predictedClass)+":", p
# this should equal the num rows in the dataset if full scoring? (minus any NAs)
print "totalScores:", totalScores
print "totalRight:", totalRight
if totalScores != 0:
pctRight = 100.0 * totalRight/totalScores
else:
pctRight = 0.0
pctWrong = 100 - pctRight
print "pctRight:", "%5.2f" % pctRight
print "pctWrong:", "%5.2f" % pctWrong
#****************************
# more testing for GBMView
# it's legal to get 0's for oobe error # if sample_rate = 1
sample_rate = kwargs.get('sample_rate', None)
validation = kwargs.get('validation', None)
if (sample_rate==1 and not validation):
pass
elif (totalScores<=0 or totalScores>5e9):
raise Exception("scores in GBMView seems wrong. scores:", scoresList)
varimp = gbm_model['varimp']
treeStats = gbm_model['treeStats']
if not treeStats:
raise Exception("treeStats not right?: %s" % dump_json(treeStats))
# print "json:", dump_json(gbmv)
data_key = gbm_model['_dataKey']
model_key = gbm_model['_key']
classification_error = pctWrong
if not noPrint:
if 'minLeaves' not in treeStats or not treeStats['minLeaves']:
raise Exception("treeStats seems to be missing minLeaves %s" % dump_json(treeStats))
print """
Leaves: {0} / {1} / {2}
Depth: {3} / {4} / {5}
Err: {6:0.2f} %
""".format(
treeStats['minLeaves'],
treeStats['meanLeaves'],
treeStats['maxLeaves'],
treeStats['minDepth'],
treeStats['meanDepth'],
treeStats['maxDepth'],
classification_error,
)
### modelInspect = node.inspect(model_key)
dataInspect = h2o_cmd.runInspect(key=data_key)
check_sandbox_for_errors()
return (round(classification_error,2), classErrorPctList, totalScores)
|
apache-2.0
|
radjkarl/imgProcessor
|
imgProcessor/measure/sharpness/SharpnessfromPoints.py
|
1
|
14331
|
# coding=utf-8
from __future__ import division
from __future__ import print_function
import numpy as np
from numpy.linalg import norm
import cv2
from numba import jit
from scipy.ndimage.filters import maximum_filter
from scipy.optimize import curve_fit
from scipy.ndimage.interpolation import map_coordinates
from fancytools.math.boundingBox import boundingBox
# local
from imgProcessor.imgIO import imread
from imgProcessor.measure.sharpness._base import SharpnessBase
from imgProcessor.transformations import toUIntArray
from scipy.ndimage.measurements import center_of_mass
@jit(nopython=True)
def _findPoints(img, thresh, min_dist, points):
gx = img.shape[0]
gy = img.shape[1]
px = 0
n = 0
l = len(points)
for i in range(gx):
for j in range(gy):
px = img[i, j]
if px > thresh:
if n == l:
return
points[n, 0] = j
points[n, 1] = i
n += 1
# get kernel boundaries:
xmn = i - min_dist
if xmn < 0:
xmn = 0
xmx = i + min_dist
if xmx > gx:
xmx = gx
ymn = j - min_dist
if ymn < 0:
ymn = 0
ymx = j + min_dist
if ymx > gy:
ymx = gy
# set surrounding area to zero
# to ignore it
for ii in range(xmx - xmn):
for jj in range(ymx - ymn):
img[xmn + ii, ymn + jj] = 0
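# Usage sketch for _findPoints (illustrative only): it scans the image row by
# row, records every pixel above `thresh` into the pre-allocated `points`
# array as (x, y) = (column, row), and zeroes a min_dist neighbourhood around
# each hit so one peak is not reported twice. Note it mutates the image.
def _example_find_points():
    demo = np.zeros((50, 50))
    demo[10, 12] = 1.0                  # first isolated bright pixel
    demo[30, 40] = 1.0                  # second isolated bright pixel
    pts = np.zeros((10, 2), dtype=int)  # room for up to 10 detections
    _findPoints(demo, 0.5, 5, pts)
    return pts[:2]                      # -> [[12, 10], [40, 30]]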
class SharpnessfromPointSources(SharpnessBase):
def __init__(self, min_dist=None, max_kernel_size=51,
max_points=3000, calc_std=False):
# self.n_points = 0
self.max_points = max_points
self.calc_std = calc_std
# ensure odd number:
self.kernel_size = k = max_kernel_size // 2 * 2 + 1
if min_dist is None:
min_dist = max_kernel_size // 2 + 10
self.min_dist = min_dist
self._psf = np.zeros(shape=(k, k))
def addImg(self, img, roi=None):
'''
img - background, flat field, ste corrected image
roi - [(x1,y1),...,(x4,y4)] - boundaries where points are
'''
self.img = imread(img, 'gray')
s0, s1 = self.img.shape
if roi is None:
roi = ((0, 0), (s0, 0), (s0, s1), (0, s1))
k = self.kernel_size
hk = k // 2
# mask image
img2 = self.img.copy() # .astype(int)
mask = np.zeros(self.img.shape)
cv2.fillConvexPoly(mask, np.asarray(roi, dtype=np.int32), color=1)
mask = mask.astype(bool)
im = img2[mask]
bg = im.mean() # assume image average with in roi == background
mask = ~mask
img2[mask] = -1
# find points from local maxima:
self.points = np.zeros(shape=(self.max_points, 2), dtype=int)
thresh = 0.8 * bg + 0.2 * im.max()
_findPoints(img2, thresh, self.min_dist, self.points)
self.points = self.points[:np.argmin(self.points, axis=0)[0]]
# correct point position, to that every point is over max value:
for n, p in enumerate(self.points):
sub = self.img[p[1] - hk:p[1] + hk + 1, p[0] - hk:p[0] + hk + 1]
i, j = np.unravel_index(np.nanargmax(sub), sub.shape)
self.points[n] += [j - hk, i - hk]
# remove points that are too close to their neighbour or the border
mask = maximum_filter(mask, hk)
i = np.ones(self.points.shape[0], dtype=bool)
for n, p in enumerate(self.points):
if mask[p[1], p[0]]: # too close to border
i[n] = False
else:
# too close to other points
for pp in self.points[n + 1:]:
if norm(p - pp) < hk + 1:
i[n] = False
isum = i.sum()
ll = len(i) - isum
print('found %s points' % isum)
if ll:
print(
'removed %s points (too close to border or other points)' %
ll)
self.points = self.points[i]
# self.n_points += len(self.points)
# for finding best peak position:
# def fn(xy,cx,cy):#par
# (x,y) = xy
# return 1-(((x-cx)**2 + (y-cy)**2)*(1/8)).flatten()
# x,y = np.mgrid[-2:3,-2:3]
# x = x.flatten()
# y = y.flatten()
# for shifting peak:
xx, yy = np.mgrid[0:k, 0:k]
xx = xx.astype(float)
yy = yy.astype(float)
self.subs = []
# import pylab as plt
# plt.figure(20)
# img = self.drawPoints()
# plt.imshow(img, interpolation='none')
# # plt.figure(21)
# # plt.imshow(sub2, interpolation='none')
# plt.show()
#thresh = 0.8*bg + 0.1*im.max()
for i, p in enumerate(self.points):
sub = self.img[p[1] - hk:p[1] + hk + 1,
p[0] - hk:p[0] + hk + 1].astype(float)
sub2 = sub.copy()
mean = sub2.mean()
mx = sub2.max()
sub2[sub2 < 0.5 * (mean + mx)] = 0 # only select peak
try:
# SHIFT SUB ARRAY to align peak maximum exactly in middle:
# only eval a 5x5 array in middle of sub:
# peak = sub[hk-3:hk+4,hk-3:hk+4]#.copy()
# peak -= peak.min()
# peak/=peak.max()
# peak = peak.flatten()
# fit paraboloid to get shift in x,y:
# p, _ = curve_fit(fn, (x,y), peak, (0,0))
c0, c1 = center_of_mass(sub2)
# print (p,c0,c1,hk)
#coords = np.array([xx+p[0],yy+p[1]])
coords = np.array([xx + (c0 - hk), yy + (c1 - hk)])
#print (c0,c1)
#import pylab as plt
#plt.imshow(sub2, interpolation='none')
# shift array:
sub = map_coordinates(sub, coords,
mode='nearest').reshape(k, k)
# plt.figure(2)
#plt.imshow(sub, interpolation='none')
# plt.show()
#normalize:
bg = 0.25* ( sub[0].mean() + sub[-1].mean()
+ sub[:,0].mean() + sub[:,-1].mean())
sub-=bg
sub /= sub.max()
# import pylab as plt
# plt.figure(20)
# plt.imshow(sub, interpolation='none')
# # plt.figure(21)
# # plt.imshow(sub2, interpolation='none')
# plt.show()
self._psf += sub
if self.calc_std:
self.subs.append(sub)
except ValueError:
pass #sub.shape == (0,0)
def intermetidatePSF(self, n=5, steps=None):
s0,s1 = self._psf.shape
if steps is not None:
n = len(steps)
else:
steps = np.linspace(1,len(self.subs)-1,n, dtype=int)
ipsf = np.empty((n,s0,s1))
for o, j in enumerate(steps):
ipsf[o] = np.mean(self.subs[:j], axis=0)
ipsf[o] /= ipsf[o].sum()
return ipsf, steps
def std(self, i=None, filter_below=1.0, ref_psf=None):
if i is None:
i = len(self.points)
# p = self.psf(filter_below)
s0, s1 = self.subs[0].shape
# subs = np.array([s[s0,s1] for s in self.subs])
subs = np.array(self.subs)
ipsfs,_ = self.intermetidatePSF(steps=range(len(subs)))
# np.save('sssss', ipsfs)
# subs/=subs.sum(axis=(1,2))
# for s in subs:
# self._filter(s, filter_below)
# s/=s.sum()
# print p
# print subs[0]
# return subs
# sp = ((subs-p)**2)
# trend = [np.nan]
trend = []
if ref_psf is None:
ref_psf = ipsfs[-1]
for n in range(1,len(subs)):
#RMSE per step
# if n ==100:
# import pylab as plt
# plt.plot(ref_psf.sum(axis=0))
# plt.plot(ipsfs[n].sum(axis=0), 'o-')
# plt.plot(ipsfs[-1].sum(axis=0), 'o-')
#
# plt.show()
trend.append( ((ref_psf-ipsfs[n])**2).mean()**0.5 )
# for n in range(2,len(subs)+1):
# trend.append( ((1/(n-1)) * ( sp[:n].sum(axis=0)
# )**0.5).mean() )
#standard deviation per step (psf.sum()==1)
# import pylab as plt
# plt.plot(p.mean(axis=0))
# plt.plot(sp[0].mean(axis=0))
#
# plt.show()
# trend.append( ((1/(n-1)) * ( sp[:n].sum(axis=0)
# )**0.5).mean() )
return np.array(trend), (None,None,None)
stdmap = (1/(i-1)) * ( sp.sum(axis=0) )**0.5
stdmap = stdmap.sum(axis=0)
p = p.sum(axis=0)
return np.array(trend), (p - stdmap, p, p + stdmap)
# TODO: move and unit in extra PSF filter file
@staticmethod
def _filter(arr, val):
a = (arr[0, :], arr[1, :], arr[:, 0], arr[:, -1])
m = np.mean([aa.mean() for aa in a])
s = np.mean([aa.std() for aa in a])
t = m + val * s
arr -= t
arr[arr < 0] = 0
#TODO: remove because is already in module PSF
def psf(self, correct_size=True, filter_below=0.00):
p = self._psf.copy()
# filter background oscillations
if filter_below:
self._filter(p, filter_below)
# mn = p.argsort()[4:].mean()
# mn +=filter_below*p.max()-mn
# ibg = p<mn
# p[ibg] = mn
# else:
# ibg = p < p.min()
# decrease kernel size if possible
if correct_size:
b = boundingBox(p == 0)
s = p.shape
ix = min(b[0].start, s[0] - b[0].stop)
iy = min(b[1].start, s[1] - b[1].stop)
s0, s1 = self._shape = (slice(ix, s[0] - ix),
slice(iy, s[1] - iy))
p = p[s0, s1]
# scale
# p-=p.min()
p /= p.sum()
self._corrPsf = p
return p
# @staticmethod
# def _fn(v,sx,sy,rho):
# r = gaussian2d((v[0],v[1]), sx, sy, 0, 0, rho)
# r/=r.sum()
# return r
def drawPoints(self, img=None):
c = False
if img is None:
img = self.img.copy()
elif img is False:
img = np.zeros(self.img.shape)
c = 1
if not c:
c = img.max() - 1
for p in self.points:
cv2.circle(img, (p[0], p[1]), self.kernel_size // 2, c)
return img
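# Minimal usage sketch for SharpnessfromPointSources (illustrative only; the
# image and ROI below are placeholders): feed one or more corrected images
# containing isolated point sources, then average them into a PSF estimate.
def _example_estimate_psf(image, roi=None):
    estimator = SharpnessfromPointSources(max_kernel_size=51)
    estimator.addImg(image, roi=roi)         # can be called for several images
    return estimator.psf(filter_below=0.05)  # normalised kernel, sums to 1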
if __name__ == '__main__':
# TODO: generic example
pass
# from imgProcessor.imgIO import imread, imwrite, out
# from skimage import restoration
# from fancytools.os.PathStr import PathStr
# from imgProcessor.zDenoise import zDenoise
# from matplotlib import pyplot as plt
#
#
# def correct(i1,i2,bg1,bg2):
# folder = i1.dirname()
# i1 = imread(i1)
# i2 = imread(i2)
# bg1 = imread(bg1)
# bg2 = imread(bg2)
#
# i = zDenoise([i1,i2])[0]
# bg = zDenoise([bg1,bg2])[0]
# corr = i.astype(int)-bg
# imwrite(folder.join('corrected.tif'), corr)
#
#
# f = PathStr('C:\\Users\\elkb4\\Desktop\\PhD\\Measurements\\HuLC\\Calibration\\psfFromPoints')
# f1 = f.join('1')
# f2 = f.join('2')
# f3 = f.join('3')
#
# #imgs = f1.all()[1:]
# #correct(imgs[2],imgs[3],imgs[0],imgs[1])
# #imgs = f2.all()[1:]
# #correct(imgs[2],imgs[3],imgs[0],imgs[1])
# # imgs = f3.all()[1:]
# # correct(imgs[2],imgs[3],imgs[0],imgs[1])
#
# p = SharpnessfromPointSources()
#
#
# # img = f1.join('corrected.tif')
# # roi = [(1483,1353),
# # (1781,1344),
# # (1797,727),
# # (1499,703)]
# # p.addImg(img, roi)
# #
# #
# img = f2.join('corrected.tif')
# # roi = [(1083,1814),
# # (1378,1817),
# # (1358,1192),
# # (1076,1180)]
# # p.addImg(img, roi)
#
#
# img = f3.join('corrected.tif')
# roi = [(794,1870),
# (2275,1874),
# (2290,925),
# (798,878)]
# p.addImg(img, roi)
# print 'found %s points' %p.n_points
#
# psf = p.psf(filter_below=0.05)
# print 'standard deviation: %s' %p.stdDev()
# #p._std = 0.7
# #psf = p.gaussianPsf()
#
# np.save(f.join('psf.npy'), psf)
#
#
#
# plt.imshow(psf, interpolation='none')
# plt.colorbar()
# plt.show()
#
# img = p.drawPoints()
# plt.imshow(img)
# plt.colorbar()
# plt.show()
#
# #SHARPEN:
# #img = imread(f2.join('corrected.tif'), 'gray', float)
#
# img = imread('C:\\Users\\elkb4\Desktop\\PhD\Measurements\\EL round robin\\HULC el round robin\\mod7\\mod7_e180_g4_b1_V38-970_I7-801_T19-062_p2-2_n1__3.tif', 'gray', float)
#
#
# mx = img.max()
# img/=mx
# #BEST:
# deconvolved, _ = restoration.unsupervised_wiener(img, psf)
#
# #FAST BUT STILL BLURRY:
# #deconvolved = restoration.wiener(img, psf, balance=0.1)
#
# #AS GOOD AS unsupervised, BUT SLOWER:
# #deconvolved = restoration.richardson_lucy(img, psf, iterations=4, clip=True)
#
# deconvolved*=mx
#
# imwrite(f.join('deblurred.tif'), deconvolved)
# plt.imshow(deconvolved)
# plt.colorbar()
# plt.show()
|
gpl-3.0
|
LFPy/LFPy
|
examples/bioRxiv281717/figure_5.py
|
1
|
17284
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''plotting script for figure 5 in manuscript preprint on output of
example_parallel_network.py
Copyright (C) 2018 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec
from mpl_toolkits.axisartist.axislines import SubplotZero
import os
import numpy as np
import h5py
from LFPy import FourSphereVolumeConductor, MEG
import example_parallel_network_plotting as plotting
from mpi4py import MPI
# set up MPI environment
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
fontsize = 14
titlesize = 16
legendsize = 12
plt.rcParams.update({
'axes.xmargin': 0.0,
'axes.ymargin': 0.0,
'axes.labelsize': fontsize,
'axes.titlesize': titlesize,
'figure.titlesize': fontsize,
'font.size': fontsize,
'legend.fontsize': legendsize,
})
def plot_spike_raster(ax, PSET, T):
f = h5py.File(
os.path.join(
PSET.OUTPUTPATH,
'example_parallel_network_output.h5'),
'r')
for i, (m_name, name) in enumerate(
zip(PSET.populationParameters['m_type'],
PSET.populationParameters['me_type'])):
x = []
y = []
ax.hlines(f['SPIKES'][name]['gids'][()].min(),
T[0], T[1], 'k', lw=0.25)
for gid, spt in zip(f['SPIKES'][name]['gids'],
f['SPIKES'][name]['times']):
if len(spt) > 0:
y += [gid] * spt.size
x += list(spt)
x = np.array(x)
y = np.array(y)
inds = (x >= T[0]) & (x <= T[1])
ax.plot(x[inds], y[inds], '|',
color=colors[i], markersize=2,
lw=2, clip_on=True, label=m_name)
f.close()
ax.set_xlim(T[0], T[1])
ax.set_ylim(-0.5, PSET.populationParameters['POP_SIZE'].sum() + 0.5)
ax.invert_yaxis()
ax.legend(loc=1, markerscale=5)
ax.set_xlabel('time (ms)', labelpad=0)
ax.set_ylabel('cell ID')
ax.set_title('spike raster')
if __name__ == '__main__':
# get simulation parameters
from example_parallel_network_parameters import PSET
# cell type colors
colors = [
plt.get_cmap(
'Set1',
PSET.populationParameters.size)(i) for i in range(
PSET.populationParameters.size)]
# time shown
T = (PSET.TRANSIENT, PSET.TRANSIENT + 1000.)
# Set up figure and subplots
fig = plt.figure(figsize=(16, 15.5))
gs = GridSpec(5, 3, hspace=0.3, wspace=0.2, left=0.06, right=0.95,
top=0.95, bottom=0.05)
alphabet = 'ABCDEFGHIJKLMNOPQRST'
# 90 deg rotation matrices around x-, y- and z-axis
Rx90 = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
Ry90 = np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])
Rz90 = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
# PANEL A. Spike raster
ax = fig.add_subplot(gs[:2, 0])
plot_spike_raster(ax, PSET, T)
ax.set_xticklabels([])
ax.set_xlabel('')
plotting.remove_axis_junk(ax)
ax.text(-0.1, 1.025, alphabet[0],
horizontalalignment='center',
verticalalignment='center',
fontsize=16, fontweight='demibold',
transform=ax.transAxes)
# PANEL B. Spike count histogram
gs0 = GridSpecFromSubplotSpec(4, 1, subplot_spec=gs[:2, 1])
axes = [
fig.add_subplot(
gs0[i]) for i in range(
PSET.populationParameters['me_type'].size)]
f = h5py.File(
os.path.join(
PSET.OUTPUTPATH,
'example_parallel_network_output.h5'),
'r')
dt = 5. # bin size for histograms
bins = np.arange(T[0], T[1] + dt, dt)
axes[0].set_title(r'spike-count histograms ($\Delta t={}$ ms)'.format(dt))
for i, (m_name, name) in enumerate(
zip(PSET.populationParameters['m_type'],
PSET.populationParameters['me_type'])):
ax = axes[i]
plotting.remove_axis_junk(ax)
data = np.hstack(f['SPIKES'][name]['times'][()])
ax.hist(data, bins=bins, color=colors[i][:-1], label=m_name)
ax.axis(ax.axis('tight'))
ax.set_xlim(PSET.TRANSIENT, PSET.TRANSIENT + 1000.)
ax.legend(loc=1)
ax.set_ylabel('count')
if ax != axes[-1]:
ax.set_xticklabels([])
else:
ax.set_xlabel('time (ms)', labelpad=0)
axes[0].text(-0.1, 1.1, alphabet[1],
horizontalalignment='center',
verticalalignment='center',
fontsize=16, fontweight='demibold',
transform=axes[0].transAxes)
# PANEL C Extracellular potential
gs0 = GridSpecFromSubplotSpec(12, 1, subplot_spec=gs[:2, 2])
ax = fig.add_subplot(gs0[:-2])
f = h5py.File(
os.path.join(
PSET.OUTPUTPATH,
'example_parallel_network_output.h5'),
'r')
for data, title, color in zip(
[f['SUMMED_OUTPUT'][()]['imem']],
['extracellular potentials, summed'],
['k']):
ax.set_title(title)
vlimround = plotting.draw_lineplot(
ax=ax,
data=plotting.decimate(
data,
q=PSET.decimate_q),
dt=PSET.dt *
PSET.decimate_q,
T=T,
color=color,
scalebarbasis='log10')
f.close()
ax.set_xticklabels([])
ax.set_xlabel('')
ax.set_ylabel('')
ax.text(-0.1, 1.055, alphabet[2],
horizontalalignment='center',
verticalalignment='center',
fontsize=16, fontweight='demibold',
transform=ax.transAxes)
# PANEL D ECoG potential
ax = fig.add_subplot(gs0[-1])
f = h5py.File(
os.path.join(
PSET.OUTPUTPATH,
'example_parallel_network_output.h5'),
'r')
data = f['SUMMED_ECOG'][()]['imem']
title = 'ECoG potential, summed'
color = 'k'
ax.set_title(title)
vlimround = plotting.draw_lineplot(
ax=ax,
data=plotting.decimate(
f['SUMMED_OUTPUT'][()]['imem'][0, ].reshape((1, -1)),
q=PSET.decimate_q),
dt=PSET.dt * PSET.decimate_q,
scalebar=False,
T=T, color='0.5', scalebarpos=-1.5, scalebarbasis='log10')
vlimround = plotting.draw_lineplot(
ax=ax, data=plotting.decimate(data, q=PSET.decimate_q),
dt=PSET.dt * PSET.decimate_q,
vlimround=vlimround, scalebar=True,
T=T, color=color, scalebarpos=-1.5, scalebarbasis='log10')
lines = ax.get_lines()
ax.legend(('ch. 1', 'ECoG'), loc=8, ncol=2,
bbox_to_anchor=(0.5, -1.25), frameon=False)
f.close()
ax.set_xticklabels([])
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_yticklabels([])
ax.text(-0.1, 1.175, alphabet[3],
horizontalalignment='center',
verticalalignment='center',
fontsize=16, fontweight='demibold',
transform=ax.transAxes)
# PANEL E. current dipole moment signal
f = h5py.File(os.path.join(PSET.OUTPUTPATH,
'example_parallel_network_output.h5'), 'r')
p_temp = np.zeros(f['CURRENT_DIPOLE_MOMENT'].shape)
for name in f['CURRENT_DIPOLE_MOMENT'].dtype.names:
p_temp += f['CURRENT_DIPOLE_MOMENT'][name]
p_net = plotting.decimate(p_temp, q=PSET.decimate_q)
p_net *= 1E-3 # nA um -> 1E-3 nA m unit conversion
del p_temp
gs0 = GridSpecFromSubplotSpec(3, 1, subplot_spec=gs[2, 0])
for i, ylabel in enumerate(
[r'$\mathbf{p \cdot \hat{x}}$',
r'$\mathbf{p \cdot \hat{y}}$',
r'$\mathbf{p \cdot \hat{z}}$']):
ax = fig.add_subplot(gs0[i])
if i == 0:
ax.set_title(r'current dipole moment ($10^{-3}$ nA m)')
ax.text(-0.1, 1.15, alphabet[4],
horizontalalignment='center',
verticalalignment='center',
fontsize=16, fontweight='demibold',
transform=ax.transAxes)
plotting.remove_axis_junk(ax)
t = np.arange(p_net.shape[1]) * PSET.dt * PSET.decimate_q
inds = (t >= T[0]) & (t <= T[1])
ax.plot(t[inds], p_net[i, inds], 'k', lw=1)
ax.set_ylabel(ylabel)
ax.set_xticklabels([])
# panel F. Illustration of 4-sphere volume conductor model geometry
ax = SubplotZero(fig, gs[2, 1])
fig.add_subplot(ax)
ax.set_title('four-sphere volume conductor model')
for direction in ["xzero"]:
ax.axis[direction].set_visible(True)
for direction in ["left", "right", "bottom", "top"]:
ax.axis[direction].set_visible(False)
theta = np.linspace(0, np.pi, 31)
# draw some circles:
for i, r, label in zip(range(4), PSET.foursphereParams['radii'], [
'brain', 'CSF', 'skull', 'scalp']):
ax.plot(
np.cos(theta) *
r,
np.sin(theta) *
r,
'C{}'.format(i),
label=label +
r', $r_%i=%i$ mm' %
(i +
1,
r /
1000),
clip_on=False)
# draw measurement points
ax.plot(PSET.foursphereParams['r_electrodes'][:, 0],
PSET.foursphereParams['r_electrodes'][:, 2],
'ko',
label='EEG/MEG sites')
for i, (x, y, z) in enumerate(PSET.foursphereParams['r_electrodes']):
ax.text(x, z + 2500, r'{}'.format(i + 1), ha='center')
# dipole location
ax.plot([0], [PSET.foursphereParams['radii'][0] +
PSET.layer_data['center'][3]], 'k.', label='dipole site')
ax.axis('equal')
ax.set_ylim(top=max(PSET.foursphereParams['radii']) + 5000)
ax.set_xticks(np.r_[-np.array(PSET.foursphereParams['radii']),
0, PSET.foursphereParams['radii']])
ax.set_xticklabels([])
ax.legend(loc=(0.25, 0.05), frameon=False)
ax.text(-0.1, 1.05, alphabet[5],
horizontalalignment='center',
verticalalignment='center',
fontsize=16, fontweight='demibold',
transform=ax.transAxes)
# PANEL G. EEG signal
ax = fig.add_subplot(gs[2, 2])
ax.set_title(r'surface potential $\phi_\mathbf{p}(\mathbf{r})$ ')
f = h5py.File(os.path.join(PSET.OUTPUTPATH,
'example_parallel_network_output.h5'), 'r')
# compute dipole potentials as the sum of contributions in
# different positions
phi_p = np.zeros(
(PSET.foursphereParams['r_electrodes'].shape[0],
f['CURRENT_DIPOLE_MOMENT'][name].shape[1]))
for i, name in enumerate(PSET.populationParameters['me_type']):
p = f['CURRENT_DIPOLE_MOMENT'][name]
# four-sphere volume conductor
sphere = FourSphereVolumeConductor(
**PSET.foursphereParams
)
phi_p += sphere.get_dipole_potential(
p=p,
dipole_location=np.array([0, 0,
PSET.foursphereParams['radii'][0]
+ PSET.layer_data['center'][3:][i % 2]])
)
vlimround = plotting.draw_lineplot(
ax=ax,
data=plotting.decimate(
phi_p,
q=PSET.decimate_q)[::-1, ] * 1E3, # mV -> µV unit conversion
unit=r'$\mu$V',
dt=PSET.dt * PSET.decimate_q,
T=T, color='k', scalebarbasis='log10')
ax.set_xticklabels([])
ax.set_xlabel('')
ax.set_yticklabels(['{}'.format(i + 1)
for i in range(sphere.rxyz.shape[0])])
ax.set_ylabel('position', labelpad=10)
ax.text(-0.1, 1.05, alphabet[6],
horizontalalignment='center',
verticalalignment='center',
fontsize=16, fontweight='demibold',
transform=ax.transAxes)
# PANEL H. components of the MEG signal along the spherical unit vectors
# (as recorded by SQUID magnetometers outside the skull)
# compute the radial unit vector from the center of the sphere to each
# measurement point, then unit vectors along theta and phi
r_hat = (sphere.rxyz.T / sphere.r).T
theta = np.arccos(sphere.rxyz[:, 2] / sphere.r)
phi = np.arctan2(sphere.rxyz[:, 1], sphere.rxyz[:, 0])
# polar (theta) unit vector: (cos(theta)cos(phi), cos(theta)sin(phi), -sin(theta))
theta_hat = np.array([np.cos(theta) * np.cos(phi),
                      np.cos(theta) * np.sin(phi),
                      -np.sin(theta)]).T
phi_hat = np.array([-np.sin(phi), np.cos(phi), np.zeros(r_hat.shape[0])]).T
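# Illustrative sanity check (not part of the original analysis): r_hat,
# theta_hat and phi_hat should form an orthonormal basis at every sensor
# location, e.g.
#   assert np.allclose(np.linalg.norm(theta_hat, axis=1), 1.0)
#   assert np.allclose(np.einsum('ij,ij->i', r_hat, theta_hat), 0.0)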
for j, (unitvector, akse) in enumerate(zip(
[theta_hat, phi_hat, r_hat],
[r'\hat{\mathbf{\theta}}',
r'\hat{\mathbf{\varphi}}',
r'\hat{\mathbf{r}}'])):
ax = fig.add_subplot(gs[3, j])
ax.set_title(
'surface magn. field '
+ r'$\mathbf{B}_\mathbf{p}(\mathbf{r}) \cdot %s$' % akse)
# radial/tangential component of H at squid locations
H_rt = np.zeros(phi_p.shape)
for i, name in enumerate(PSET.populationParameters['me_type']):
# dipole position
dipole_position = np.array(
[0, 0,
PSET.foursphereParams['radii'][0]
+ PSET.layer_data['center'][3:][i % 2]])
# create MEG object and compute magnetic field
meg = MEG(sensor_locations=PSET.foursphereParams['r_electrodes'])
H = meg.calculate_H(
f['CURRENT_DIPOLE_MOMENT'][name],
dipole_position)
for k, (h, u) in enumerate(zip(H, unitvector)):
H_rt[k, ] += np.dot(h.T, u)
B_rt = H_rt * meg.mu # unit mT (from nA/µm * Tm/A)
vlimround = plotting.draw_lineplot(
ax=ax,
data=plotting.decimate(B_rt, q=PSET.decimate_q)[
::-1, ] * 1E12, # mT --> fT unit conversion
dt=PSET.dt * PSET.decimate_q, unit=r'fT',
T=T, color='k', scalebarbasis='log10')
ax.set_yticklabels(['{}'.format(i + 1)
for i in range(sphere.rxyz.shape[0])])
ax.set_xlabel('')
ax.set_xticklabels([])
if j == 0:
ax.set_ylabel('position', labelpad=10)
ax.text(-0.1, 1.05, alphabet[7],
horizontalalignment='center',
verticalalignment='center',
fontsize=16, fontweight='demibold',
transform=ax.transAxes)
else:
ax.set_yticklabels([])
ax.set_ylabel('')
    # PANEL I. components of the MEG signal along the spherical unit vectors
    # (as recorded by SQUID magnetometers outside the skull)
# with dipole sources rotated 90 deg counterclockwise around x-axis
ax = fig.add_subplot(gs[4, j])
ax.set_title(
'surface magn. field '
+ r'$\mathbf{B}_{R_x(\pi/2)\mathbf{p}}(\mathbf{r}) '
+ r'\cdot %s$' % akse)
# radial/tangential component of H at squid locations
H_rt = np.zeros(phi_p.shape)
for i, name in enumerate(PSET.populationParameters['me_type']):
# dipole position
dipole_position = np.array(
[0, 0,
PSET.foursphereParams['radii'][0]
+ PSET.layer_data['center'][3:][i % 2]])
# create MEG object and compute magnetic field
meg = MEG(sensor_locations=PSET.foursphereParams['r_electrodes'])
H = meg.calculate_H(
np.dot(
Rx90,
f['CURRENT_DIPOLE_MOMENT'][name]),
dipole_position)
        # project the magnetic field onto the current unit vector at each
        # measurement point
for k, (h, u) in enumerate(zip(H, unitvector)):
H_rt[k, ] += np.dot(h.T, u)
B_rt = H_rt * meg.mu # unit mT (from nA/µm * Tm/A)
vlimround = plotting.draw_lineplot(
ax=ax,
data=plotting.decimate(B_rt, q=PSET.decimate_q)[
::-1, ] * 1E12, # mT --> fT unit conversion
dt=PSET.dt * PSET.decimate_q, unit=r'fT',
T=T, color='k', scalebarbasis='log10')
ax.set_yticklabels(['{}'.format(i + 1)
for i in range(sphere.rxyz.shape[0])])
ax.set_xlabel('time (ms)', labelpad=0)
if j == 0:
ax.set_ylabel('position', labelpad=10)
ax.text(-0.1, 1.05, alphabet[8],
horizontalalignment='center',
verticalalignment='center',
fontsize=16, fontweight='demibold',
transform=ax.transAxes)
else:
ax.set_yticklabels([])
ax.set_ylabel('')
fig.savefig(
os.path.join(
PSET.OUTPUTPATH,
'figure_5.pdf'),
bbox_inches='tight')
plt.show()
|
gpl-3.0
|
alrusdi/lettuce
|
lettuce/django/steps/mail.py
|
20
|
1903
|
"""
Step definitions for working with Django email.
"""
from smtplib import SMTPException
from django.core import mail
from lettuce import step
STEP_PREFIX = r'(?:Given|And|Then|When) '
CHECK_PREFIX = r'(?:And|Then) '
EMAIL_PARTS = ('subject', 'body', 'from_email', 'to', 'bcc', 'cc')
GOOD_MAIL = mail.EmailMessage.send
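# Illustrative feature-file snippet these steps are written to match (the
# sign-up step itself is hypothetical and not defined in this module):
#
#   When I sign up with "panda@example.com"
#   Then I have sent 1 email
#   And I have sent an email with "Welcome" in the subject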
@step(CHECK_PREFIX + r'I have sent (\d+) emails?')
def mail_sent_count(step, count):
"""
Then I have sent 2 emails
"""
count = int(count)
    assert len(mail.outbox) == count, \
        "Expected {0} emails in the outbox, found {1}".format(
            count, len(mail.outbox))
@step(r'I have not sent any emails')
def mail_not_sent(step):
"""
I have not sent any emails
"""
return mail_sent_count(step, 0)
@step(CHECK_PREFIX + (r'I have sent an email with "([^"]*)" in the ({0})'
'').format('|'.join(EMAIL_PARTS)))
def mail_sent_content(step, text, part):
"""
Then I have sent an email with "pandas" in the body
"""
    assert any(text in getattr(email, part)
               for email in mail.outbox
               ), "No email contained the expected text in the {0}".format(part)
@step(CHECK_PREFIX + r'I have sent an email with the following in the body:')
def mail_sent_content_multiline(step):
"""
I have sent an email with the following in the body:
\"""
Name: Mr. Panda
\"""
"""
return mail_sent_content(step, step.multiline, 'body')
@step(STEP_PREFIX + r'I clear my email outbox')
def mail_clear(step):
"""
I clear my email outbox
"""
mail.EmailMessage.send = GOOD_MAIL
mail.outbox = []
def broken_send(*args, **kwargs):
"""
Broken send function for email_broken step
"""
raise SMTPException("Failure mocked by lettuce")
@step(STEP_PREFIX + r'sending email does not work')
def email_broken(step):
"""
Break email sending
"""
mail.EmailMessage.send = broken_send
|
gpl-3.0
|
walterreade/scikit-learn
|
examples/gaussian_process/plot_gpc_xor.py
|
104
|
2132
|
"""
========================================================================
Illustration of Gaussian process classification (GPC) on the XOR dataset
========================================================================
This example illustrates GPC on XOR data. Compared are a stationary, isotropic
kernel (RBF) and a non-stationary kernel (DotProduct). On this particular
dataset, the DotProduct kernel obtains considerably better results because the
class boundaries coincide with the coordinate axes. In general, however,
stationary kernels often obtain better results.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, DotProduct
xx, yy = np.meshgrid(np.linspace(-3, 3, 50),
np.linspace(-3, 3, 50))
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
plt.figure(figsize=(10, 5))
kernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0)**2]
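# Illustrative note: 1.0 * DotProduct(sigma_0=1.0) ** 2 corresponds to
# k(x, z) = (1 + x . z)^2, whose feature expansion contains the cross-term
# x_1 * x_2; the XOR labels are exactly the sign of x_1 * x_2, which is why
# this non-stationary kernel fits this dataset so well.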
for i, kernel in enumerate(kernels):
clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
Z = Z.reshape(xx.shape)
plt.subplot(1, 2, i + 1)
image = plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
    # draw the p=0.5 decision boundary as a dashed contour
    contours = plt.contour(xx, yy, Z, levels=[0.5], linewidths=2,
                           linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.colorbar(image)
plt.title("%s\n Log-Marginal-Likelihood:%.3f"
% (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
arabenjamin/scikit-learn
|
sklearn/decomposition/tests/test_truncated_svd.py
|
240
|
6055
|
"""Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
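# Note: the 1 + log(tf) transform above is the usual sublinear term-frequency
# scaling, which is what makes this random sparse matrix resemble a small
# tf-idf matrix.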
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
|
bsd-3-clause
|
nan86150/ImageFusion
|
lib/python2.7/site-packages/matplotlib/testing/jpl_units/UnitDblFormatter.py
|
23
|
1485
|
#===========================================================================
#
# UnitDblFormatter
#
#===========================================================================
"""UnitDblFormatter module containing class UnitDblFormatter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.ticker as ticker
#
# Place all imports before here.
#===========================================================================
__all__ = [ 'UnitDblFormatter' ]
#===========================================================================
class UnitDblFormatter( ticker.ScalarFormatter ):
"""The formatter for UnitDbl data types. This allows for formatting
with the unit string.
"""
def __init__( self, *args, **kwargs ):
'The arguments are identical to matplotlib.ticker.ScalarFormatter.'
ticker.ScalarFormatter.__init__( self, *args, **kwargs )
def __call__( self, x, pos = None ):
'Return the format for tick val x at position pos'
if len(self.locs) == 0:
return ''
else:
return str(x)
def format_data_short( self, value ):
"Return the value formatted in 'short' format."
return str(value)
def format_data( self, value ):
"Return the value formatted into a string."
return str(value)
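# Usage sketch (illustrative, not part of the original module): attach the
# formatter to an axis so that ticks of UnitDbl data are rendered via str(),
# e.g.
#   ax.xaxis.set_major_formatter(UnitDblFormatter(useOffset=False))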
|
mit
|
SimonBiggs/electronfactors
|
electronfactors/visuals/print_to_scale.py
|
1
|
2223
|
# Copyright (C) 2015 Simon Biggs
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# http://www.gnu.org/licenses/.
import numpy as np
import shapely.affinity as aff
import matplotlib.pyplot as plt
from matplotlib import pylab
import descartes as des
import subprocess
import os
def print_to_pdf(shapley_list, filename, random_colours=True, **kwargs):
scale = kwargs['scale']
pylab.rcParams['savefig.dpi'] = 254
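    # Note: 254 dpi is exactly 100 pixels per cm (254 / 2.54), and the figure
    # size below is given in cm converted to inches, so (assuming the shape
    # coordinates are in cm) the saved figure prints at true scale.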
x_min = []
x_max = []
y_min = []
y_max = []
for shape in shapley_list:
bound = shape.bounds
x_min.append(bound[0])
y_min.append(bound[1])
x_max.append(bound[2])
y_max.append(bound[3])
x_limits = [np.min(x_min), np.max(x_max)]
y_limits = [np.min(y_min), np.max(y_max)]
fig_width = np.ptp(x_limits)
fig_height = np.ptp(y_limits)
fig = plt.figure(figsize=(fig_width/2.54, fig_height/2.54))
fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
ax = fig.add_subplot(111)
for shape in shapley_list:
if random_colours:
colours = np.append(
np.random.uniform(size=3), 0.3)
else:
colours = [0, 0, 0, 0.3]
scaled_shape = aff.scale(
shape, xfact=scale, yfact=scale)
patch = des.PolygonPatch(
scaled_shape, fc=colours
)
ax.add_patch(patch)
ax.set_xlim(x_limits)
ax.set_ylim(y_limits)
plt.grid(True)
plt.savefig("temp.png")
subprocess.call([
"convert", "-units", "PixelsPerInch",
"temp.png", "-density", "254", "temp.pdf"
])
os.rename("temp.pdf", filename)
os.remove("temp.png")
|
agpl-3.0
|
junmin-zhu/chromium-rivertrail
|
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
|
2
|
2643
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# This environment variable check mimics what
# buildbot_chrome_nacl_stage.py does.
is_win64 = (sys.platform in ('win32', 'cygwin') and
('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# http://code.google.com/p/nativeclient/issues/detail?id=2511
tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
if sys.platform == 'darwin':
# TODO(mseaborn) fix
# http://code.google.com/p/nativeclient/issues/detail?id=1835
tests_to_disable.append('run_ppapi_crash_browser_test')
if sys.platform in ('win32', 'cygwin'):
# This one is only failing for nacl_glibc on x64 Windows
# but it is not clear how to disable only that limited case.
# See http://crbug.com/132395
tests_to_disable.append('run_inbrowser_test_runner')
script_dir = os.path.dirname(os.path.abspath(__file__))
test_dir = os.path.dirname(script_dir)
chrome_dir = os.path.dirname(test_dir)
src_dir = os.path.dirname(chrome_dir)
nacl_integration_script = os.path.join(
src_dir, 'native_client/build/buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
# TODO(ncbray) re-enable.
# https://code.google.com/p/chromium/issues/detail?id=133568
'--disable_glibc',
'--disable_tests=%s' % ','.join(tests_to_disable)]
cmd += args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
|
bsd-3-clause
|
midusi/handshape_recognition
|
tutorial/tfenv/share/doc/networkx-1.11/examples/graph/napoleon_russian_campaign.py
|
14
|
3184
|
#!/usr/bin/env python
"""
Minard's data from Napoleon's 1812-1813 Russian Campaign.
http://www.math.yorku.ca/SCS/Gallery/minard/minard.txt
"""
# Author: Aric Hagberg ([email protected])
# Copyright (C) 2006-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import string
import networkx as nx
def minard_graph():
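    # Each data row below is: longitude, latitude, surviving troops,
    # direction ('A' = advance, 'R' = retreat) and group index (1-3), as can
    # be seen from how the rows are unpacked into G.pos, G.pop and the edge
    # attributes further down.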
data1="""\
24.0,54.9,340000,A,1
24.5,55.0,340000,A,1
25.5,54.5,340000,A,1
26.0,54.7,320000,A,1
27.0,54.8,300000,A,1
28.0,54.9,280000,A,1
28.5,55.0,240000,A,1
29.0,55.1,210000,A,1
30.0,55.2,180000,A,1
30.3,55.3,175000,A,1
32.0,54.8,145000,A,1
33.2,54.9,140000,A,1
34.4,55.5,127100,A,1
35.5,55.4,100000,A,1
36.0,55.5,100000,A,1
37.6,55.8,100000,A,1
37.7,55.7,100000,R,1
37.5,55.7,98000,R,1
37.0,55.0,97000,R,1
36.8,55.0,96000,R,1
35.4,55.3,87000,R,1
34.3,55.2,55000,R,1
33.3,54.8,37000,R,1
32.0,54.6,24000,R,1
30.4,54.4,20000,R,1
29.2,54.3,20000,R,1
28.5,54.2,20000,R,1
28.3,54.3,20000,R,1
27.5,54.5,20000,R,1
26.8,54.3,12000,R,1
26.4,54.4,14000,R,1
25.0,54.4,8000,R,1
24.4,54.4,4000,R,1
24.2,54.4,4000,R,1
24.1,54.4,4000,R,1"""
data2="""\
24.0,55.1,60000,A,2
24.5,55.2,60000,A,2
25.5,54.7,60000,A,2
26.6,55.7,40000,A,2
27.4,55.6,33000,A,2
28.7,55.5,33000,R,2
29.2,54.2,30000,R,2
28.5,54.1,30000,R,2
28.3,54.2,28000,R,2"""
data3="""\
24.0,55.2,22000,A,3
24.5,55.3,22000,A,3
24.6,55.8,6000,A,3
24.6,55.8,6000,R,3
24.2,54.4,6000,R,3
24.1,54.4,6000,R,3"""
cities="""\
24.0,55.0,Kowno
25.3,54.7,Wilna
26.4,54.4,Smorgoni
26.8,54.3,Moiodexno
27.7,55.2,Gloubokoe
27.6,53.9,Minsk
28.5,54.3,Studienska
28.7,55.5,Polotzk
29.2,54.4,Bobr
30.2,55.3,Witebsk
30.4,54.5,Orscha
30.4,53.9,Mohilow
32.0,54.8,Smolensk
33.2,54.9,Dorogobouge
34.3,55.2,Wixma
34.4,55.5,Chjat
36.0,55.5,Mojaisk
37.6,55.8,Moscou
36.6,55.3,Tarantino
36.5,55.0,Malo-Jarosewii"""
c={}
for line in cities.split('\n'):
x,y,name=line.split(',')
c[name]=(float(x),float(y))
g=[]
for data in [data1,data2,data3]:
G=nx.Graph()
i=0
G.pos={} # location
G.pop={} # size
last=None
for line in data.split('\n'):
x,y,p,r,n=line.split(',')
G.pos[i]=(float(x),float(y))
G.pop[i]=int(p)
if last is None:
last=i
else:
G.add_edge(i,last,{r:int(n)})
last=i
i=i+1
g.append(G)
return g,c
if __name__ == "__main__":
(g,city)=minard_graph()
try:
import matplotlib.pyplot as plt
plt.figure(1,figsize=(11,5))
plt.clf()
colors=['b','g','r']
for G in g:
c=colors.pop(0)
node_size=[int(G.pop[n]/300.0) for n in G]
nx.draw_networkx_edges(G,G.pos,edge_color=c,width=4,alpha=0.5)
nx.draw_networkx_nodes(G,G.pos,node_size=node_size,node_color=c,alpha=0.5)
nx.draw_networkx_nodes(G,G.pos,node_size=5,node_color='k')
for c in city:
x,y=city[c]
plt.text(x,y+0.1,c)
plt.savefig("napoleon_russian_campaign.png")
except ImportError:
pass
|
agpl-3.0
|
flaviovdf/tribeflow
|
scripts/paper-data/plot_figure_mem.py
|
2
|
1653
|
#-*- coding: utf8
from __future__ import division, print_function
import matplotlib
matplotlib.use('Agg')
from matplotlib import rc
import matplotlib.pyplot as plt
import pandas as pd
def initialize_matplotlib():
inches_per_pt = 1.0 / 72.27
fig_width = 240 * inches_per_pt # width in inches
fig_height = 160 * inches_per_pt #.4 * fig_width
rc('axes', labelsize=8)
rc('axes', titlesize=8)
rc('axes', unicode_minus=False)
rc('axes', grid=False)
rc('figure', figsize=(fig_width, fig_height))
rc('grid', linestyle=':')
rc('font', family='serif')
rc('legend', fontsize=8)
rc('lines', linewidth=.7)
rc('ps', usedistiller='xpdf')
rc('text', usetex=True)
rc('xtick', labelsize=8)
rc('ytick', labelsize=8)
initialize_matplotlib()
df = pd.read_excel('results_for_figure1.xlsx', sheetname='Figure5')
colors = {
'LFM-1k':'go-',
'LFM-G':'ms-',
'Bkite':'y*-',
'FourSQ':'bD-',
'Yoo':'rH-'
}
for dset in colors:
idx = (df['Dataset'] == dset)
x_ax = df[idx]['MEM']
y_ax = df[idx]['MRR']
plt.plot(x_ax, y_ax, colors[dset], alpha=.5, markersize=5, label=dset)
ax = plt.gca()
ax.tick_params(direction='out', pad=0.3)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.ylim((0.1, 0.6))
plt.xlim((0, 6))
plt.minorticks_off()
plt.ylabel('Mean Reciprocal Rank (MRR)', labelpad=0)
plt.xlabel('Burst size', labelpad=0)
plt.tight_layout(pad=0.2)
plt.legend(loc='center right', frameon=False, ncol=3)
plt.savefig('burst.pdf')
|
bsd-3-clause
|
saiwing-yeung/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
32
|
41905
|
from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.exceptions import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
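    # Equivalently (illustrative note), this is the Mann-Whitney / pairwise
    # view of AUC: the fraction of (positive, negative) pairs that are ordered
    # correctly; ties contribute nothing because the comparison below is
    # strict (diff_matrix > 0).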
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
expected_auc = _auc(y_true, probas_pred)
for drop in [True, False]:
fpr, tpr, thresholds = roc_curve(y_true, probas_pred,
drop_intermediate=drop)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at 1,
    # even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=False)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no positive sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no negative sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_roc_curve_drop_intermediate():
# Test that drop_intermediate drops the correct thresholds
y_true = [0, 0, 0, 0, 1, 1]
y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds, [1., 0.7, 0.])
# Test dropping thresholds with repeating scores
y_true = [0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1]
y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds,
[1.0, 0.9, 0.7, 0.6, 0.])
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test the precision-recall curve and the area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    # A ValueError should be raised if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works for various setups:
    # basic check with increasing label space size and decreasing scores
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied for rank 1 are both assigned rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels with a better or equal
            # (i.e. smaller or equal) rank.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial cases
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
# Non trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raise the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
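# --- Illustration (added; not part of the original test module) ---
# A minimal, hand-rolled sketch of the metric checked above, assuming dense
# binary indicator targets: for each sample, a relevant label's rank is the
# number of labels scored at least as high, and the coverage is the largest
# such rank. The helper name below is invented for illustration only.
import numpy as np
def _coverage_error_by_hand(y_true, y_score):
    y_true = np.asarray(y_true)
    y_score = np.asarray(y_score)
    coverages = []
    for truth, scores in zip(y_true, y_score):
        relevant = np.flatnonzero(truth)
        if len(relevant) == 0:
            # nothing to cover
            coverages.append(0)
            continue
        ranks = [(scores >= scores[i]).sum() for i in relevant]
        coverages.append(max(ranks))
    return np.mean(coverages)
# e.g. _coverage_error_by_hand([[0, 1, 0], [1, 1, 0]], [[0.1, 10., -3], [0, 1, 3]])
# returns (1 + 3) / 2., matching the "non trivial" assertions above.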
|
bsd-3-clause
|
jian-li/rpg_svo
|
svo_analysis/src/svo_analysis/analyse_logs.py
|
17
|
3497
|
#!/usr/bin/python
import os
import yaml
import numpy as np
import matplotlib.pyplot as plt
def analyse_logs(D, trace_dir):
# identify measurements which result from normal frames and which from keyframes
is_kf = np.argwhere( (D['dropout'] == 1) & (D['repr_n_mps'] >= 0))
is_frame = np.argwhere(D['repr_n_mps'] >= 0)
is_nokf = np.argwhere( (D['dropout'] == 0) & (D['repr_n_mps'] >= 0))
# set initial time to zero
D['timestamp'] = D['timestamp'] - D['timestamp'][0]
# ----------------------------------------------------------------------------
# plot number of reprojected points
mean_n_reproj_points = np.mean(D['repr_n_mps'][is_frame])
mean_n_reproj_matches = np.mean(D['repr_n_new_references'][is_frame])
mean_n_edges_final = np.mean(D['sfba_n_edges_final'][is_frame])
fig = plt.figure(figsize=(8,3))
ax = fig.add_subplot(111, xlabel='time [s]')
ax.plot(D['timestamp'][is_frame], D['repr_n_mps'][is_frame], 'r-',
label='Reprojected Points, avg = %.2f'%mean_n_reproj_points)
ax.plot(D['timestamp'][is_frame], D['repr_n_new_references'][is_frame], 'b-',
label='Feature Matches, avg = %.2f'%mean_n_reproj_matches)
ax.plot(D['timestamp'][is_frame], D['sfba_n_edges_final'][is_frame], 'g-',
label='Points after Optimization, avg = %.2f'%mean_n_edges_final)
ax.set_ylim(bottom=0)
ax.legend(loc='lower right')
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'num_reprojected.pdf'), bbox_inches="tight")
# ----------------------------------------------------------------------------
# plot median error before and after pose-optimization and bundle adjustment
init_error_avg = np.mean(D['sfba_error_init'][is_frame])
opt1_avg = np.mean(D['sfba_error_final'][is_frame])
fig = plt.figure(figsize=(8,2))
ax = fig.add_subplot(111, xlabel='time [s]', ylabel='error [px]')
ax.plot(D['timestamp'][is_frame], D['sfba_error_init'][is_frame], 'r-', label='Initial error')
ax.plot(D['timestamp'][is_frame], D['sfba_error_final'][is_frame], 'b-', label='Final error')
ax.legend(ncol=2)
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'reprojection_error.pdf'), bbox_inches="tight")
print('average reprojection error improvement: ' + str(init_error_avg - opt1_avg))
# ----------------------------------------------------------------------------
# plot number of candidate points
fig = plt.figure(figsize=(8,3))
ax = fig.add_subplot(111, xlabel='time [s]')
ax.plot(D['timestamp'][is_frame], D['n_candidates'][is_frame], 'r-', label='Candidate Points')
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'candidate_points.pdf'), bbox_inches="tight")
# ----------------------------------------------------------------------------
# plot optimization threshold
fig = plt.figure(figsize=(8,2))
ax = fig.add_subplot(111, xlabel='time [s]', ylabel='px')
ax.plot(D['timestamp'][is_frame], D['sfba_thresh'][is_frame], 'r-', label='Threshold')
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'optimization_thresh.pdf'), bbox_inches="tight")
# ----------------------------------------------------------------------------
# write other statistics to file
stat = {'num_frames': len(is_frame),
'num_kfs': len(is_kf),
'reproj_error_avg_improvement': float(init_error_avg - opt1_avg)}
with open(os.path.join(trace_dir,'dataset_stats.yaml'),'w') as outfile:
outfile.write(yaml.dump(stat, default_flow_style=False))
|
gpl-3.0
|
martinwicke/tensorflow
|
tensorflow/examples/learn/boston.py
|
25
|
1932
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNRegressor for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
from sklearn import preprocessing
import tensorflow as tf
from tensorflow.contrib import learn
def main(unused_argv):
# Load dataset
boston = learn.datasets.load_dataset('boston')
x, y = boston.data, boston.target
# Split dataset into train / test
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
x, y, test_size=0.2, random_state=42)
# Scale data (training set) to 0 mean and unit standard deviation.
scaler = preprocessing.StandardScaler()
x_train = scaler.fit_transform(x_train)
# Build 2 layer fully connected DNN with 10, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
regressor = learn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[10, 10])
# Fit
regressor.fit(x_train, y_train, steps=5000, batch_size=1)
# Predict and score
y_predicted = list(
regressor.predict(scaler.transform(x_test), as_iterable=True))
score = metrics.mean_squared_error(y_predicted, y_test)
print('MSE: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
marcocaccin/scikit-learn
|
sklearn/feature_selection/rfe.py
|
6
|
17502
|
# Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
The following example shows how to correctly retrieve the 5 informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
# Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
The following example shows how to retrieve the 5 informative features,
not known a priori, in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to len(cv) - 1
# here, the scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
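# --- Usage sketch (added; not part of the scikit-learn module above) ---
# A hedged, standalone example of RFECV end to end on the Friedman #1 data
# from the docstring. It is meant to be copied into a script or console (this
# module uses relative imports and is not intended to be run directly).
# With step=1, grid_scores_ has ceil((n_features - 1) / step) + 1 entries,
# i.e. ceil((10 - 1) / 1) + 1 = 10 for the 10-feature data below.
if __name__ == "__main__":
    from sklearn.datasets import make_friedman1
    from sklearn.feature_selection import RFECV
    from sklearn.svm import SVR
    X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    selector = RFECV(SVR(kernel="linear"), step=1, cv=5)
    selector.fit(X, y)
    print("selected features:", selector.n_features_)   # expected: 5
    print("ranking:", selector.ranking_)
    print("number of CV scores:", len(selector.grid_scores_))  # expected: 10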
|
bsd-3-clause
|
rs2/pandas
|
pandas/tests/arrays/categorical/test_dtypes.py
|
1
|
6692
|
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas import Categorical, CategoricalIndex, Index, Series, Timestamp
import pandas._testing as tm
class TestCategoricalDtypes:
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list("aabca"), categories=list("abc"), ordered=False)
c2 = Categorical(list("aabca"), categories=list("cab"), ordered=False)
c3 = Categorical(list("aabca"), categories=list("cab"), ordered=True)
assert c1.is_dtype_equal(c1)
assert c2.is_dtype_equal(c2)
assert c3.is_dtype_equal(c3)
assert c1.is_dtype_equal(c2)
assert not c1.is_dtype_equal(c3)
assert not c1.is_dtype_equal(Index(list("aabca")))
assert not c1.is_dtype_equal(c1.astype(object))
assert c1.is_dtype_equal(CategoricalIndex(c1))
assert c1.is_dtype_equal(CategoricalIndex(c1, categories=list("cab")))
assert not c1.is_dtype_equal(CategoricalIndex(c1, ordered=True))
# GH 16659
s1 = Series(c1)
s2 = Series(c2)
s3 = Series(c3)
assert c1.is_dtype_equal(s1)
assert c2.is_dtype_equal(s2)
assert c3.is_dtype_equal(s3)
assert c1.is_dtype_equal(s2)
assert not c1.is_dtype_equal(s3)
assert not c1.is_dtype_equal(s1.astype(object))
def test_set_dtype_same(self):
c = Categorical(["a", "b", "c"])
result = c._set_dtype(CategoricalDtype(["a", "b", "c"]))
tm.assert_categorical_equal(result, c)
def test_set_dtype_new_categories(self):
c = Categorical(["a", "b", "c"])
result = c._set_dtype(CategoricalDtype(list("abcd")))
tm.assert_numpy_array_equal(result.codes, c.codes)
tm.assert_index_equal(result.dtype.categories, Index(list("abcd")))
@pytest.mark.parametrize(
"values, categories, new_categories",
[
# No NaNs, same cats, same order
(["a", "b", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["a", "b", "a"], ["a", "b"], ["b", "a"]),
# Same, unsorted
(["b", "a", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["b", "a", "a"], ["a", "b"], ["b", "a"]),
# NaNs
(["a", "b", "c"], ["a", "b"], ["a", "b"]),
(["a", "b", "c"], ["a", "b"], ["b", "a"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
# Introduce NaNs
(["a", "b", "c"], ["a", "b"], ["a"]),
(["a", "b", "c"], ["a", "b"], ["b"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
# No overlap
(["a", "b", "c"], ["a", "b"], ["d", "e"]),
],
)
@pytest.mark.parametrize("ordered", [True, False])
def test_set_dtype_many(self, values, categories, new_categories, ordered):
c = Categorical(values, categories)
expected = Categorical(values, new_categories, ordered)
result = c._set_dtype(expected.dtype)
tm.assert_categorical_equal(result, expected)
def test_set_dtype_no_overlap(self):
c = Categorical(["a", "b", "c"], ["d", "e"])
result = c._set_dtype(CategoricalDtype(["a", "b"]))
expected = Categorical([None, None, None], categories=["a", "b"])
tm.assert_categorical_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(["foo", "bar", "baz"])
assert result.codes.dtype == "int8"
result = Categorical([f"foo{i:05d}" for i in range(400)])
assert result.codes.dtype == "int16"
result = Categorical([f"foo{i:05d}" for i in range(40000)])
assert result.codes.dtype == "int32"
# adding cats
result = Categorical(["foo", "bar", "baz"])
assert result.codes.dtype == "int8"
result = result.add_categories([f"foo{i:05d}" for i in range(400)])
assert result.codes.dtype == "int16"
# removing cats
result = result.remove_categories([f"foo{i:05d}" for i in range(300)])
assert result.codes.dtype == "int8"
@pytest.mark.parametrize("ordered", [True, False])
def test_astype(self, ordered):
# string
cat = Categorical(list("abbaaccc"), ordered=ordered)
result = cat.astype(object)
expected = np.array(cat)
tm.assert_numpy_array_equal(result, expected)
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
cat.astype(float)
# numeric
cat = Categorical([0, 1, 2, 2, 1, 0, 1, 0, 2], ordered=ordered)
result = cat.astype(object)
expected = np.array(cat, dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = cat.astype(int)
expected = np.array(cat, dtype=int)
tm.assert_numpy_array_equal(result, expected)
result = cat.astype(float)
expected = np.array(cat, dtype=float)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype_ordered", [True, False])
@pytest.mark.parametrize("cat_ordered", [True, False])
def test_astype_category(self, dtype_ordered, cat_ordered):
# GH 10696/18593
data = list("abcaacbab")
cat = Categorical(data, categories=list("bac"), ordered=cat_ordered)
# standard categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = cat.astype(dtype)
expected = Categorical(data, categories=cat.categories, ordered=dtype_ordered)
tm.assert_categorical_equal(result, expected)
# non-standard categories
dtype = CategoricalDtype(list("adc"), dtype_ordered)
result = cat.astype(dtype)
expected = Categorical(data, dtype=dtype)
tm.assert_categorical_equal(result, expected)
if dtype_ordered is False:
# dtype='category' can't specify ordered, so only test once
result = cat.astype("category")
expected = cat
tm.assert_categorical_equal(result, expected)
def test_iter_python_types(self):
# GH-19909
cat = Categorical([1, 2])
assert isinstance(list(cat)[0], int)
assert isinstance(cat.tolist()[0], int)
def test_iter_python_types_datetime(self):
cat = Categorical([Timestamp("2017-01-01"), Timestamp("2017-01-02")])
assert isinstance(list(cat)[0], Timestamp)
assert isinstance(cat.tolist()[0], Timestamp)
|
bsd-3-clause
|
RobertABT/heightmap
|
build/matplotlib/examples/axes_grid/inset_locator_demo2.py
|
8
|
1239
|
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import numpy as np
def get_demo_image():
from matplotlib.cbook import get_sample_data
import numpy as np
f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False)
z = np.load(f)
# z is a numpy array of 15x15
return z, (-3,4,-4,3)
fig, ax = plt.subplots(figsize=[5,4])
# prepare the demo image
Z, extent = get_demo_image()
Z2 = np.zeros([150, 150], dtype="d")
ny, nx = Z.shape
Z2[30:30+ny, 30:30+nx] = Z
# extent = [-3, 4, -4, 3]
ax.imshow(Z2, extent=extent, interpolation="nearest",
origin="lower")
axins = zoomed_inset_axes(ax, 6, loc=1) # zoom = 6
axins.imshow(Z2, extent=extent, interpolation="nearest",
origin="lower")
# sub region of the original image
x1, x2, y1, y2 = -1.5, -0.9, -2.5, -1.9
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
plt.xticks(visible=False)
plt.yticks(visible=False)
# draw a bbox of the region of the inset axes in the parent axes and
# connecting lines between the bbox and the inset axes area
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
plt.draw()
plt.show()
|
mit
|
kashif/scikit-learn
|
examples/model_selection/randomized_search.py
|
44
|
3253
|
"""
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
|
bsd-3-clause
|
akrherz/idep
|
scripts/cscap/monthly.py
|
2
|
2375
|
"""Make plots of monthly values or differences"""
from __future__ import print_function
import calendar
from pandas.io.sql import read_sql
import matplotlib.pyplot as plt
from pyiem.util import get_dbconn
PGCONN = get_dbconn("idep")
def get_scenario(scenario):
df = read_sql(
"""
WITH yearly as (
SELECT huc_12, generate_series(2008, 2016) as yr
from huc12 where states = 'IA' and scenario = 0),
combos as (
SELECT huc_12, yr, generate_series(1, 12) as mo from yearly),
results as (
SELECT r.huc_12, extract(year from valid)::int as yr,
extract(month from valid)::int as mo,
sum(qc_precip) as precip, sum(avg_runoff) as runoff,
sum(avg_delivery) as delivery,
sum(avg_loss) as detachment from results_by_huc12 r
WHERE r.scenario = %s and r.valid >= '2008-01-01'
and r.valid < '2017-01-01' GROUP by r.huc_12, yr, mo),
agg as (
SELECT c.huc_12, c.yr, c.mo, coalesce(r.precip, 0) as precip,
coalesce(r.runoff, 0) as runoff,
coalesce(r.delivery, 0) as delivery,
coalesce(r.detachment, 0) as detachment
from combos c LEFT JOIN results r on (c.huc_12 = r.huc_12 and
c.yr = r.yr and c.mo = r.mo))
select mo,
avg(runoff) / 25.4 as runoff_in,
avg(delivery) * 4.463 as delivery_ta,
avg(detachment) * 4.463 as detachment_ta
from agg GROUP by mo ORDER by mo ASC
""",
PGCONN,
params=(scenario,),
index_col="mo",
)
return df
def main():
"""Go Main"""
adf = get_scenario(0)
b25 = get_scenario(25)
b26 = get_scenario(26)
delta25 = b25 - adf
delta26 = b26 - adf
(fig, ax) = plt.subplots(1, 1)
ax.bar(
delta25.index.values - 0.2,
delta25["delivery_ta"].values,
width=0.4,
label="HI 0.8",
)
ax.bar(
delta26.index.values + 0.2,
delta26["delivery_ta"].values,
width=0.4,
label="HI 0.9",
)
ax.legend(loc="best")
ax.grid(True)
ax.set_title("2008-2016 Change in Delivery vs DEP Baseline")
ax.set_ylabel("Change [tons/acre]")
ax.set_xticks(range(1, 13))
ax.set_xticklabels(calendar.month_abbr[1:])
fig.savefig("test.png")
if __name__ == "__main__":
main()
|
mit
|
louisLouL/pair_trading
|
capstone_env/lib/python3.6/site-packages/pandas/core/groupby.py
|
4
|
148451
|
import types
from functools import wraps
import numpy as np
import datetime
import collections
import warnings
import copy
from textwrap import dedent
from pandas.compat import (
zip, range, lzip,
callable, map
)
from pandas import compat
from pandas.compat.numpy import function as nv, _np_version_under1p8
from pandas.compat import set_function_name
from pandas.core.dtypes.common import (
is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype,
is_interval_dtype,
is_datetimelike,
is_datetime64_any_dtype,
is_bool, is_integer_dtype,
is_complex_dtype,
is_bool_dtype,
is_scalar,
is_list_like,
needs_i8_conversion,
_ensure_float64,
_ensure_platform_int,
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_float)
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.missing import isnull, notnull, _maybe_fill
from pandas.core.common import (_values_from_object, AbstractMethodError,
_default_index)
from pandas.core.base import (PandasObject, SelectionMixin, GroupByError,
DataError, SpecificationError)
from pandas.core.index import (Index, MultiIndex,
CategoricalIndex, _ensure_index)
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.core.sorting import (get_group_index_sorter, get_group_index,
compress_group_index, get_flattened_iterator,
decons_obs_group_ids, get_indexer_dict)
from pandas.util._decorators import (cache_readonly, Substitution,
Appender, make_signature)
from pandas.io.formats.printing import pprint_thing
from pandas.util._validators import validate_kwargs
import pandas.core.algorithms as algorithms
import pandas.core.common as com
from pandas.core.config import option_context
from pandas._libs import lib, groupby as libgroupby, Timestamp, NaT, iNaT
from pandas._libs.lib import count_level_2d
_doc_template = """
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
pandas.Panel.%(name)s
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each group
Notes
-----
Each group is endowed with the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if f returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
Returns
-------
%(klass)s
See also
--------
aggregate, transform
Examples
--------
# Same shape
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
# Broadcastable
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumcount', 'ngroup',
'resample',
'rank', 'quantile',
'fillna',
'mad',
'any', 'all',
'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = ((_common_apply_whitelist |
{'nlargest', 'nsmallest'}) -
{'boxplot'}) | frozenset(['dtype', 'unique'])
_dataframe_apply_whitelist = (_common_apply_whitelist |
frozenset(['dtypes', 'corrwith']))
_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift',
'cummin', 'cummax'])
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target
object
This specification will select a column via the key parameter, or if the
level and/or axis parameters are given, a level of the index of the target
object.
These are local specifications and will override 'global' settings,
that is the parameters axis and level which are passed to the groupby
itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
of available frequencies, please see `here
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`_.
axis : number/name of the axis, defaults to 0
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
Syntactic sugar for ``df.groupby('A')``
>>> df.groupby(Grouper(key='A'))
Specify a resample operation on the column 'date'
>>> df.groupby(Grouper(key='date', freq='60s'))
Specify a resample operation on the level 'date' on the columns axis
with a frequency of 60s
>>> df.groupby(Grouper(level='date', freq='60s', axis=1))
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.core.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=0, sort=False):
self.key = key
self.level = level
self.freq = freq
self.axis = axis
self.sort = sort
self.grouper = None
self.obj = None
self.indexer = None
self.binner = None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
self.grouper, exclusions, self.obj = _get_grouper(self.obj, [self.key],
axis=self.axis,
level=self.level,
sort=self.sort)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, setup the internal grouper
for this particular specification
Parameters
----------
obj : the subject object
sort : bool, default False
whether the resulting grouper should be sorted
"""
if self.key is not None and self.level is not None:
raise ValueError(
"The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
ax = Index(ax._get_level_values(level),
name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError(
"The level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
# use stable sort to support first, last, nth
indexer = self.indexer = ax.argsort(kind='mergesort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis,
convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
""" default to the standard binner here """
group_axis = obj._get_axis(self.axis)
return Grouping(group_axis, None, obj=obj, name=self.key,
level=self.level, sort=self.sort, in_axis=False)
@property
def groups(self):
return self.grouper.groups
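# --- Usage sketch (added; illustrative only, not part of pandas) ---
# A small, self-contained example of the frequency-based Grouper described in
# the class docstring above: group a datetime column into 60-second bins.
# The column names and data are invented for illustration; the guard keeps the
# snippet inert when the module is imported normally.
if __name__ == "__main__":
    import pandas as pd
    demo = pd.DataFrame({'date': pd.date_range('2017-01-01', periods=6,
                                                freq='30s'),
                         'value': [1, 2, 3, 4, 5, 6]})
    # roughly equivalent to demo.resample('60s', on='date')['value'].sum()
    print(demo.groupby(Grouper(key='date', freq='60s'))['value'].sum())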
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = 'plot'
return self._groupby.apply(f)
def __getattr__(self, name):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
class _GroupBy(PandasObject, SelectionMixin):
_group_selection = None
_apply_whitelist = frozenset([])
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False, **kwargs):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.mutated = kwargs.pop('mutated', False)
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys,
axis=axis,
level=level,
sort=sort,
mutated=self.mutated)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
# we accept no other args
validate_kwargs('group', kwargs, {})
def __len__(self):
return len(self.groups)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
we create the grouper on instantiation
sub-classes may have a different policy
"""
pass
@property
def groups(self):
""" dict {group name -> group labels} """
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
safe get multiple indices, translate keys for
datelike to underlying repr
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp, datetime.datetime)):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = ("must supply a tuple to get_group with multiple"
" grouping keys")
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError:
# turns out it wasn't a tuple
msg = ("must supply a a same-length tuple to get_group"
" with multiple grouping keys")
raise ValueError(msg)
converters = [get_converter(s) for s in index_sample]
names = [tuple([f(n) for f, n in zip(converters, name)])
for name in names]
else:
converter = get_converter(index_sample)
names = [converter(name) for name in names]
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection. Used for methods needing to return info on
each group regardless of whether a group selection was previously set.
"""
if self._group_selection is not None:
self._group_selection = None
# GH12839 clear cached selection too when changing group selection
self._reset_cache('_selected_obj')
def _set_group_selection(self):
"""
Create group based selection. Used when selection is not passed
directly but instead via a grouper.
"""
grp = self.grouper
if self.as_index and getattr(grp, 'groupings', None) is not None and \
self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings
if g.level is None and g.in_axis]
if len(groupers):
self._group_selection = ax.difference(Index(groupers)).tolist()
# GH12839 clear selected obj cache when group selection changes
self._reset_cache('_selected_obj')
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(
self._get_indices(self.grouper.result_index)))
result.set_axis(self.axis, index)
result = result.sort_index(axis=self.axis)
result.set_axis(self.axis, self.obj._get_axis(self.axis))
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
plot = property(GroupByPlot)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to setup the selection
# as are not passed directly but in the grouper
self._set_group_selection()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis or \
kwargs_with_axis['axis'] is None:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so need to raise
# ValueError
# if we don't have this method, to indicate to aggregate
# that this column should be marked as an error
try:
return self._aggregate_item_by_item(name,
*args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
@Substitution(name='groupby')
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function (f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform"""
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their callable functions prior, this
# wouldn't be needed
if args or kwargs:
if callable(func):
@wraps(func)
def f(g):
with np.errstate(all='ignore'):
return func(g, *args, **kwargs)
else:
raise ValueError('func must be a callable if args or '
'kwargs are supplied')
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment', None):
return self._python_apply_general(f)
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(
keys,
values,
not_indexed_same=mutated or self.mutated)
def _iterate_slices(self):
yield self._selection_name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
def _cumcount_array(self, ascending=True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Note
----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].astype(np.int64, copy=False)
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original._get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj, numeric_only=False):
"""
try to cast the result to our obj original type,
we may have roundtripped thru object in the mean-time
if numeric_only is True, then only try to cast numerics
and not datetimelikes
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not is_scalar(result):
if numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def _cython_transform(self, how, numeric_only=True):
output = collections.OrderedDict()
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.transform(obj.values, how)
except NotImplementedError:
continue
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_transformed_output(output, names)
def _cython_agg_general(self, how, alt=None, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj, numeric_only=True)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = _ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise AbstractMethodError(self)
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.core.reshape.concat import concat
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in values:
if v is not None:
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
# this is a very unfortunate situation
# we have a multi-index that is NOT lexsorted
# and we have a result which is duplicated
# we can't reindex, so we resort to this
# GH 14776
if isinstance(ax, MultiIndex) and not ax.is_unique:
result = result.take(result.index.get_indexer_for(
ax.values).unique(), axis=self.axis)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
values = reset_identity(values)
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
values = reset_identity(values)
result = concat(values, axis=self.axis)
if (isinstance(result, Series) and
getattr(self, '_selection_name', None) is not None):
result.name = self._selection_name
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype='int64')
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
class GroupBy(_GroupBy):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to construct a GroupBy object, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for a full exposition on these topics and much
more.
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
@Substitution(name='groupby')
@Appender(_doc_template)
def count(self):
"""Compute count of group, excluding missing values"""
# defined here for API doc
raise NotImplementedError
@Substitution(name='groupby')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
nv.validate_groupby_func('mean', args, kwargs, ['numeric_only'])
try:
return self._cython_agg_general('mean', **kwargs)
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_group_selection()
f = lambda x: x.mean(axis=self.axis, **kwargs)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def median(self, **kwargs):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median', **kwargs)
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_group_selection()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis, **kwargs)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def std(self, ddof=1, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
# TODO: implement at Cython level?
nv.validate_groupby_func('std', args, kwargs)
return np.sqrt(self.var(ddof=ddof, **kwargs))
@Substitution(name='groupby')
@Appender(_doc_template)
def var(self, ddof=1, *args, **kwargs):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_groupby_func('var', args, kwargs)
if ddof == 1:
return self._cython_agg_general('var', **kwargs)
else:
self._set_group_selection()
f = lambda x: x.var(ddof=ddof, **kwargs)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
return self.std(ddof=ddof) / np.sqrt(self.count())
@Substitution(name='groupby')
@Appender(_doc_template)
def size(self):
"""Compute group sizes"""
result = self.grouper.size()
if isinstance(self.obj, Series):
result.name = getattr(self.obj, 'name', None)
return result
@classmethod
def _add_numeric_operations(cls):
""" add numeric operations to the GroupBy generically """
def groupby_function(name, alias, npfunc,
numeric_only=True, _convert=False):
_local_template = "Compute %(f)s of group values"
@Substitution(name='groupby', f=name)
@Appender(_doc_template)
@Appender(_local_template)
def f(self, **kwargs):
if 'numeric_only' not in kwargs:
kwargs['numeric_only'] = numeric_only
self._set_group_selection()
try:
return self._cython_agg_general(
alias, alt=npfunc, **kwargs)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(
lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result._convert(datetime=True)
return result
set_function_name(f, name, cls)
return f
def first_compat(x, axis=0):
def first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(first, axis=axis)
else:
return first(x)
def last_compat(x, axis=0):
def last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(last, axis=axis)
else:
return last(x)
cls.sum = groupby_function('sum', 'add', np.sum)
cls.prod = groupby_function('prod', 'prod', np.prod)
cls.min = groupby_function('min', 'min', np.min, numeric_only=False)
cls.max = groupby_function('max', 'max', np.max, numeric_only=False)
cls.first = groupby_function('first', 'first', first_compat,
numeric_only=False, _convert=True)
cls.last = groupby_function('last', 'last', last_compat,
numeric_only=False, _convert=True)
@Substitution(name='groupby')
@Appender(_doc_template)
def ohlc(self):
"""
Compute open, high, low and close values within each group, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
@Appender(DataFrame.describe.__doc__)
@Substitution(name='groupby')
@Appender(_doc_template)
def describe(self, **kwargs):
self._set_group_selection()
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
@Substitution(name='groupby')
@Appender(_doc_template)
def resample(self, rule, *args, **kwargs):
"""
Provide resampling when using a TimeGrouper
Return a new grouper with our resampler appended
"""
from pandas.core.resample import get_resampler_for_grouping
return get_resampler_for_grouping(self, rule, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling
functionality per group
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def expanding(self, *args, **kwargs):
"""
Return an expanding grouper, providing expanding
functionality per group
"""
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(self, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def pad(self, limit=None):
"""
Forward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self.apply(lambda x: x.ffill(limit=limit))
ffill = pad
@Substitution(name='groupby')
@Appender(_doc_template)
def backfill(self, limit=None):
"""
Backward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self.apply(lambda x: x.bfill(limit=limit))
bfill = backfill
@Substitution(name='groupby')
@Appender(_doc_template)
def nth(self, n, dropna=None):
"""
Take the nth row from each group if n is an int, or a subset of rows
if n is a list of ints.
If dropna, will take the nth non-null row; dropna is either
'all' or 'any' (for a DataFrame) or any truthy value (for a Series).
This is equivalent to calling dropna(how=dropna) before the
groupby.
Parameters
----------
n : int or list of ints
a single nth value for the row or a list of nth values
dropna : None or str, optional
apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
B
A
1 NaN
2 3.0
>>> g.nth(1)
B
A
1 2.0
2 5.0
>>> g.nth(-1)
B
A
1 4.0
2 5.0
>>> g.nth([0, 1])
B
A
1 NaN
1 2.0
2 3.0
2 5.0
Specifying ``dropna`` allows the count to ignore NaN
>>> g.nth(0, dropna='any')
B
A
1 2.0
2 3.0
NaNs denote that the group was exhausted when using dropna
>>> g.nth(3, dropna='any')
B
A
1 NaN
2 NaN
Specifying ``as_index=False`` in ``groupby`` keeps the original index.
>>> df.groupby('A', as_index=False).nth(1)
A B
1 1 2.0
4 2 5.0
"""
if isinstance(n, int):
nth_values = [n]
elif isinstance(n, (set, list, tuple)):
nth_values = list(set(n))
if dropna is not None:
raise ValueError(
"dropna option with a list of nth values is not supported")
else:
raise TypeError("n needs to be an int or a list/set/tuple of ints")
nth_values = np.array(nth_values, dtype=np.intp)
self._set_group_selection()
if not dropna:
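# positions counted from the start of each group match the
# non-negative n values, while 1-based positions counted from the
# end match the negated negative n values (e.g. n=-1 -> last row)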
mask = np.in1d(self._cumcount_array(), nth_values) | \
np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)
out = self._selected_obj[mask]
if not self.as_index:
return out
ids, _, _ = self.grouper.group_info
out.index = self.grouper.result_index[ids[mask]]
return out.sort_index() if self.sort else out
if isinstance(self._selected_obj, DataFrame) and \
dropna not in ['any', 'all']:
# Note: when aggregating, the picker doesn't raise this; it just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available
# (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
else:
# create a grouper with the original parameters, but on the dropped
# object
grouper, _, _ = _get_grouper(dropped, key=self.keys,
axis=self.axis, level=self.level,
sort=self.sort,
mutated=self.mutated)
grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
sizes, result = grb.size(), grb.nth(n)
mask = (sizes < max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or \
len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
@Substitution(name='groupby')
@Appender(_doc_template)
def ngroup(self, ascending=True):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
.. versionadded:: 0.20.2
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame({"A": list("aaabba")})
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').ngroup()
0 0
1 0
2 0
3 1
4 1
5 0
dtype: int64
>>> df.groupby('A').ngroup(ascending=False)
0 1
1 1
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
0 0
1 0
2 1
3 3
4 2
5 0
dtype: int64
See also
--------
.cumcount : Number the rows in each group.
"""
self._set_group_selection()
index = self._selected_obj.index
result = Series(self.grouper.group_info[0], index)
if not ascending:
result = self.ngroups - 1 - result
return result
@Substitution(name='groupby')
@Appender(_doc_template)
def cumcount(self, ascending=True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
See also
--------
.ngroup : Number the groups themselves.
"""
self._set_group_selection()
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
@Substitution(name='groupby')
@Appender(_doc_template)
def cumprod(self, axis=0, *args, **kwargs):
"""Cumulative product for each group"""
nv.validate_groupby_func('cumprod', args, kwargs, ['numeric_only'])
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
return self._cython_transform('cumprod', **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def cumsum(self, axis=0, *args, **kwargs):
"""Cumulative sum for each group"""
nv.validate_groupby_func('cumsum', args, kwargs, ['numeric_only'])
if axis != 0:
return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))
return self._cython_transform('cumsum', **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def cummin(self, axis=0, **kwargs):
"""Cumulative min for each group"""
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
return self._cython_transform('cummin', numeric_only=False)
@Substitution(name='groupby')
@Appender(_doc_template)
def cummax(self, axis=0, **kwargs):
"""Cumulative max for each group"""
if axis != 0:
return self.apply(lambda x: np.maximum.accumulate(x, axis))
return self._cython_transform('cummax', numeric_only=False)
@Substitution(name='groupby')
@Appender(_doc_template)
def shift(self, periods=1, freq=None, axis=0):
"""
Shift each group by periods observations
Parameters
----------
periods : integer, default 1
number of periods to shift
freq : frequency string
axis : axis to shift, default 0
"""
if freq is not None or axis != 0:
return self.apply(lambda x: x.shift(periods, freq, axis))
labels, _, ngroups = self.grouper.group_info
# filled in by Cython
indexer = np.zeros_like(labels)
libgroupby.group_shift_indexer(indexer, labels, ngroups, periods)
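# indexer now holds, for each row, the position of the row ``periods``
# steps earlier within the same group (later for negative periods),
# or -1 where no such row exists; take_nd below turns -1 into NaN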
output = {}
for name, obj in self._iterate_slices():
output[name] = algorithms.take_nd(obj.values, indexer)
return self._wrap_transformed_output(output)
@Substitution(name='groupby')
@Appender(_doc_template)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
self._reset_group_selection()
mask = self._cumcount_array() < n
return self._selected_obj[mask]
@Substitution(name='groupby')
@Appender(_doc_template)
def tail(self, n=5):
"""
Returns last n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').head(1)
A B
0 a 1
2 b 1
"""
self._reset_group_selection()
mask = self._cumcount_array(ascending=False) < n
return self._selected_obj[mask]
GroupBy._add_numeric_operations()
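# The call above attaches the generated sum/prod/min/max/first/last
# aggregation methods defined in _add_numeric_operations to GroupBy.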
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
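# Illustrative dispatch (a sketch, not executed here):
#
#   groupby(pd.Series([1, 2, 3]), [0, 0, 1])   # -> SeriesGroupBy
#   groupby(pd.DataFrame({'A': [1, 2]}), 'A')  # -> DataFrameGroupBy
#
# both are normally reached through obj.groupby(...).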
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds
the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True,
mutated=False):
self._filter_empty_groups = self.compressed = len(groupings) != 1
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.mutated = mutated
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
return get_flattened_iterator(comp_ids,
ngroups,
self.levels,
self.labels)
def apply(self, f, data, axis=0):
mutated = self.mutated
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except Exception:
# raise this error to the caller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index)
for ping in self.groupings]
return get_indexer_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
ids, _, ngroup = self.group_info
ids = _ensure_platform_int(ids)
out = np.bincount(ids[ids != -1], minlength=ngroup or None)
return Series(out,
index=self.result_index,
dtype='int64')
@cache_readonly
def _max_groupsize(self):
"""
Compute size of largest group
"""
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby)
@cache_readonly
def is_monotonic(self):
# return if my group orderings are monotonic
return Index(self.group_info[0]).is_monotonic
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = _ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
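# comp_ids: per-row compressed group id; obs_group_ids: mapping from each
# compressed id back to its observed position in the full (possibly
# cartesian) group index; ngroups: number of observed groups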
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape,
sort=True, xnull=True)
return compress_group_index(group_index, sort=self.sort)
ping = self.groupings[0]
return ping.labels, np.arange(len(ping.group_index))
@cache_readonly
def ngroups(self):
return len(self.result_index)
@property
def recons_labels(self):
comp_ids, obs_ids, _ = self.group_info
labels = (ping.labels for ping in self.groupings)
return decons_obs_group_ids(comp_ids,
obs_ids, self.shape, labels, xnull=True)
@cache_readonly
def result_index(self):
if not self.compressed and len(self.groupings) == 1:
return self.groupings[0].group_index.rename(self.names[0])
return MultiIndex(levels=[ping.group_index for ping in self.groupings],
labels=self.recons_labels,
verify_integrity=False,
names=self.names)
def get_group_levels(self):
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
name_list = []
for ping, labels in zip(self.groupings, self.recons_labels):
labels = _ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
# ------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'aggregate': {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'ohlc': 'group_ohlc',
},
'transform': {
'cumprod': 'group_cumprod',
'cumsum': 'group_cumsum',
'cummin': 'group_cummin',
'cummax': 'group_cummax',
}
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
def _is_builtin_func(self, arg):
"""
if we define a builtin function for this argument, return it,
otherwise return the arg
"""
return SelectionMixin._builtin_table.get(arg, arg)
def _get_cython_function(self, kind, how, values, is_numeric):
dtype_str = values.dtype.name
def get_func(fname):
# see if there is a fused-type version of function
# only valid for numeric
f = getattr(libgroupby, fname, None)
if f is not None and is_numeric:
return f
# otherwise find dtype-specific version, falling back to object
for dt in [dtype_str, 'object']:
f = getattr(libgroupby, "%s_%s" % (fname, dtype_str), None)
if f is not None:
return f
ftype = self._cython_functions[kind][how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this"
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def _cython_operation(self, kind, values, how, axis):
assert kind in ['transform', 'aggregate']
# can we do this operation with our cython functions
# if not raise NotImplementedError
# we raise NotImplemented if this is an invalid operation
# entirely, e.g. adding datetimes
# categoricals are only 1d, so we
# are not set up for dim transforming
if is_categorical_dtype(values):
raise NotImplementedError(
"categoricals are not support in cython ops ATM")
elif is_datetime64_any_dtype(values):
if how in ['add', 'prod', 'cumsum', 'cumprod']:
raise NotImplementedError(
"datetime64 type does not support {} "
"operations".format(how))
elif is_timedelta64_dtype(values):
if how in ['prod', 'cumprod']:
raise NotImplementedError(
"timedelta64 type does not support {} "
"operations".format(how))
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError("arity of more than 1 is not "
"supported for the 'how' argument")
out_shape = (self.ngroups,) + values.shape[1:]
is_datetimelike = needs_i8_conversion(values.dtype)
is_numeric = is_numeric_dtype(values.dtype)
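# coerce values to a dtype the cython kernels understand:
# datetimelike -> int64 view, bool -> float64, int -> float64 when the
# iNaT sentinel is present (else int64), other non-complex numerics
# -> float64, everything else -> object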
if is_datetimelike:
values = values.view('int64')
is_numeric = True
elif is_bool_dtype(values.dtype):
values = _ensure_float64(values)
elif is_integer_dtype(values):
# we use iNaT for the missing value on ints
# so pre-convert to guard this condition
if (values == iNaT).any():
values = _ensure_float64(values)
else:
values = values.astype('int64', copy=False)
elif is_numeric and not is_complex_dtype(values):
values = _ensure_float64(values)
else:
values = values.astype(object)
try:
func, dtype_str = self._get_cython_function(
kind, how, values, is_numeric)
except NotImplementedError:
if is_numeric:
values = _ensure_float64(values)
func, dtype_str = self._get_cython_function(
kind, how, values, is_numeric)
else:
raise
if is_numeric:
out_dtype = '%s%d' % (values.dtype.kind, values.dtype.itemsize)
else:
out_dtype = 'object'
labels, _, _ = self.group_info
if kind == 'aggregate':
result = _maybe_fill(np.empty(out_shape, dtype=out_dtype),
fill_value=np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(
result, counts, values, labels, func, is_numeric,
is_datetimelike)
elif kind == 'transform':
result = _maybe_fill(np.empty_like(values, dtype=out_dtype),
fill_value=np.nan)
result = self._transform(
result, values, labels, func, is_numeric, is_datetimelike)
if is_integer_dtype(result):
mask = result == iNaT
if mask.any():
result = result.astype('float64')
result[mask] = np.nan
if kind == 'aggregate' and \
self._filter_empty_groups and not counts.all():
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
_ensure_object(result),
(counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def aggregate(self, values, how, axis=0):
return self._cython_operation('aggregate', values, how, axis)
def transform(self, values, how, axis=0):
return self._cython_operation('transform', values, how, axis)
def _aggregate(self, result, counts, values, comp_ids, agg_func,
is_numeric, is_datetimelike):
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently "
"limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def _transform(self, result, values, comp_ids, transform_func,
is_numeric, is_datetimelike):
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently "
"limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
transform_func(result[:, :, i], values,
comp_ids, is_datetimelike)
else:
transform_func(result, values, comp_ids, is_datetimelike)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = self._is_builtin_func(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = get_group_index_sorter(group_index, ngroups)
obj = obj.take(indexer, convert=False).to_dense()
group_index = algorithms.take_nd(
group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
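# Worked example (illustrative only): with values = [1, 2, 3, 7, 8],
# binner = [0, 3, 10] and closed='left', the scan above yields
# bins = [2, 5], i.e. values[0:2] fall in the first bin and
# values[2:5] in the second.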
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False, mutated=False):
self.bins = _ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
self.mutated = mutated
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start, edge: data._slice(
slice(start, edge), axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start, edge: data[slice(start, edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not NaT:
yield label, slicer(start, edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start, None)
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def group_info(self):
ngroups = self.ngroups
obs_group_ids = np.arange(ngroups)
rep = np.diff(np.r_[0, self.bins])
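# rep[i] is the number of rows falling into bin i (self.bins holds
# cumulative end offsets); if the first bin label was NaT and dropped
# from result_index, its rows are tagged with comp_id -1 below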
rep = _ensure_platform_int(rep)
if ngroups == len(self.bins):
comp_ids = np.repeat(np.arange(ngroups), rep)
else:
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
return comp_ids.astype('int64', copy=False), \
obs_group_ids.astype('int64', copy=False), ngroups
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
if len(self.binlabels) != 0 and isnull(self.binlabels[0]):
return self.binlabels[1:]
return self.binlabels
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
return [Grouping(lvl, lvl, in_axis=False, level=None, name=name)
for lvl, name in zip(self.levels, self.names)]
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
# ----------------------------------------------------------------------
# cython aggregation
_cython_functions = copy.deepcopy(BaseGrouper._cython_functions)
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
in_axis : if the Grouping is a column in self.obj and hence among
Groupby.exclusions list
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True, in_axis=False):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
self.in_axis = in_axis
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._should_compress = True
# we have a single grouper which may be a myriad of things,
# some of which are dependent on the passed-in level
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
if self.name is None:
self.name = index.names[level]
self.grouper, self._labels, self._group_index = \
index._get_grouper_for_level(self.grouper, level)
else:
if self.grouper is None and self.name is not None:
self.grouper = self.obj[self.name]
elif isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif is_categorical_dtype(self.grouper):
self.grouper = self.grouper._codes_for_groupby(self.sort)
# we make a CategoricalIndex out of the cat grouper
# preserving the categories / ordered attributes
self._labels = self.grouper.codes
c = self.grouper.categories
self._group_index = CategoricalIndex(
Categorical.from_codes(np.arange(len(c)),
categories=c,
ordered=self.grouper.ordered))
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# we are done
if isinstance(self.grouper, Grouping):
self.grouper = self.grouper.grouper
# no level passed
elif not isinstance(self.grouper,
(Series, Index, Categorical, np.ndarray)):
if getattr(self.grouper, 'ndim', 1) != 1:
t = self.name or str(type(self.grouper))
raise ValueError("Grouper for '%s' not 1-dimensional" % t)
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have
# Timestamp-like / Timedelta-like values
if getattr(self.grouper, 'dtype', None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping({0})'.format(self.name)
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
values = _ensure_categorical(self.grouper)
return values._reverse_indexer()
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._labels is None or self._group_index is None:
labels, uniques = algorithms.factorize(
self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
@cache_readonly
def groups(self):
return self.index.groupby(Categorical.from_codes(self.labels,
self.group_index))
def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
mutated=False):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis, level, and sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
# allow level to be a length-one list-like object
# (e.g., level=[0])
# GH 13901
if is_list_like(level):
nlevels = len(level)
if nlevels == 1:
level = level[0]
elif nlevels == 0:
raise ValueError('No group keys passed!')
else:
raise ValueError('multiple levels only valid with '
'MultiIndex')
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0 or level < -1:
raise ValueError('level > 0 or level < -1 only valid with '
' MultiIndex')
level = None
key = group_axis
# a passed-in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
match_axis_length = False
else:
keys = key
match_axis_length = len(keys) == len(group_axis)
# what are we after, exactly?
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_groupers = any(isinstance(g, Grouper) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if not any_callable and not all_in_columns and \
not any_arraylike and not any_groupers and \
match_axis_length and level is None:
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
# if the actual grouper should be obj[key]
def is_in_axis(key):
if not _is_label_like(key):
try:
obj._data.items.get_loc(key)
except Exception:
return False
return True
# if the grouper is obj[name]
def is_in_obj(gpr):
try:
return id(gpr) == id(obj[gpr.name])
except Exception:
return False
for i, (gpr, level) in enumerate(zip(keys, levels)):
if is_in_obj(gpr): # df.groupby(df['name'])
in_axis, name = True, gpr.name
exclusions.append(name)
elif is_in_axis(gpr): # df.groupby('name')
if gpr in obj:
if gpr in obj.index.names:
warnings.warn(
("'%s' is both a column name and an index level.\n"
"Defaulting to column but "
"this will raise an ambiguity error in a "
"future version") % gpr,
FutureWarning, stacklevel=5)
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
elif gpr in obj.index.names:
in_axis, name, level, gpr = False, None, gpr, None
else:
raise KeyError(gpr)
elif isinstance(gpr, Grouper) and gpr.key is not None:
# Add key to exclusions
exclusions.append(gpr.key)
in_axis, name = False, None
else:
in_axis, name = False, None
if is_categorical_dtype(gpr) and len(gpr) != len(obj):
raise ValueError("Categorical dtype grouper must "
"have len(grouper) == len(data)")
# create the Grouping
# allow passing the actual Grouping as the gpr
ping = Grouping(group_axis,
gpr,
obj=obj,
name=name,
level=level,
sort=sort,
in_axis=in_axis) \
if not isinstance(gpr, Grouping) else gpr
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)
return grouper, exclusions, obj
def _is_label_like(val):
return (isinstance(val, compat.string_types) or
(val is not None and is_scalar(val)))
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper._values
else:
return grouper.reindex(axis)._values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise ValueError('Grouper and axis must be same length')
return grouper
else:
return grouper
def _whitelist_method_generator(klass, whitelist):
"""
Yields all GroupBy member defs for DataFrame/Series names in _whitelist.
Parameters
----------
klass - class where members are defined. Should be Series or DataFrame
whitelist - list of names of klass methods to be constructed
Returns
-------
The generator yields a sequence of strings, each suitable for exec'ing,
that define implementations of the named methods for DataFrameGroupBy
or SeriesGroupBy.
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
method_wrapper_template = \
"""def %(name)s(%(sig)s) :
\"""
%(doc)s
\"""
f = %(self)s.__getattr__('%(name)s')
return f(%(args)s)"""
property_wrapper_template = \
"""@property
def %(name)s(self) :
\"""
%(doc)s
\"""
return self.__getattr__('%(name)s')"""
for name in whitelist:
# don't override anything that was explicitly defined
# in the base class
if hasattr(GroupBy, name):
continue
# ugly, but we need the name string itself in the method.
f = getattr(klass, name)
doc = f.__doc__
doc = doc if type(doc) == str else ''
if isinstance(f, types.MethodType):
wrapper_template = method_wrapper_template
decl, args = make_signature(f)
# pass args by name to f because otherwise
# GroupBy._make_wrapper won't know whether
# we passed in an axis parameter.
args_by_name = ['{0}={0}'.format(arg) for arg in args[1:]]
params = {'name': name,
'doc': doc,
'sig': ','.join(decl),
'self': args[0],
'args': ','.join(args_by_name)}
else:
wrapper_template = property_wrapper_template
params = {'name': name, 'doc': doc}
yield wrapper_template % params
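# For illustration only (the real signature comes from make_signature):
# a whitelisted method ``foo(self, x)`` expands to roughly
#
#   def foo(self, x):
#       """<original docstring>"""
#       f = self.__getattr__('foo')
#       return f(x=x)
#
# while a whitelisted property expands to a property that simply
# returns self.__getattr__('<name>').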
class SeriesGroupBy(GroupBy):
#
# Make class defs of attributes on SeriesGroupBy whitelist
_apply_whitelist = _series_apply_whitelist
for _def_str in _whitelist_method_generator(Series,
_series_apply_whitelist):
exec(_def_str)
@property
def _selection_name(self):
"""
since we are a Series, we by definition only have
a single name; it may be the result of a selection or
the name of our object
"""
if self._selection is None:
return self.obj.name
else:
return self._selection
_agg_doc = dedent("""
Examples
--------
>>> s = Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
See also
--------
pandas.Series.groupby.apply
pandas.Series.groupby.transform
pandas.Series.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='Series',
versionadded=''))
def aggregate(self, func_or_funcs, *args, **kwargs):
_level = kwargs.pop('_level', None)
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs,
(_level or 0) + 1)
else:
cyfunc = self._is_cython_func(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
# _level is handled at a higher level
if not _level and isinstance(ret, dict):
from pandas import concat
ret = concat(ret, axis=1)
return ret
agg = aggregate
def _aggregate_multiple_funcs(self, arg, _level):
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
if isinstance(self._selected_obj, Series) and _level <= 1:
warnings.warn(
("using a dict on a Series for aggregation\n"
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=3)
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
obj = self
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
# reset the cache so that we
# only include the named selection
if name in self._selected_obj:
obj = copy.copy(obj)
obj._reset_cache()
obj._selection = name
results[name] = obj.aggregate(func)
if isinstance(list(compat.itervalues(results))[0],
DataFrame):
# let higher level handle
if _level:
return results
return list(compat.itervalues(results))[0]
return DataFrame(results, columns=columns)
def _wrap_output(self, output, index, names=None):
""" common agg/transform wrapping logic """
output = output[self._selection_name]
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self._selection_name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_aggregated_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.obj.index,
names=names)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self._selection_name, index=keys)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
result = DataFrame(values, index=index).stack()
result.name = self._selection_name
return result
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(),
name=self._selection_name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
@Substitution(klass='Series', selected='A.')
@Appender(_transform_template)
def transform(self, func, *args, **kwargs):
func = self._is_cython_func(func) or func
# if string function
if isinstance(func, compat.string_types):
if func in _cython_transforms:
# cythonized transform
return getattr(self, func)(*args, **kwargs)
else:
# cythonized aggregation and merge
return self._transform_fast(
lambda: getattr(self, func)(*args, **kwargs))
# reg transform
klass = self._selected_obj.__class__
results = []
wrapper = lambda x: func(x, *args, **kwargs)
for name, group in self:
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
indexer = self._get_index(name)
s = klass(res, indexer)
results.append(s)
from pandas.core.reshape.concat import concat
result = concat(results).sort_index()
# we will only try to coerce the result type if
# we have a numeric dtype, as these are *always* udfs;
# the cython functions take a different path (and do their own casting)
dtype = self._selected_obj.dtype
if is_numeric_dtype(dtype):
result = maybe_downcast_to_dtype(result, dtype)
result.name = self._selected_obj.name
result.index = self._selected_obj.index
return result
def _transform_fast(self, func):
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self, func)
ids, _, ngroup = self.grouper.group_info
cast = (self.size().fillna(0) > 0).any()
out = algorithms.take_1d(func().values, ids)
if cast:
out = self._try_cast(out, self.obj)
return Series(out, index=self.obj.index, name=self.obj.name)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) for name, group in self
if true_and_notnull(group)]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna=True):
""" Returns number of unique elements in the group """
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
assert val.dtype == object, \
'val.dtype must be object, got %s' % val.dtype
val, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
_isnull = lambda a: a == -1
else:
_isnull = isnull
ids, val = ids[sorter], val[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, val[1:] != val[:-1]]
# 1st item of each group is a new unique observation
mask = _isnull(val)
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype('int64', copy=False)
if len(ids):
res = out if ids[0] != -1 else out[1:]
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids[idx]] = out
return Series(res,
index=ri,
name=self._selection_name)
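# Sketch of the counting trick above: after lexsorting by (group id,
# value), ``idx`` marks the first position of each group and ``inc``
# marks positions where the sorted value changes; np.add.reduceat over
# ``inc`` at ``idx`` therefore counts the distinct values per group.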
@Appender(Series.describe.__doc__)
def describe(self, **kwargs):
self._set_group_selection()
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
from functools import partial
from pandas.core.reshape.tile import cut
from pandas.core.reshape.merge import _get_join_indexers
if bins is not None and not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return self.apply(Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins)
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Series(val), bins, include_lowest=True)
lev = lab.cat.categories
lab = lev.take(lab.cat.codes)
llab = lambda lab, inc: lab[inc]._multiindex.labels[-1]
if is_interval_dtype(lab):
# TODO: should we do this inside II?
sorter = np.lexsort((lab.left, lab.right, ids))
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self._selection_name]
if dropna:
mask = labels[-1] != -1
if mask.all():
dropna = False
else:
out, labels = out[mask], [label[mask] for label in labels]
if normalize:
out = out.astype('float')
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
if _np_version_under1p8:
mi, ml = algorithms.factorize(m)
d[ml] = d[ml] - np.bincount(mi)
else:
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, labels[-1] = out[sorter], labels[-1][sorter]
if bins is None:
mi = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
if is_integer_dtype(out):
out = _ensure_int64(out)
return Series(out, index=mi, name=self._selection_name)
# for compat. with libgroupby.value_counts we need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype='bool')
for lab in labels[:-1]:
diff |= np.r_[True, lab[1:] != lab[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin),
np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, labels[-1]]
_, idx = _get_join_indexers(left, right, sort=False, how='left')
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
labels = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
labels.append(left[-1])
mi = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
if is_integer_dtype(out):
out = _ensure_int64(out)
return Series(out, index=mi, name=self._selection_name)
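    # Illustrative usage sketch (not part of the original pandas source): the
    # method above produces, per group, the counts of the observed values as a
    # Series indexed by (group key, value). The exact output layout in this
    # hedged doctest-style example is an assumption.
    #
    # >>> s = Series(['a', 'a', 'b', 'a'])
    # >>> s.groupby(['x', 'x', 'x', 'y']).value_counts()
    # x  a    2
    #    b    1
    # y  a    1
    # dtype: int64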
def count(self):
""" Compute count of group, excluding missing values """
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()
mask = (ids != -1) & ~isnull(val)
ids = _ensure_platform_int(ids)
out = np.bincount(ids[mask], minlength=ngroups or None)
return Series(out,
index=self.grouper.result_index,
name=self._selection_name,
dtype='int64')
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, alt=None, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(
how, alt=alt, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, alt=None, numeric_only=True):
# TODO: the actual managing of mgr_locs is a PITA
# here, it should happen via BlockManager.combine
data, agg_axis = self._get_data_to_aggregate()
if numeric_only:
data = data.get_numeric_data(copy=False)
new_blocks = []
new_items = []
deleted_items = []
for block in data.blocks:
locs = block.mgr_locs.as_array
try:
result, _ = self.grouper.aggregate(
block.values, how, axis=agg_axis)
except NotImplementedError:
# generally if we have numeric_only=False
# and non-applicable functions
# try to python agg
if alt is None:
# we cannot perform the operation
# in an alternate way, exclude the block
deleted_items.append(locs)
continue
# call our grouper again with only this block
obj = self.obj[data.items[locs]]
s = groupby(obj, self.grouper)
result = s.aggregate(lambda x: alt(x, axis=self.axis))
newb = result._data.blocks[0]
finally:
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = block.make_block(result)
new_items.append(locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
# reset the locs in the blocks to correspond to our
# current ordering
indexer = np.concatenate(new_items)
new_items = data.items.take(np.sort(indexer))
if len(deleted_items):
# we need to adjust the indexer to account for the
# items we have removed
# really should be done in internals :<
deleted = np.concatenate(deleted_items)
ai = np.arange(len(data))
mask = np.zeros(len(data))
mask[deleted] = 1
indexer = (ai - mask.cumsum())[indexer]
offset = 0
for b in new_blocks:
l = len(b.mgr_locs)
b.mgr_locs = indexer[offset:(offset + l)]
offset += l
return new_items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
def aggregate(self, arg, *args, **kwargs):
_level = kwargs.pop('_level', None)
result, how = self._aggregate(arg, _level=_level, *args, **kwargs)
if how is None:
return result
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs(
[arg], _level=_level, _axis=self.axis)
result.columns = Index(
result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
return result._convert(datetime=True)
agg = aggregate
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise AbstractMethodError(self)
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors = None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors = e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
from pandas.core.tools.numeric import to_numeric
if len(keys) == 0:
return DataFrame(index=keys)
key_names = self.grouper.names
# GH12824.
def first_non_None_value(values):
try:
v = next(v for v in values if v is not None)
except StopIteration:
return None
return v
v = first_non_None_value(values)
if v is None:
# GH9684. If all values are None, then this will throw an error.
# We'd prefer it return an empty dataframe.
return DataFrame()
elif isinstance(v, DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
v = first_non_None_value(values)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.core.reshape.concat import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(
keys, values, not_indexed_same=True,
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as its faster than concat
# and if we have mi-columns
if isinstance(v.index,
MultiIndex) or key_index is None:
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values, index=key_index,
columns=index)
else:
# GH5788 instead of stacking; concat gets the
# dtypes correct
from pandas.core.reshape.concat import concat
result = concat(values, keys=key_index,
names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values.T, index=v.index,
columns=key_index)
except (ValueError, AttributeError):
                # GH1738: values is a list of arrays of unequal lengths; fall
                # through to the outer else clause
return Series(values, index=key_index,
name=self._selection_name)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
so = self._selected_obj
if (so.ndim == 2 and so.dtypes.apply(is_datetimelike).any()):
result = result.apply(
lambda x: to_numeric(x, errors='ignore'))
date_cols = self._selected_obj.select_dtypes(
include=['datetime', 'timedelta']).columns
date_cols = date_cols.intersection(result.columns)
result[date_cols] = (result[date_cols]
._convert(datetime=True,
coerce=True))
else:
result = result._convert(datetime=True)
return self._reindex_output(result)
# values are not series or array-like but scalars
else:
# only coerce dates if we find at least 1 datetime
coerce = True if any([isinstance(x, Timestamp)
for x in values]) else False
# self._selection_name not passed through to Series as the
# result should not take the name of original selection
# of columns
return (Series(values, index=key_index)
._convert(datetime=True,
coerce=coerce))
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
from pandas.core.reshape.concat import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except ValueError:
msg = 'transform must return a scalar value for each group'
raise ValueError(msg)
else:
res = path(group)
if isinstance(res, Series):
# we need to broadcast across the
# other dimension; this will preserve dtypes
# GH14457
if not np.prod(group.shape):
continue
elif res.index.is_(obj.index):
r = concat([res] * len(group.columns), axis=1)
r.columns = group.columns
r.index = group.index
else:
r = DataFrame(
np.concatenate([res.values] * len(group.index)
).reshape(group.shape),
columns=group.columns, index=group.index)
applied.append(r)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
return self._set_result_index_ordered(concatenated)
@Substitution(klass='DataFrame', selected='')
@Appender(_transform_template)
def transform(self, func, *args, **kwargs):
# optimized transforms
func = self._is_cython_func(func) or func
if isinstance(func, compat.string_types):
if func in _cython_transforms:
# cythonized transform
return getattr(self, func)(*args, **kwargs)
else:
# cythonized aggregation and merge
result = getattr(self, func)(*args, **kwargs)
else:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
obj = self._obj_with_exclusions
        # nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
return self._transform_fast(result, obj)
def _transform_fast(self, result, obj):
"""
Fast transform path for aggregations
"""
# if there were groups with no observations (Categorical only?)
# try casting data to original dtype
cast = (self.size().fillna(0) > 0).any()
        # for each col, reshape to the size of the original frame
        # by a take operation
ids, _, ngroup = self.grouper.group_info
output = []
for i, _ in enumerate(result.columns):
res = algorithms.take_1d(result.iloc[:, i].values, ids)
if cast:
res = self._try_cast(res, obj.iloc[:, i])
output.append(res)
return DataFrame._from_arrays(output, columns=result.columns,
index=obj.index)
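    # Illustrative usage sketch (not part of the original pandas source):
    # ``transform`` broadcasts a per-group aggregation back to the shape of
    # the original frame, which is what the fast path above implements via a
    # take. Hedged doctest-style example; the exact formatting is an
    # assumption.
    #
    # >>> df = DataFrame({'A': ['x', 'x', 'y'], 'B': [1, 2, 4]})
    # >>> df.groupby('A').transform('mean')
    #      B
    # 0  1.5
    # 1  1.5
    # 2  4.0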
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, compat.string_types):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we get the same results
if res.shape == res_fast.shape:
res_r = res.values.ravel()
res_fast_r = res_fast.values.ravel()
mask = notnull(res_r)
if (res_r[mask] == res_fast_r[mask]).all():
path = fast_path
except:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.append(i)
except Exception:
pass
if len(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return DataFrame(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
        func : function
            Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
        Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.filter(lambda x: x['B'].mean() > 3.)
A B C
1 bar 2 5.0
3 bar 4 1.0
5 bar 6 9.0
Returns
-------
filtered : DataFrame
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, 'name', name)
res = func(group, *args, **kwargs)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if is_bool(res) or (is_scalar(res) and isnull(res)):
if res and notnull(res):
indices.append(self._get_index(name))
else:
                # non-scalars aren't allowed
raise TypeError("filter function returned a %s, "
"but expected a scalar bool" %
type(res).__name__)
return self._apply_filter(indices, dropna)
class DataFrameGroupBy(NDFrameGroupBy):
_apply_whitelist = _dataframe_apply_whitelist
#
# Make class defs of attributes on DataFrameGroupBy whitelist.
for _def_str in _whitelist_method_generator(DataFrame, _apply_whitelist):
exec(_def_str)
_block_agg_axis = 1
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 2],
... 'B': [1, 2, 3, 4],
... 'C': np.random.randn(4)})
>>> df
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> df.groupby('A').agg('min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> df.groupby('A').agg(['min', 'max'])
B C
min max min max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> df.groupby('A').B.agg(['min', 'max'])
min max
A
1 1 2
2 3 4
Different aggregations per column
>>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
B C
min max sum
A
1 1 2 0.590716
2 3 4 0.704907
See also
--------
pandas.DataFrame.groupby.apply
pandas.DataFrame.groupby.transform
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='DataFrame',
versionadded=''))
def aggregate(self, arg, *args, **kwargs):
return super(DataFrameGroupBy, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if ndim == 2:
if subset is None:
subset = self.obj
return DataFrameGroupBy(subset, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
elif ndim == 1:
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(subset, selection=key,
grouper=self.grouper)
raise AssertionError("invalid ndim for _gotitem")
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if self.axis == 0:
return DataFrame(result, index=obj.columns,
columns=result_index).T
else:
return DataFrame(result, index=obj.index,
columns=result_index)
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
def _insert_inaxis_grouper_inplace(self, result):
# zip in reverse so we can always insert at loc 0
izip = zip(* map(reversed, (
self.grouper.names,
self.grouper.get_group_levels(),
[grp.in_axis for grp in self.grouper.groupings])))
for name, lev, in_axis in izip:
if in_axis:
result.insert(0, name, lev)
def _wrap_aggregated_output(self, output, names=None):
agg_axis = 0 if self.axis == 1 else 1
agg_labels = self._obj_with_exclusions._get_axis(agg_axis)
output_keys = self._decide_output_index(output, agg_labels)
if not self.as_index:
result = DataFrame(output, columns=output_keys)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
result = DataFrame(output, index=index, columns=output_keys)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _wrap_transformed_output(self, output, names=None):
return DataFrame(output, index=self.obj.index)
def _wrap_agged_blocks(self, items, blocks):
if not self.as_index:
index = np.arange(blocks[0].values.shape[1])
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _reindex_output(self, result):
"""
        If we have categorical groupers, then we want to make sure that
        the output is fully reindexed to include all of their levels, even
        levels that did not participate in the observed groupings (e.g.
        may have all been nan groups).
        This can re-expand the output space
"""
groupings = self.grouper.groupings
if groupings is None:
return result
elif len(groupings) == 1:
return result
elif not any([isinstance(ping.grouper, (Categorical, CategoricalIndex))
for ping in groupings]):
return result
levels_list = [ping.group_index for ping in groupings]
index, _ = MultiIndex.from_product(
levels_list, names=self.grouper.names).sortlevel()
if self.as_index:
d = {self.obj._get_axis_name(self.axis): index, 'copy': False}
return result.reindex(**d)
# GH 13204
# Here, the categorical in-axis groupers, which need to be fully
# expanded, are columns in `result`. An idea is to do:
# result = result.set_index(self.grouper.names)
# .reindex(index).reset_index()
# but special care has to be taken because of possible not-in-axis
# groupers.
# So, we manually select and drop the in-axis grouper columns,
# reindex `result`, and then reset the in-axis grouper columns.
# Select in-axis groupers
in_axis_grps = [(i, ping.name) for (i, ping)
in enumerate(groupings) if ping.in_axis]
g_nums, g_names = zip(*in_axis_grps)
result = result.drop(labels=list(g_names), axis=1)
# Set a temp index and reindex (possibly expanding)
result = result.set_index(self.grouper.result_index
).reindex(index, copy=False)
# Reset in-axis grouper columns
# (using level numbers `g_nums` because level names may not be unique)
result = result.reset_index(level=g_nums)
return result.reset_index(drop=True)
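    # Illustrative usage sketch (not part of the original pandas source): with
    # a categorical grouper, _reindex_output re-expands the result to every
    # category, including unobserved ones. Hedged doctest-style example; the
    # exact layout and fill values are assumptions.
    #
    # >>> key = Categorical(['a', 'a'], categories=['a', 'b'])
    # >>> df = DataFrame({'key': key, 'val': [1, 2]})
    # >>> df.groupby('key').count()
    #      val
    # key
    # a      2
    # b      0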
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions)
def _apply_to_column_groupbys(self, func):
from pandas.core.reshape.concat import concat
return concat(
(func(col_groupby) for _, col_groupby
in self._iterate_column_groupbys()),
keys=self._selected_obj.columns, axis=1)
def count(self):
""" Compute count of group, excluding missing values """
from functools import partial
from pandas.core.dtypes.missing import _isnull_ndarraylike as isnull
data, _ = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
val = ((mask & ~isnull(blk.get_values())) for blk in data.blocks)
loc = (blk.mgr_locs for blk in data.blocks)
counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1)
blk = map(make_block, map(counter, val), loc)
return self._wrap_agged_blocks(data.items, list(blk))
def nunique(self, dropna=True):
"""
Return DataFrame with number of distinct observations per group for
each column.
.. versionadded:: 0.20.0
Parameters
----------
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique: DataFrame
Examples
--------
>>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')})
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique()
id value1 value2
id
egg 1 1 1
ham 1 1 2
spam 1 2 1
# check for rows with the same id but conflicting values
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
4 ham 5 x
5 ham 5 y
"""
obj = self._selected_obj
def groupby_series(obj, col=None):
return SeriesGroupBy(obj,
selection=col,
grouper=self.grouper).nunique(dropna=dropna)
if isinstance(obj, Series):
results = groupby_series(obj)
else:
from pandas.core.reshape.concat import concat
results = [groupby_series(obj[col], col) for col in obj.columns]
results = concat(results, axis=1)
if not self.as_index:
results.index = _default_index(len(results))
return results
from pandas.plotting._core import boxplot_frame_groupby # noqa
DataFrameGroupBy.boxplot = boxplot_frame_groupby
class PanelGroupBy(NDFrameGroupBy):
def aggregate(self, arg, *args, **kwargs):
return super(PanelGroupBy, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self._selected_obj.items
else:
slice_axis = self._selection_list
slicer = lambda x: self._selected_obj[x]
else:
raise NotImplementedError("axis other than 0 is not supported")
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def aggregate(self, arg, *args, **kwargs):
"""
Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
            Function to use for aggregating groups. If a function, it must
            either work when passed a Panel or when passed to Panel.apply. If
            a dict is passed, the keys must be DataFrame column names
Returns
-------
aggregated : Panel
"""
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
return self._aggregate_generic(arg, *args, **kwargs)
def _wrap_generic_output(self, result, obj):
if self.axis == 0:
new_axes = list(obj.axes)
new_axes[0] = self.grouper.result_index
elif self.axis == 1:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, z, x]
else:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, y, x]
result = Panel._from_axes(result, new_axes)
if self.axis == 1:
result = result.swapaxes(0, 1).swapaxes(0, 2)
elif self.axis == 2:
result = result.swapaxes(0, 2)
return result
def _aggregate_item_by_item(self, func, *args, **kwargs):
obj = self._obj_with_exclusions
result = {}
if self.axis > 0:
for item in obj:
try:
itemg = DataFrameGroupBy(obj[item],
axis=self.axis - 1,
grouper=self.grouper)
result[item] = itemg.aggregate(func, *args, **kwargs)
except (ValueError, TypeError):
raise
new_axes = list(obj.axes)
new_axes[self.axis] = self.grouper.result_index
return Panel._from_axes(result, new_axes)
else:
raise ValueError("axis value must be greater than 0")
def _wrap_aggregated_output(self, output, names=None):
raise AbstractMethodError(self)
class NDArrayGroupBy(GroupBy):
pass
# ----------------------------------------------------------------------
# Splitting / application
class DataSplitter(object):
def __init__(self, data, labels, ngroups, axis=0):
self.data = data
self.labels = _ensure_int64(labels)
self.ngroups = ngroups
self.axis = axis
@cache_readonly
def slabels(self):
# Sorted labels
return algorithms.take_nd(self.labels, self.sort_idx, allow_fill=False)
@cache_readonly
def sort_idx(self):
# Counting sort indexer
return get_group_index_sorter(self.labels, self.ngroups)
def __iter__(self):
sdata = self._get_sorted_data()
if self.ngroups == 0:
            # we are inside a generator; rather than raise StopIteration
            # we merely return to signal the end
return
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
for i, (start, end) in enumerate(zip(starts, ends)):
            # Since we are now compressing the group ids, it is no longer
            # possible to produce empty slices, because such groups would
            # not be observed in the data
# if start >= end:
# raise AssertionError('Start %s must be less than end %s'
# % (str(start), str(end)))
yield i, self._chop(sdata, slice(start, end))
def _get_sorted_data(self):
return self.data.take(self.sort_idx, axis=self.axis, convert=False)
def _chop(self, sdata, slice_obj):
return sdata.iloc[slice_obj]
def apply(self, f):
raise AbstractMethodError(self)
class ArraySplitter(DataSplitter):
pass
class SeriesSplitter(DataSplitter):
def _chop(self, sdata, slice_obj):
return sdata._get_values(slice_obj).to_dense()
class FrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(FrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
def fast_apply(self, f, names):
# must return keys::list, values::list, mutated::bool
try:
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
except:
# fails when all -1
return [], True
sdata = self._get_sorted_data()
results, mutated = lib.apply_frame_axis0(sdata, f, names, starts, ends)
return results, mutated
def _chop(self, sdata, slice_obj):
if self.axis == 0:
return sdata.iloc[slice_obj]
else:
return sdata._slice(slice_obj, axis=1) # .loc[:, slice_obj]
class NDFrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(NDFrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
self.factory = data._constructor
def _get_sorted_data(self):
# this is the BlockManager
data = self.data._data
# this is sort of wasteful but...
sorted_axis = data.axes[self.axis].take(self.sort_idx)
sorted_data = data.reindex_axis(sorted_axis, axis=self.axis)
return sorted_data
def _chop(self, sdata, slice_obj):
return self.factory(sdata.get_slice(slice_obj, axis=self.axis))
def get_splitter(data, *args, **kwargs):
if isinstance(data, Series):
klass = SeriesSplitter
elif isinstance(data, DataFrame):
klass = FrameSplitter
else:
klass = NDFrameSplitter
return klass(data, *args, **kwargs)
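# Illustrative usage sketch (not part of the original pandas source):
# get_splitter simply dispatches on the type of ``data``; a DataFrame yields a
# FrameSplitter. Hedged doctest-style example.
#
# >>> splitter = get_splitter(DataFrame({'a': [1, 2]}), np.array([0, 1]), 2)
# >>> type(splitter).__name__
# 'FrameSplitter'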
|
mit
|
rl-institut/reegis-hp
|
reegis_hp/de21/ew.py
|
3
|
2788
|
# http://www.geodatenzentrum.de/auftrag1/archiv/vektor/vg250_ebenen/2015/vg250-ew_2015-12-31.geo84.shape.ebenen.zip
import os
import pandas as pd
import geopandas as gpd
from oemof.tools import logger
from shapely.wkt import loads as wkt_loads
from reegis_hp.de21 import tools as t
import configuration as config
import zipfile
import shutil
import glob
import logging
def get_ew_shp_file(c, year):
url = ('http://www.geodatenzentrum.de/auftrag1/archiv/vektor/' +
'vg250_ebenen/{0}/vg250-ew_{0}-12-31.geo84.shape'.format(year) +
'.{0}.zip')
filename_zip = os.path.join(c.paths['general'], c.files['vg250_ew_zip'])
msg = t.download_file(filename_zip, url.format('ebene'))
if msg == 404:
logging.warning("Wrong URL. Try again with different URL.")
t.download_file(filename_zip, url.format('ebenen'), overwrite=True)
zip_ref = zipfile.ZipFile(filename_zip, 'r')
zip_ref.extractall(c.paths['general'])
zip_ref.close()
subs = next(os.walk(c.paths['general']))[1]
mysub = None
for sub in subs:
if 'vg250' in sub:
mysub = sub
pattern_path = os.path.join(c.paths['general'],
mysub,
'vg250-ew_ebenen',
'VG250_VWG*')
for file in glob.glob(pattern_path):
file_new = os.path.join(c.paths['general'],
'VG250_VWG_' + str(year) + file[-4:])
shutil.copyfile(file, file_new)
shutil.rmtree(os.path.join(c.paths['general'], mysub))
os.remove(filename_zip)
def get_ew_by_region(c, spatial, year):
filename_shp = os.path.join(c.paths['general'],
'VG250_VWG_' + str(year) + '.shp')
if not os.path.isfile(filename_shp):
get_ew_shp_file(c, year)
vwg = gpd.read_file(filename_shp)
    # replace each polygon geometry by a representative point inside it
vwg['geometry'] = vwg.representative_point()
ewz = pd.Series()
spatial.sort_index(inplace=True)
n = 0
for i, v in spatial.iterrows():
n += 1
ewz.loc[i] = vwg.loc[vwg.intersects(wkt_loads(v.geom)), 'EWZ'].sum()
print(i, end=', ', flush=True)
if n % 10 == 0:
print()
print()
if vwg.EWZ.sum() - ewz.sum() > 0:
logging.warning(
"Overall sum {0} is higher than localised sum {1}.".format(
ewz.sum(), vwg.EWZ.sum()))
return ewz
if __name__ == "__main__":
logger.define_logging()
cfg = config.get_configuration()
spatial_file = os.path.join(cfg.paths['geometry'],
cfg.files['federal_states_polygon'])
spatial_dfs = pd.read_csv(spatial_file, index_col='gen')
print(get_ew_by_region(cfg, spatial_dfs, 2014))
|
gpl-3.0
|
ocefpaf/iris
|
lib/iris/tests/unit/plot/test_contourf.py
|
5
|
2475
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.plot.contourf` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from unittest import mock
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.contourf(self.cube, coords=("bar", "str_coord"))
self.assertPointsTickLabels("yaxis")
def test_xaxis_labels(self):
iplt.contourf(self.cube, coords=("str_coord", "bar"))
self.assertPointsTickLabels("xaxis")
def test_yaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
iplt.contourf(self.cube, axes=ax, coords=("bar", "str_coord"))
plt.close(fig)
self.assertPointsTickLabels("yaxis", ax)
def test_xaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
iplt.contourf(self.cube, axes=ax, coords=("str_coord", "bar"))
plt.close(fig)
self.assertPointsTickLabels("xaxis", ax)
def test_geoaxes_exception(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
self.assertRaises(TypeError, iplt.contourf, self.lat_lon_cube, axes=ax)
plt.close(fig)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=False)
self.foo = self.cube.coord("foo").points
self.foo_index = np.arange(self.foo.size)
self.bar = self.cube.coord("bar").points
self.bar_index = np.arange(self.bar.size)
self.data = self.cube.data
self.dataT = self.data.T
mocker = mock.Mock(alpha=0, antialiased=False)
self.mpl_patch = self.patch(
"matplotlib.pyplot.contourf", return_value=mocker
)
self.draw_func = iplt.contourf
if __name__ == "__main__":
tests.main()
|
lgpl-3.0
|
uglyboxer/linear_neuron
|
net-p3/lib/python3.5/site-packages/sklearn/utils/tests/test_random.py
|
38
|
7410
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
if __name__ == '__main__':
import nose
nose.runmodule()
|
mit
|
gatieme/AderXCoding
|
language/python/matplotlib/test_numpy.py
|
1
|
3105
|
#!coding:utf-8
import re
import sys
import urllib2
import matplotlib.pyplot as plt
import numpy as np
# extract the data records from the xml file
def ReSaltStatisData( ):
    # the regular expression below extracts each record's fields from the XML text
file = open("./data.xml", 'r')
linesList = file.read()
file.close()
#print linesList.decode('GBK').encode('utf-8')
reData = r'''<record>\s*<field name=".*?">(.*?)</field>\s*<field name=".*?">(\d*).*?(\d*)</field>\s*<field name=".*?">(.*?)</field>\s*</record>'''
pattern = re.compile(reData, re.S)
myItems = re.findall(pattern, linesList)
saltData = []
#print len(myItems) / 4
#print myItems
for item in myItems:
name = item[0].replace("\n", "")
year = item[1].replace("\n", "")
month = item[2].replace("\n", "")
data = float(item[3].replace("\n", ""))
#print name, year, month, data
saltData.append(data)
#print len(saltData)
return sorted(saltData[0:36])
# empirical cumulative distribution function (CDF)
def ShowCDFPlot(data, step):
"""
    step : bin width used to build the x-axis values
"""
minData = int(min(data) / 100) * 100
maxData = int(max(data) / 100 + 1) * 100
print "min = ", minData, "max = ", maxData
#print "min = ", int(minData / 100) * 100, "max = ", int(maxData / 100 + 1) * 100
    # generate the x-axis values, from minData to maxData with the given step
xdata = range(minData, maxData + step, step)
    # generate the y values: Count(x < xdata[pos])
ydata = []
pos = 0
count = 0
for num in data:
#print "num = ", num, "data = ", xdata[pos]
        if num < xdata[pos]:  # the current element is still below the current bin edge
count = count + 1
else:
ydata.append(count)
#print "Count(X < %f) = %d [pos = %d]" % (xdata[pos], count, pos)
pos = pos + 1
while pos < len(xdata):
ydata.append(count)
pos += 1
print "xdata =", xdata
print "ydata =", ydata
    # configure the figure
plt.figure(num = 1, figsize = (8, 6))
plt.title("Cumulative Distribution Function")
plt.xlabel("alt", size = 14)
plt.ylabel("count", size = 14)
plt.plot(xdata, ydata, color = 'r', linestyle = '-', label = "cdf")
plt.legend(loc = "upper left")
#plt.savefig('cdf.pdf', format = 'pdf')
plt.savefig('cdf.png', format = 'png')
#plt.show()
    # histogram
#def ShowHistogramPlot(data):
ax1 = plt.subplots(1, figsize = (8, 4))
plt.title("Histogram")
plt.legend(loc = "upper left")
plt.savefig('hist.png', format = 'png')
plt.hist(data)
#def ShowBoxPlot(data):
ax2 = plt.subplots(1, figsize = (8, 4))
plt.boxplot(data)
plt.title("Boxplot")
plt.legend(loc = "upper left")
plt.savefig('box.png', format = 'png')
plt.show()
# main entry point
if __name__ == "__main__" :
reload(sys)
sys.setdefaultencoding("utf-8")
saltData = ReSaltStatisData( )
print saltData
ShowCDFPlot(saltData, 50)
#ShowHistogramPlot(saltData)
#ShowBoxPlot(saltData)
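    # Illustrative sketch (not part of the original script): the counting loop
    # in ShowCDFPlot can be expressed with a vectorized numpy call. Hedged
    # alternative, assuming `data` is the sorted list returned above and
    # minData/maxData/step are defined as in ShowCDFPlot:
    #
    # xdata = np.arange(minData, maxData + step, step)
    # ydata = np.searchsorted(np.sort(data), xdata, side='left')
    #
    # searchsorted with side='left' returns, for each bin edge, the number of
    # samples strictly smaller than it, i.e. Count(x < xdata[pos]).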
|
gpl-2.0
|
bhargav/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
50
|
13330
|
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
def test_lda_default_prior_params():
    # default prior parameter should be `1 / n_topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
    # Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
    # (same as test_lda_fit_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative and should be normalized
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0]))
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
    # test passing a dense matrix with negative input.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
evaluate_every=1,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_preplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
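# Note (not part of the original test file): the identity exercised above is
# the standard Dirichlet expectation, E[log theta_k] = psi(alpha_k) -
# psi(sum_j alpha_j), which is what the Cython helpers are checked against.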
|
bsd-3-clause
|
jblackburne/scikit-learn
|
examples/decomposition/plot_pca_vs_lda.py
|
176
|
2027
|
"""
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of IRIS dataset')
plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('LDA of IRIS dataset')
plt.show()
|
bsd-3-clause
|
ecino/compassion-switzerland
|
partner_compassion/__manifest__.py
|
2
|
2820
|
# -*- coding: utf-8 -*-
##############################################################################
#
# ______ Releasing children from poverty _
# / ____/___ ____ ___ ____ ____ ___________(_)___ ____
# / / / __ \/ __ `__ \/ __ \/ __ `/ ___/ ___/ / __ \/ __ \
# / /___/ /_/ / / / / / / /_/ / /_/ (__ |__ ) / /_/ / / / /
# \____/\____/_/ /_/ /_/ .___/\__,_/____/____/_/\____/_/ /_/
# /_/
# in Jesus' name
#
# Copyright (C) 2014-2018 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# pylint: disable=C8101
{
'name': 'Upgrade Partners for Compassion Suisse',
'version': '10.0.2.2.1',
'category': 'Partner',
'author': 'Compassion CH',
'license': 'AGPL-3',
'website': 'http://www.compassion.ch',
'depends': [
'sbc_compassion',
'thankyou_letters',
'mail_sendgrid',
'partner_contact_birthdate',
'account_banking_mandate',
'geoengine_partner',
'base_geolocalize',
'web_notify',
'partner_survey',
'partner_contact_in_several_companies',
'crm_claim',
'base_search_fuzzy',
],
'external_dependencies': {
'python': ['pandas', 'pyminizip', 'sendgrid']
},
'data': [
'security/ir.model.access.csv',
'data/partner_category_data.xml',
'data/partner_title_data.xml',
'data/advocate_engagement_data.xml',
'data/calendar_event_type.xml',
'data/ir_cron.xml',
'data/mail_channel.xml',
'data/res_partner_actions.xml',
'data/gist_indexes.xml',
'views/advocate_details.xml',
'views/partner_compassion_view.xml',
'views/product_view.xml',
'views/partner_check_double.xml',
'views/notification_settings_view.xml',
'templates/child_protection_charter.xml'
],
'qweb': [
'static/src/xml/thread_custom.xml'
],
'installable': True,
'auto_install': False,
}
|
agpl-3.0
|
glemaitre/UnbalancedDataset
|
imblearn/ensemble/easy_ensemble.py
|
2
|
5799
|
"""Class to perform under-sampling using easy ensemble."""
# Authors: Guillaume Lemaitre <[email protected]>
# Christos Aridas
# License: MIT
import numpy as np
from sklearn.utils import check_random_state
from .base import BaseEnsembleSampler
from ..under_sampling import RandomUnderSampler
MAX_INT = np.iinfo(np.int32).max
class EasyEnsemble(BaseEnsembleSampler):
"""Create an ensemble sets by iteratively applying random under-sampling.
This method iteratively select a random subset and make an ensemble of the
different sets.
Read more in the :ref:`User Guide <ensemble_samplers>`.
Parameters
----------
ratio : str, dict, or callable, optional (default='auto')
Ratio to use for resampling the data set.
- If ``str``, has to be one of: (i) ``'minority'``: resample the
minority class; (ii) ``'majority'``: resample the majority class,
      (iii) ``'not minority'``: resample all classes apart from the minority
      class, (iv) ``'all'``: resample all classes, and (v) ``'auto'``:
      corresponds to ``'all'`` for over-sampling methods and ``'not
      minority'`` for under-sampling methods. The classes targeted will be
      over-sampled or under-sampled to achieve an equal number of samples
      as the majority or minority class.
- If ``dict``, the keys correspond to the targeted classes. The values
correspond to the desired number of samples.
- If callable, function taking ``y`` and returns a ``dict``. The keys
correspond to the targeted classes. The values correspond to the
desired number of samples.
return_indices : bool, optional (default=False)
Whether or not to return the indices of the samples randomly
selected from the majority class.
random_state : int, RandomState instance or None, optional (default=None)
If int, ``random_state`` is the seed used by the random number
generator; If ``RandomState`` instance, random_state is the random
number generator; If ``None``, the random number generator is the
``RandomState`` instance used by ``np.random``.
replacement : bool, optional (default=False)
        Whether or not to sample randomly with replacement.
n_subsets : int, optional (default=10)
Number of subsets to generate.
Notes
-----
The method is described in [1]_.
    Supports multi-class resampling by sampling each class independently.
See :ref:`sphx_glr_auto_examples_ensemble_plot_easy_ensemble.py`.
See also
--------
BalanceCascade, BalancedBaggingClassifier
References
----------
.. [1] X. Y. Liu, J. Wu and Z. H. Zhou, "Exploratory Undersampling for
Class-Imbalance Learning," in IEEE Transactions on Systems, Man, and
Cybernetics, Part B (Cybernetics), vol. 39, no. 2, pp. 539-550,
April 2009.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.ensemble import \
EasyEnsemble # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> ee = EasyEnsemble(random_state=42)
>>> X_res, y_res = ee.fit_sample(X, y)
>>> print('Resampled dataset shape {}'.format(Counter(y_res[0])))
Resampled dataset shape Counter({0: 100, 1: 100})
"""
def __init__(self,
ratio='auto',
return_indices=False,
random_state=None,
replacement=False,
n_subsets=10):
super(EasyEnsemble, self).__init__(ratio=ratio)
self.random_state = random_state
self.return_indices = return_indices
self.replacement = replacement
self.n_subsets = n_subsets
def _sample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : array-like, shape (n_samples,)
Corresponding label for each sample in X.
Returns
-------
X_resampled : {ndarray, sparse matrix}, shape \
(n_subset, n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_subset, n_samples_new)
The corresponding label of `X_resampled`
idx_under : ndarray, shape (n_subset, n_samples, )
            If `return_indices` is `True`, an array will be returned
            containing the indices of the samples that were selected.
"""
random_state = check_random_state(self.random_state)
X_resampled = []
y_resampled = []
if self.return_indices:
idx_under = []
for _ in range(self.n_subsets):
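            # Each subset is drawn by a fresh RandomUnderSampler seeded from the
            # shared RandomState, so the ensemble members are sampled
            # independently of each other.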
rus = RandomUnderSampler(
ratio=self.ratio_, return_indices=True,
random_state=random_state.randint(MAX_INT),
replacement=self.replacement)
sel_x, sel_y, sel_idx = rus.fit_sample(X, y)
X_resampled.append(sel_x)
y_resampled.append(sel_y)
if self.return_indices:
idx_under.append(sel_idx)
if self.return_indices:
return (np.array(X_resampled), np.array(y_resampled),
np.array(idx_under))
else:
return np.array(X_resampled), np.array(y_resampled)
|
mit
|
OshynSong/scikit-learn
|
examples/ensemble/plot_forest_iris.py
|
335
|
6271
|
"""
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
estimators are built sequentially and so cannot use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
        clf = clone(model)
        clf.fit(X, y)
        scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
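        # e.g. "<class '...DecisionTreeClassifier'>" becomes "DecisionTree"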
model_details = model_title
        if hasattr(clf, "estimators_"):
            model_details += " with {} estimators".format(len(clf.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
        if isinstance(clf, DecisionTreeClassifier):
            Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
            estimator_alpha = 1.0 / len(clf.estimators_)
            for tree in clf.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
        Z_points_coarser = clf.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
|
bsd-3-clause
|
ahoyosid/scikit-learn
|
examples/datasets/plot_random_multilabel_dataset.py
|
93
|
3460
|
"""
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
=====  =====  =====  ======
  1      2      3    Color
=====  =====  =====  ======
  Y      N      N    Red
  N      Y      N    Blue
  N      N      Y    Yellow
  Y      Y      N    Purple
  Y      N      Y    Orange
  N      Y      Y    Green
  Y      Y      Y    Brown
=====  =====  =====  ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
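# Each sample's label indicator row dotted with [1, 2, 4] gives an index 1-7
# into COLORS, matching the class-combination table in the module docstring.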
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_indicator=True,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
|
bsd-3-clause
|
sjyk/activedetect
|
activedetect/model_based/preprocessing_utils.py
|
1
|
2147
|
#!/usr/bin/env python
import csv
import numpy as np
from sklearn.preprocessing import FunctionTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
import scipy
import unicodedata
#basic error handling
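# tryParse coerces every entry of a column to float, substituting 0 for values
# that cannot be parsed; it accepts either a scalar-shaped (1, 1) array or a
# 1-D column of values.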
def tryParse(X):
vals = []
if X.shape == (1,1):
try:
vals.append(float(X.tolist()[0][0]))
except ValueError:
vals.append(0)
return vals
for x in np.squeeze(X.T):
try:
vals.append(float(x))
except ValueError:
vals.append(0)
return vals
def tryParseList(Y):
return tryParse(np.array(Y))
#converts the labeled dataset into features and labels
def featurize(features_dataset, types):
feature_list = []
transform_list = []
for i,t in enumerate(types):
col = [f[i] for f in features_dataset]
if t == "string" or t == "categorical" or t =="address":
vectorizer = CountVectorizer(min_df=1, token_pattern='\S+')
vectorizer.fit(col)
feature_list.append(vectorizer.transform(col))
###print
transform_list.append(vectorizer)
else:
vectorizer = FunctionTransformer(tryParse)
vectorizer.fit(col)
feature_list.append(scipy.sparse.csr_matrix(vectorizer.transform(col)).T)
transform_list.append(vectorizer)
features = scipy.sparse.hstack(feature_list).tocsr()
return features, transform_list
#converts the labeled dataset into features and labels
def featurizeFromList(features_dataset, types, tlist):
feature_list = []
transform_list = []
for i,t in enumerate(types):
col = [f[i] for f in features_dataset]
if t == "string" or t == "categorical" or t =="address":
vectorizer = tlist[i]
feature_list.append(vectorizer.transform(col))
else:
vectorizer = tlist[i]
#print scipy.sparse.csr_matrix(vectorizer.transform(col)).T
feature_list.append(scipy.sparse.csr_matrix(vectorizer.transform(col)).T)
features = scipy.sparse.hstack(feature_list).tocsr()
return features
def get_acc_scores(ytrue, ypred, yscores=None):
    if yscores is None:
        yscores = ypred
    return [accuracy_score(ytrue, ypred), f1_score(ytrue, ypred),
            roc_auc_score(ytrue, yscores, average='weighted')]
|
mit
|
stggh/PyDiatomic
|
examples/example_RKR.py
|
1
|
1765
|
# -*- coding: utf-8 -*-
#################################################################
# Rydberg-Klein-Rees evaluation of a potential energy curve
# from spectroscopic constants
#
# see also example_Morse.py
#
# [email protected]
# 2016
#################################################################
import numpy as np
import cse
import scipy.constants as const
from scipy.interpolate import splrep, splev
import matplotlib.pyplot as plt
import sys
print("example_RKR.py - for this example accept the default inputs\n")
fn = input("RKR: Spectroscopic constants filename [data/GB.dat]: ")
fn = 'data/GB.dat' if fn == '' else fn
try:
vv, Gv, Bv = np.loadtxt(fn, unpack=True)
except FileNotFoundError:
    print(f"RKR: file '{fn:s}' not found")
    sys.exit(1)
# reduced mass in atomic mass units- see Huber+Herzberg - default is O2 = 7.9975
mol = input("RKR: diatomic molecule [O2]: ")
if mol == '':
mol = 'O2'
μ = cse.cse_setup.reduced_mass(mol)[0]/const.m_u
# De - dissociation energy
De = input("RKR: De [42021.47 cm-1]: ")
De = 42021.47 if De == '' else float(De)
# outer limb extension
limb = input("RKR: Outer-limb LeRoy(L) or Morse(M) [L]: ")
if limb == '':
limb = 'L'
R, PEC, RTP, PTP = cse.tools.RKR.rkr(μ, vv, Gv, Bv, De, limb, dv=0.1,
Rgrid=np.arange(0.005, 10.004, 0.005))
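# R, PEC: the potential energy curve (on the requested R grid);
# RTP, PTP: the RKR turning points it was constructed from (plotted below).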
data = np.column_stack((R.T, PEC.T))
np.savetxt("data/RKR.dat", data)
print("RKR: potential curve written to 'data/RKR.dat'")
plt.plot(R, PEC, label='RKR potential curve')
plt.plot(RTP[::10], PTP[::10], 'o', label='turning points')
plt.legend()
plt.axis(xmin=0.8, xmax=4, ymin=-0.1, ymax=6)
plt.title("example_RKR.py")
plt.xlabel(r"R($\AA$)")
plt.ylabel("E(eV)")
plt.savefig("output/example_RKR.png", dpi=75)
plt.show()
|
gpl-3.0
|
molliewebb/aston
|
aston/spectra/Scan.py
|
3
|
4028
|
import numpy as np
class Scan(object):
def __init__(self, x, abn, name='', source=None):
assert len(x) == len(abn)
self.x, self.abn = x, abn
self.name = name
self.source = source
def plot(self, color=None, label=False, ax=None):
#TODO: this is extremely ugly; needs a rewrite
#TODO: should use source?
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
if color is None:
color = 'k'
scn = np.vstack([self.x, self.abn])
if scn.shape[1] > 10 and np.all(np.abs(np.diff(scn[0]) - \
(scn[0, 1] - scn[0, 0])) < 1e-9):
#if the spacing between all the points is equal, plot as a line
            scn = scn[:, np.argsort(scn[0])]
ax.plot(scn[0], scn[1], '-', color=color)
else:
# remove 0's
scn = scn[:, scn[1] != 0]
try:
#FIXME: this crashes on Windows unless the user has clicked on
#the spectrum graph previously. Matplotlib bug needs workaround
ax.vlines(scn[0], 0, scn[1], color=color, alpha=0.5)
except:
pass
ax.plot(scn[0], scn[1], ',', color=color)
if label:
#FIXME: doesn't look good if less than AMU spacing
max_val = max(np.array(scn[1])) # only label peaks X % of this
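            # keep points whose abundance is at least half the preceding
            # point's (a crude local-maximum filter), then label those above
            # 1% of the base peak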
filt_scn = scn[:, 0.5 * np.roll(scn[1], 1) - scn[1] <= 0]
for s in filt_scn[:, filt_scn[1] > 0.01 * max_val].T:
ax.text(s[0], s[1], str(s[0]), ha='center', \
va='bottom', rotation=90, size=10, color=color)
# #go through the top 10% highest ions from highest to lowest
# #always have at least 10 labels, but no more than 50 (arbitrary)
# #if an ion is close to one seen previously, don't display it
# v2lbl = {} # values to label
# plbl = [] # all values so far
# max_val = max(np.array(scn[1])) # only label peaks X % of this
# for i in np.array(scn[1]).argsort()[::-1]:
# mz = scn[0][i]
# #don't allow a new label within 1.5 units of another
# if not np.any(np.abs(np.array(plbl) - mz) < 1.5) and \
# scn[1][i] > 0.01 * max_val:
# v2lbl[mz] = scn[1][i]
# plbl.append(mz)
# #add peak labels
# for v in v2lbl:
# ax.text(v, v2lbl[v], str(v), ha='center', \
# va='bottom', rotation=90, size=10, color=clr)
# #ax.text(v, v2lbl[v], str(v), ha='center', \
# # va='bottom', rotation=90, size=10, color=clr, \
# # bbox={'boxstyle': 'larrow,pad=0.3', 'fc': clr, \
# # 'ec': clr, 'lw': 1, 'alpha': '0.25'})
@property
def xmin(self):
return min(self.x)
@property
def xmax(self):
return max(self.x)
@property
def ymin(self):
return min(self.abn)
@property
def ymax(self):
return max(self.abn)
def d13C(self):
#FIXME: this needs to be moved to somewhere else;
# can't get parent in here
if self.source != 'irms':
return None
pass
#dt = self.getParentOfType('file')
#if self.info['sp-type'] == 'Isotope Standard':
# return dt.info['r-d13c-std']
## if there's no reference number, we can't do this
#try:
# float(dt.info['r-d13c-std'])
#except:
# return ''
#r45std = dt.get_point('r45std', float(self.info['sp-time']))
#r46std = dt.get_point('r46std', float(self.info['sp-time']))
## if no peak has been designated as a isotope std
#if r45std == 0.0:
# return ''
#d = delta13C_Santrock(self.ion(44), self.ion(45), self.ion(46), \
# float(dt.info['r-d13c-std']), r45std, r46std)
#return str(d)
|
gpl-3.0
|
jorvis/biocode
|
fasta/fasta_size_distribution_plot.py
|
3
|
4191
|
#!/usr/bin/env python3
import argparse
import matplotlib
# back-end options are here: http://matplotlib.sourceforge.net/faq/usage_faq.html#what-is-a-backend
matplotlib.use('Agg')
import matplotlib.pyplot as plot
import os
import re
def fasta_entry_sizes(file):
    seq_lengths = []
    ## these are reset as each seq entry is encountered
    seq_lines = []
    seq_count = 0
    for line in open(file, 'r'):
        if re.match(r'^>', line):
            seq_count += 1
            ## store the previous entry, if any (the very first header has none)
            if seq_lines:
                seq = re.sub(r'\s', '', ''.join(seq_lines))
                seq_lengths.append(len(seq))
            seq_lines = []
        else:
            seq_lines.append(line)
    ## store the final entry in the file
    if seq_lines:
        seq = re.sub(r'\s', '', ''.join(seq_lines))
        seq_lengths.append(len(seq))
    return seq_lengths
def get_legend_labels(label_arg, file_count):
labels = []
if label_arg is not None:
labels = label_arg.split(',')
if len(labels) != file_count:
raise Exception("Error: number of input files doesn't match number of labels specified in --legend_names")
return labels
def main():
parser = argparse.ArgumentParser( description='Generate FASTA file(s) size distribution plot')
parser.add_argument('fasta_files', metavar='N', type=str, nargs='+', help='Pass one or more FASTA files')
parser.add_argument('-o', '--output_file', type=str, required=True, help='Path to an output file to be created' )
parser.add_argument('-t', '--title', type=str, required=False, default='FASTA size distribution', \
help='Pass a title for the graph')
parser.add_argument('-b', '--bin_count', type=int, required=False, default=30, \
help='Data will be placed into this many bins. This is the default behavior. ' + \
'Alternatively, use --bin_size and --bin_max')
parser.add_argument('-s', '--bin_size', type=int, required=False, default=0, \
help='Instead of a --bin_count, use this to specify the size of your bins.')
parser.add_argument('-m', '--bin_max', type=int, required=False, default=0, \
help='If specifying --bin_size, you can optionally use this to limit the ' + \
'maximum bound of your bins (prevents long tails in plots)')
parser.add_argument('-l', '--legend_names', type=str, required=False, help='For a legend with labels ' + \
'of each of your datasets, pass a comma-separated list with no spaces.')
    parser.add_argument('-g', '--log_scale', action='store_true', help='Pass this flag for a log10 Y scale')
args = parser.parse_args()
data_ranges = []
fasta_files = args.fasta_files
size_max = 0
seqs_above_size_max = 0
for fasta_file in fasta_files:
print("INFO: parsing seq lengths in file: {0}".format(fasta_file))
sizes = fasta_entry_sizes(fasta_file)
print("INFO: {0} sequences found in {1}".format(len(sizes), fasta_file))
data_ranges.append(sizes)
this_max_size = max(sizes)
if this_max_size > size_max:
size_max = this_max_size
## calculate the bins. default is to use the bin_count
bin_opt = args.bin_count
    if args.bin_size != 0:
        bin_opt = []
        for bin_min in range(args.bin_size, size_max, args.bin_size):
            if args.bin_max == 0 or args.bin_max > bin_min:
                bin_opt.append(bin_min)
        if args.bin_max != 0:
            ## count the data points that fall beyond the requested maximum bin
            seqs_above_size_max = sum(1 for sizes in data_ranges
                                      for size in sizes if size > args.bin_max)
plot.xlabel('Sequence size (bins)')
plot.ylabel('Sequence counts')
plot.title(args.title)
    if args.log_scale:
n, bins, patches = plot.hist(data_ranges, bin_opt, normed=0, histtype='bar', log=True)
else:
n, bins, patches = plot.hist(data_ranges, bin_opt, normed=0, histtype='bar')
plot.grid(True)
legend_labels = get_legend_labels( args.legend_names, len(fasta_files) )
if len(legend_labels) > 0:
plot.legend(legend_labels)
plot.savefig(args.output_file)
print("INFO: there were {0} data points above the range defined in the histogram".format(seqs_above_size_max))
if __name__ == '__main__':
main()
|
mit
|
guschmue/tensorflow
|
tensorflow/python/keras/_impl/keras/engine/training.py
|
5
|
98431
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras training and evaluation routines.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import callbacks as cbks
from tensorflow.python.keras._impl.keras import losses
from tensorflow.python.keras._impl.keras import metrics as metrics_module
from tensorflow.python.keras._impl.keras import optimizers
from tensorflow.python.keras._impl.keras.engine.topology import Network
from tensorflow.python.keras._impl.keras.utils.data_utils import GeneratorEnqueuer
from tensorflow.python.keras._impl.keras.utils.data_utils import OrderedEnqueuer
from tensorflow.python.keras._impl.keras.utils.data_utils import Sequence
from tensorflow.python.keras._impl.keras.utils.generic_utils import Progbar
from tensorflow.python.platform import tf_logging as logging
def _standardize_input_data(data,
names,
shapes=None,
check_batch_axis=True,
exception_prefix=''):
"""Normalizes inputs and targets provided by users.
Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
Arguments:
data: User-provided input data (polymorphic).
names: List of expected array names.
shapes: Optional list of expected array shapes.
check_batch_axis: Boolean; whether to check that
the batch axis of the arrays matches the expected
value found in `shapes`.
exception_prefix: String prefix used for exception formatting.
Returns:
List of standardized input arrays (one array per model input).
Raises:
ValueError: in case of improperly formatted user-provided data.
"""
if not names:
if data is not None and hasattr(data, '__len__') and len(data):
raise ValueError('Error when checking model ' + exception_prefix + ': '
'expected no data, but got:', data)
return []
if data is None:
return [None for _ in range(len(names))]
if isinstance(data, dict):
for key, value in data.items():
if value.__class__.__name__ == 'DataFrame':
data[key] = value.values
arrays = []
for name in names:
if name not in data:
raise ValueError('No data provided for "' + name +
'". Need data for each key in: ' + str(names))
arrays.append(data[name])
elif isinstance(data, list):
for key, value in enumerate(data):
if value.__class__.__name__ == 'DataFrame':
data[key] = value.values
if len(data) != len(names):
if data and hasattr(data[0], 'shape'):
raise ValueError(
'Error when checking model ' + exception_prefix +
': the list of Numpy arrays '
'that you are passing to your model '
'is not the size the model expected. '
'Expected to see ' + str(len(names)) + ' array(s), but instead got '
'the following list of ' + str(len(data)) + ' arrays: ' +
str(data)[:200] + '...')
else:
if len(names) == 1:
data = [np.asarray(data)]
else:
raise ValueError('Error when checking model ' + exception_prefix +
': you are passing a list as '
'input to your model, '
'but the model expects '
'a list of ' + str(len(names)) +
' Numpy arrays instead. '
'The list you passed was: ' + str(data)[:200])
arrays = data
elif data.__class__.__name__ == 'DataFrame':
# test if data is a DataFrame, without pandas installed
arrays = data.values
else:
if not hasattr(data, 'shape'):
raise TypeError('Error when checking model ' + exception_prefix +
': data should be a Numpy array, '
'or list/dict of Numpy arrays. '
'Found: ' + str(data)[:200] + '...')
if len(names) > 1:
# Case: model expects multiple inputs but only received
# a single Numpy array.
raise ValueError('The model expects ' + str(len(names)) + ' ' +
exception_prefix +
' arrays, but only received one array. '
'Found: array with shape ' + str(data.shape))
arrays = [data]
# Make arrays at least 2D.
for i in range(len(names)):
array = arrays[i]
if len(array.shape) == 1:
array = np.expand_dims(array, 1)
arrays[i] = array
# Check shapes compatibility.
if shapes:
for i in range(len(names)):
if shapes[i] is None:
continue
array = arrays[i]
if len(array.shape) != len(shapes[i]):
raise ValueError(
'Error when checking ' + exception_prefix + ': expected ' + names[i]
+ ' to have ' + str(len(shapes[i])) +
' dimensions, but got array with shape ' + str(array.shape))
for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
if not j and not check_batch_axis:
# skip the first axis
continue
if ref_dim:
if ref_dim != dim:
raise ValueError('Error when checking ' + exception_prefix +
': expected ' + names[i] + ' to have shape ' +
str(shapes[i]) + ' but got array with shape ' +
str(array.shape))
return arrays
def _standardize_sample_or_class_weights(x_weight, output_names, weight_type):
"""Maps `sample_weight` or `class_weight` to model outputs.
Arguments:
x_weight: User-provided `sample_weight` or `class_weight` argument.
output_names: List of output names (strings) in the model.
weight_type: A string used purely for exception printing.
Returns:
    A list of `sample_weight` or `class_weight` where there is exactly
one element per model output.
Raises:
ValueError: In case of invalid user-provided argument.
"""
if x_weight is None or len(x_weight) == 0: # pylint: disable=g-explicit-length-test
return [None for _ in output_names]
if len(output_names) == 1:
if isinstance(x_weight, list) and len(x_weight) == 1:
return x_weight
if isinstance(x_weight, dict) and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if isinstance(x_weight, list):
if len(x_weight) != len(output_names):
raise ValueError('Provided `' + weight_type + '` was a list of ' +
str(len(x_weight)) + ' elements, but the model has ' +
str(len(output_names)) + ' outputs. '
                       'You should provide one `' + weight_type + '` '
'array per model output.')
return x_weight
if isinstance(x_weight, dict):
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise TypeError('The model has multiple outputs, so `' + weight_type + '` '
                    'should be either a list or a dict. '
'Provided `' + weight_type + '` type not understood: ' +
str(x_weight))
def _standardize_class_weights(class_weight, output_names):
return _standardize_sample_or_class_weights(class_weight, output_names,
'class_weight')
def _standardize_sample_weights(sample_weight, output_names):
return _standardize_sample_or_class_weights(sample_weight, output_names,
'sample_weight')
def _check_array_lengths(inputs, targets, weights=None):
"""Does user input validation for numpy arrays.
Arguments:
inputs: list of Numpy arrays of inputs.
targets: list of Numpy arrays of targets.
weights: list of Numpy arrays of sample weights.
Raises:
ValueError: in case of incorrectly formatted data.
"""
def set_of_lengths(x):
# return a set with the variation between
# different shapes, with None => 0
if x is None:
return {0}
else:
return set([0 if y is None else y.shape[0] for y in x])
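  # e.g. two inputs with 32 samples each -> {32}; a length mismatch yields a
  # set with more than one element, which is reported below.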
set_x = set_of_lengths(inputs)
set_y = set_of_lengths(targets)
set_w = set_of_lengths(weights)
if len(set_x) > 1:
raise ValueError('All input arrays (x) should have '
'the same number of samples. Got array shapes: ' + str(
[x.shape for x in inputs]))
if len(set_y) > 1:
raise ValueError('All target arrays (y) should have '
'the same number of samples. Got array shapes: ' + str(
[y.shape for y in targets]))
if set_x and set_y and list(set_x)[0] != list(set_y)[0]:
raise ValueError('Input arrays should have '
'the same number of samples as target arrays. '
'Found ' + str(list(set_x)[0]) + ' input samples '
'and ' + str(list(set_y)[0]) + ' target samples.')
if len(set_w) > 1:
raise ValueError('All sample_weight arrays should have '
'the same number of samples. Got array shapes: ' + str(
[w.shape for w in weights]))
if set_y and set_w and list(set_y)[0] != list(set_w)[0]:
raise ValueError('Sample_weight arrays should have '
'the same number of samples as target arrays. Got ' +
str(list(set_y)[0]) + ' input samples and ' +
str(list(set_w)[0]) + ' target samples.')
def _check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
"""Does validation on the compatibility of targets and loss functions.
This helps prevent users from using loss functions incorrectly.
Arguments:
targets: list of Numpy arrays of targets.
loss_fns: list of loss functions.
output_shapes: list of shapes of model outputs.
Raises:
ValueError: if a loss function or target array
is incompatible with an output.
"""
key_losses = {
losses.mean_squared_error, losses.binary_crossentropy,
losses.categorical_crossentropy
}
for y, loss, shape in zip(targets, loss_fns, output_shapes):
if loss is None:
continue
if loss is losses.categorical_crossentropy:
if y.shape[-1] == 1:
raise ValueError('You are passing a target array of shape ' + str(
y.shape) + ' while using as loss `categorical_crossentropy`. '
'`categorical_crossentropy` expects '
'targets to be binary matrices (1s and 0s) '
'of shape (samples, classes). '
'If your targets are integer classes, '
'you can convert them to the expected format via:\n'
'```\n'
'from keras.utils import to_categorical\n'
'y_binary = to_categorical(y_int)\n'
'```\n'
'\n'
'Alternatively, you can use the loss function '
'`sparse_categorical_crossentropy` instead, '
'which does expect integer targets.')
if loss in key_losses:
for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
if out_dim is not None and target_dim != out_dim:
raise ValueError('A target array with shape ' + str(y.shape) +
' was passed for an output of shape ' + str(shape) +
' while using as loss `' + loss.__name__ + '`. '
'This loss expects '
'targets to have the same shape '
'as the output.')
def _collect_metrics(metrics, output_names):
"""Maps metric functions to model outputs.
Arguments:
metrics: a list or dict of metric functions.
output_names: a list of the names (strings) of model outputs.
Returns:
A list (one entry per model output) of lists of metric functions.
For instance, if the model has 2 outputs, and for the first output
we want to compute "binary_accuracy" and "binary_crossentropy",
and just "binary_accuracy" for the second output,
the list would look like:
`[[binary_accuracy, binary_crossentropy], [binary_accuracy]]`
Raises:
TypeError: if an incorrect type is passed for the `metrics` argument.
"""
if not metrics:
return [[] for _ in output_names]
if isinstance(metrics, list):
# we then apply all metrics to all outputs.
return [copy.copy(metrics) for _ in output_names]
elif isinstance(metrics, dict):
nested_metrics = []
for name in output_names:
output_metrics = metrics.get(name, [])
if not isinstance(output_metrics, list):
output_metrics = [output_metrics]
nested_metrics.append(output_metrics)
return nested_metrics
else:
raise TypeError('Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: ' + str(metrics))
def _batch_shuffle(index_array, batch_size):
"""Shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
Arguments:
index_array: array of indices to be shuffled.
batch_size: integer.
Returns:
The `index_array` array, shuffled in a batch-wise fashion.
"""
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size:]
index_array = index_array[:batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
def _make_batches(size, batch_size):
"""Returns a list of batch indices (tuples of indices).
Arguments:
size: Integer, total size of the data to slice into batches.
batch_size: Integer, batch size.
Returns:
A list of tuples of array indices.
"""
num_batches = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(num_batches)]
def _slice_arrays(arrays, start=None, stop=None):
"""Slice an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `_slice_arrays(x, indices)`
Arguments:
arrays: Single array or list of arrays.
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
Returns:
A slice of the array(s).
"""
if arrays is None:
return [None]
elif isinstance(arrays, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [None if x is None else x[start] for x in arrays]
else:
return [None if x is None else x[start:stop] for x in arrays]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return arrays[start]
elif hasattr(start, '__getitem__'):
return arrays[start:stop]
else:
return [None]
def _weighted_masked_objective(fn):
"""Adds support for masking and sample-weighting to an objective function.
It transforms an objective function `fn(y_true, y_pred)`
into a sample-weighted, cost-masked objective function
`fn(y_true, y_pred, weights, mask)`.
Arguments:
fn: The objective function to wrap,
with signature `fn(y_true, y_pred)`.
Returns:
A function with signature `fn(y_true, y_pred, weights, mask)`.
"""
if fn is None:
return None
def weighted(y_true, y_pred, weights, mask=None):
"""Wrapper function.
Arguments:
y_true: `y_true` argument of `fn`.
y_pred: `y_pred` argument of `fn`.
weights: Weights tensor.
mask: Mask tensor.
Returns:
Scalar tensor.
"""
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
mask = K.cast(mask, K.floatx())
# mask should have the same shape as score_array
score_array *= mask
# the loss per batch should be proportional
# to the number of unmasked samples.
score_array /= K.mean(mask)
# apply sample weighting
if weights is not None:
# reduce score_array to same ndim as weight array
ndim = K.ndim(score_array)
weight_ndim = K.ndim(weights)
score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
score_array *= weights
score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
return K.mean(score_array)
return weighted
def _standardize_weights(y,
sample_weight=None,
class_weight=None,
sample_weight_mode=None):
"""Performs sample weight validation and standardization.
Everything gets normalized to a single sample-wise (or timestep-wise)
weight array.
Arguments:
y: Numpy array of model targets to be weighted.
sample_weight: User-provided `sample_weight` argument.
class_weight: User-provided `class_weight` argument.
sample_weight_mode: One of `None` or `"temporal"`.
`"temporal"` indicated that we expect 2D weight data
that will be applied to the last 2 dimensions of
the targets (i.e. we are weighting timesteps, not samples).
Returns:
A numpy array of target weights, one entry per sample to weight.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
raise ValueError('"sample_weight_mode '
'should be None or "temporal". '
'Found: ' + str(sample_weight_mode))
if len(y.shape) < 3:
raise ValueError('Found a sample_weight array for '
'an input with shape ' + str(y.shape) + '. '
'Timestep-wise sample weighting (use of '
'sample_weight_mode="temporal") is restricted to '
'outputs that are at least 3D, i.e. that have '
'a time dimension.')
if sample_weight is not None and len(sample_weight.shape) != 2:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weighting, '
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weights, '
'you should specify '
'sample_weight_mode="temporal" '
'in compile(). If you just mean to use '
'sample-wise weights, make sure your '
'sample_weight array is 1D.')
if sample_weight is not None:
if len(sample_weight.shape) > len(y.shape):
      raise ValueError('Found a sample_weight with shape ' +
                       str(sample_weight.shape) + '. '
'Expected sample_weight with rank '
'less than or equal to ' + str(len(y.shape)))
if y.shape[:sample_weight.ndim] != sample_weight.shape:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + ' for an input with shape ' +
str(y.shape) + '. '
'sample_weight cannot be broadcast.')
return sample_weight
elif isinstance(class_weight, dict):
if len(y.shape) > 2:
raise ValueError('`class_weight` not supported for '
'3+ dimensional targets.')
if y.shape[1] > 1:
y_classes = y.argmax(axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
weights = np.asarray(
[class_weight[cls] for cls in y_classes if cls in class_weight])
if len(weights) != len(y_classes):
# subtract the sets to pick all missing classes
existing_classes = set(y_classes)
existing_class_weight = set(class_weight.keys())
raise ValueError('`class_weight` must contain all classes in the data.'
' The classes %s exist in the data but not in '
'`class_weight`.' %
(existing_classes - existing_class_weight))
return weights
else:
if sample_weight_mode is None:
return np.ones((y.shape[0],), dtype=K.floatx())
else:
return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
class Model(Network):
"""The `Model` class adds training & evaluation routines to a `Network`.
"""
def compile(self,
optimizer,
loss,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
**kwargs):
"""Configures the model for training.
Arguments:
optimizer: String (name of optimizer) or optimizer instance.
See [optimizers](/optimizers).
loss: String (name of objective function) or objective function.
See [losses](/losses).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of losses.
The loss value that will be minimized by the model
will then be the sum of all individual losses.
metrics: List of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions
of different model outputs.
The loss value that will be minimized by the model
will then be the *weighted sum* of all individual losses,
weighted by the `loss_weights` coefficients.
If a list, it is expected to have a 1:1 mapping
        to the model's outputs. If a dict, it is expected to map
output names (strings) to scalar coefficients.
sample_weight_mode: If you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
weighted_metrics: List of metrics to be evaluated and weighted
by sample_weight or class_weight during training and testing.
target_tensors: By default, Keras will create placeholders for the
model's target, which will be fed with the target data during
training. If instead you would like to use your own
target tensors (in turn, Keras will not expect external
Numpy data for these targets at training time), you
can specify them via the `target_tensors` argument. It can be
a single tensor (for a single-output model), a list of tensors,
or a dict mapping output names to target tensors.
**kwargs: These arguments are passed to `tf.Session.run`.
Raises:
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
loss = loss or {}
self.optimizer = optimizers.get(optimizer)
self.sample_weight_mode = sample_weight_mode
self.loss = loss
self.loss_weights = loss_weights
self.sample_weight_mode = sample_weight_mode
# Prepare loss functions.
if isinstance(loss, dict):
for name in loss:
if name not in self.output_names:
raise ValueError('Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
logging.warning(
'Output "' + name + '" missing from loss dictionary. '
'We assume this was done on purpose, '
'and we will not be expecting '
'any data to be passed to "' + name + '" during training.')
loss_functions.append(losses.get(loss.get(name)))
elif isinstance(loss, list):
if len(loss) != len(self.outputs):
raise ValueError('When passing a list as loss, '
                         'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' + str(loss))
loss_functions = [losses.get(l) for l in loss]
else:
loss_function = losses.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [_weighted_masked_objective(fn) for fn in loss_functions]
skip_target_indices = []
skip_target_weighing_indices = []
self._feed_outputs = []
self._feed_output_names = []
self._feed_output_shapes = []
self._feed_loss_fns = []
for i in range(len(weighted_losses)):
if weighted_losses[i] is None:
skip_target_indices.append(i)
skip_target_weighing_indices.append(i)
# Prepare output masks.
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
if not isinstance(masks, list):
masks = [masks]
# Prepare loss weights.
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif isinstance(loss_weights, dict):
for name in loss_weights:
if name not in self.output_names:
raise ValueError('Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif isinstance(loss_weights, list):
if len(loss_weights) != len(self.outputs):
raise ValueError('When passing a list as loss_weights, '
                       'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
loss_weights_list = loss_weights
else:
raise TypeError('Could not interpret loss_weights argument: ' +
                      str(loss_weights) + ' - expected a list or a dict.')
# Prepare targets of model.
self.targets = []
self._feed_targets = []
if target_tensors is not None:
if isinstance(target_tensors, list):
if len(target_tensors) != len(self.outputs):
raise ValueError('When passing a list as `target_tensors`, '
                           'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed target_tensors=' +
str(target_tensors))
elif isinstance(target_tensors, dict):
for name in target_tensors:
if name not in self.output_names:
raise ValueError('Unknown entry in `target_tensors` '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
target_tensors_ = []
for name in self.output_names:
target_tensors_.append(target_tensors.get(name, None))
target_tensors = target_tensors_
else:
raise TypeError('Expected `target_tensors` to be '
'a list or dict, but got:', target_tensors)
for i in range(len(self.outputs)):
if i in skip_target_indices:
self.targets.append(None)
else:
shape = self.internal_output_shapes[i]
name = self.output_names[i]
if target_tensors is not None:
target = target_tensors[i]
else:
target = None
if target is None or K.is_placeholder(target):
if target is None:
target = K.placeholder(
ndim=len(shape),
name=name + '_target',
sparse=K.is_sparse(self.outputs[i]),
dtype=K.dtype(self.outputs[i]))
self._feed_targets.append(target)
self._feed_outputs.append(self.outputs[i])
self._feed_output_names.append(name)
self._feed_output_shapes.append(shape)
self._feed_loss_fns.append(self.loss_functions[i])
else:
skip_target_weighing_indices.append(i)
self.targets.append(target)
# Prepare sample weights.
sample_weights = []
sample_weight_modes = []
if isinstance(sample_weight_mode, dict):
for name in sample_weight_mode:
if name not in self.output_names:
raise ValueError('Unknown entry in '
'sample_weight_mode dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
for i, name in enumerate(self.output_names):
if i in skip_target_weighing_indices:
weight = None
sample_weight_modes.append(None)
else:
if name not in sample_weight_mode:
raise ValueError('Output "' + name +
'" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif isinstance(sample_weight_mode, list):
if len(sample_weight_mode) != len(self.outputs):
raise ValueError('When passing a list as sample_weight_mode, '
                         'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed '
'sample_weight_mode=' + str(sample_weight_mode))
for i in range(len(self.output_names)):
if i in skip_target_weighing_indices:
weight = None
sample_weight_modes.append(None)
else:
mode = sample_weight_mode[i]
name = self.output_names[i]
if mode == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
for i, name in enumerate(self.output_names):
if i in skip_target_weighing_indices:
sample_weight_modes.append(None)
sample_weights.append(None)
else:
if sample_weight_mode == 'temporal':
sample_weights.append(
K.placeholder(ndim=2, name=name + '_sample_weights'))
sample_weight_modes.append('temporal')
else:
sample_weights.append(
K.placeholder(ndim=1, name=name + '_sample_weights'))
sample_weight_modes.append(None)
self.sample_weight_modes = sample_weight_modes
self._feed_sample_weight_modes = []
for i in range(len(self.outputs)):
if i not in skip_target_weighing_indices:
self._feed_sample_weight_modes.append(self.sample_weight_modes[i])
# Prepare metrics.
self.metrics = metrics
self.weighted_metrics = weighted_metrics
self.metrics_names = ['loss']
self.metrics_tensors = []
# Compute total loss.
total_loss = None
with K.name_scope('loss'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
with K.name_scope(self.output_names[i] + '_loss'):
output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
if total_loss is None:
if not self.losses:
raise ValueError('The model cannot be compiled '
'because it has no loss to optimize.')
else:
total_loss = 0.
# Add regularization penalties
# and other layer-specific losses.
for loss_tensor in self.losses:
total_loss += loss_tensor
# List of same size as output_names.
# contains tuples (metrics for output, names of metrics).
nested_metrics = _collect_metrics(metrics, self.output_names)
nested_weighted_metrics = _collect_metrics(weighted_metrics,
self.output_names)
def append_metric(layer_index, metric_name, metric_tensor):
"""Helper function used in loop below."""
if len(self.output_names) > 1:
metric_name = self.output_names[layer_index] + '_' + metric_name
self.metrics_names.append(metric_name)
self.metrics_tensors.append(metric_tensor)
with K.name_scope('metrics'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weights = sample_weights[i]
output_metrics = nested_metrics[i]
output_weighted_metrics = nested_weighted_metrics[i]
def handle_metrics(metrics, weights=None):
metric_name_prefix = 'weighted_' if weights is not None else ''
for metric in metrics:
if metric == 'accuracy' or metric == 'acc':
# custom handling of accuracy
# (because of class mode duality)
output_shape = self.internal_output_shapes[i]
if (output_shape[-1] == 1 or
self.loss_functions[i] == losses.binary_crossentropy):
# case: binary accuracy
acc_fn = metrics_module.binary_accuracy
elif self.loss_functions[
i] == losses.sparse_categorical_crossentropy:
# case: categorical accuracy with sparse targets
acc_fn = metrics_module.sparse_categorical_accuracy
else:
acc_fn = metrics_module.categorical_accuracy
weighted_metric_fn = _weighted_masked_objective(acc_fn)
metric_name = metric_name_prefix + 'acc'
else:
metric_fn = metrics_module.get(metric)
weighted_metric_fn = _weighted_masked_objective(metric_fn)
metric_name = metric_name_prefix + metric_fn.__name__
with K.name_scope(metric_name):
metric_result = weighted_metric_fn(
y_true, y_pred, weights=weights, mask=masks[i])
append_metric(i, metric_name, metric_result)
handle_metrics(output_metrics)
handle_metrics(output_weighted_metrics, weights=weights)
# Prepare gradient updates and state updates.
self.total_loss = total_loss
self.sample_weights = sample_weights
self._feed_sample_weights = []
for i in range(len(self.sample_weights)):
if i not in skip_target_weighing_indices:
self._feed_sample_weights.append(sample_weights[i])
# Functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
# Collected trainable weights, sorted in topological order.
trainable_weights = self.trainable_weights
self._collected_trainable_weights = trainable_weights
def _check_trainable_weights_consistency(self):
"""Check trainable weights count consistency.
This will raise a warning if `trainable_weights` and
    `_collected_trainable_weights` are inconsistent (i.e. do not have the
    same number of parameters).
Inconsistency will typically arise when one modifies `model.trainable`
without calling `model.compile` again.
"""
if not hasattr(self, '_collected_trainable_weights'):
return
if len(self.trainable_weights) != len(self._collected_trainable_weights):
logging.warning(
'Discrepancy between trainable weights and collected trainable'
' weights, did you set `model.trainable` without calling'
' `model.compile` afterwards?')
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise RuntimeError('You must compile your model before using it.')
self._check_trainable_weights_consistency()
if self.train_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
with K.name_scope('training'):
with K.name_scope(self.optimizer.__class__.__name__):
training_updates = self.optimizer.get_updates(
params=self._collected_trainable_weights, loss=self.total_loss)
updates = self.updates + training_updates
# Gets loss and metrics. Updates weights at each call.
self.train_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
# Return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=self.state_updates,
name='test_function',
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self._feed_inputs + [K.learning_phase()]
else:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(
inputs,
self.outputs,
updates=self.state_updates,
name='predict_function',
**kwargs)
def _check_num_samples(self,
ins,
batch_size=None,
steps=None,
steps_name='steps'):
"""Determine the number of samples provided for training and evaluation.
The number of samples is not defined when running with `steps`,
in which case the number of samples is set to `None`.
Arguments:
ins: List of tensors to be fed to the Keras function.
batch_size: Integer batch size or `None` if not defined.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
steps_name: The public API's parameter name for `steps`.
Raises:
ValueError: when `steps` is `None` and the attribute `ins.shape`
does not exist. Also raises ValueError when `steps` is not `None`
and `batch_size` is not `None` because they are mutually
exclusive.
Returns:
When steps is `None`, returns the number of samples to be
processed based on the size of the first dimension of the
first input numpy array. When steps is not `None` and
`batch_size` is `None`, returns `None`.
"""
if steps is not None:
num_samples = None
if batch_size is not None:
raise ValueError('If ' + steps_name +
' is set, the `batch_size` must be None.')
elif ins and hasattr(ins[0], 'shape'):
num_samples = ins[0].shape[0]
else:
raise ValueError('Either the input data should have '
'a defined shape, or ' + steps_name +
' should be specified.')
return num_samples
def _fit_loop(self,
f,
ins,
out_labels=None,
batch_size=None,
epochs=100,
verbose=1,
callbacks=None,
val_f=None,
val_ins=None,
shuffle=True,
callback_metrics=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None):
"""Abstract fit function for `f(ins)`.
Assume that f returns a list, labeled by out_labels.
Arguments:
f: Keras function returning a list of tensors
ins: List of tensors to be fed to `f`
out_labels: List of strings, display names of
the outputs of `f`
batch_size: Integer batch size or None if unknown.
epochs: Number of times to iterate over the data
verbose: Verbosity mode, 0, 1 or 2
callbacks: List of callbacks to be called during training
val_f: Keras function to call for validation
val_ins: List of tensors to be fed to `val_f`
shuffle: Whether to shuffle the data at the beginning of each epoch
callback_metrics: List of strings, the display names of the metrics
passed to the callbacks. They should be the
concatenation of list the display names of the outputs of
`f` and the list of display names of the outputs of `f_val`.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. Ignored with the default value of `None`.
validation_steps: Number of steps to run validation for (only if doing
validation from data tensors). Ignored with default value of `None`.
Returns:
`History` object.
Raises:
ValueError: In case of invalid argument values.
"""
do_validation = False
if val_f and val_ins:
do_validation = True
if (verbose and ins and
hasattr(ins[0], 'shape') and hasattr(val_ins[0], 'shape')):
print('Train on %d samples, validate on %d samples' %
(ins[0].shape[0], val_ins[0].shape[0]))
if validation_steps:
if steps_per_epoch is None:
raise ValueError('Can only use `validation_steps` when doing step-wise '
'training, i.e. `steps_per_epoch` must be set.')
do_validation = True
num_train_samples = self._check_num_samples(
ins, batch_size, steps_per_epoch, 'steps_per_epoch')
if num_train_samples is not None:
index_array = np.arange(num_train_samples)
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
if verbose:
if steps_per_epoch is not None:
count_mode = 'steps'
else:
count_mode = 'samples'
callbacks += [cbks.ProgbarLogger(count_mode)]
callbacks = cbks.CallbackList(callbacks)
out_labels = out_labels or []
# it's possible to callback a different model than self
# (used by Sequential models)
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks.set_model(callback_model)
callbacks.set_params({
'batch_size': batch_size,
'epochs': epochs,
'steps': steps_per_epoch,
'samples': num_train_samples,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics or [],
})
callbacks.on_train_begin()
callback_model.stop_training = False
for cbk in callbacks:
cbk.validation_data = val_ins
for epoch in range(initial_epoch, epochs):
callbacks.on_epoch_begin(epoch)
epoch_logs = {}
if steps_per_epoch is not None:
for step_index in range(steps_per_epoch):
batch_logs = {}
batch_logs['batch'] = step_index
batch_logs['size'] = 1
callbacks.on_batch_begin(step_index, batch_logs)
outs = f(ins)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(step_index, batch_logs)
if callback_model.stop_training:
break
if do_validation:
val_outs = self._test_loop(
val_f,
val_ins,
batch_size=batch_size,
steps=validation_steps,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
else:
if shuffle == 'batch':
index_array = _batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = _make_batches(num_train_samples, batch_size)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
except TypeError:
raise TypeError('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(ins_batch)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if callback_model.stop_training:
break
if batch_index == len(batches) - 1: # Last batch.
if do_validation:
val_outs = self._test_loop(
val_f, val_ins, batch_size=batch_size, verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
callbacks.on_train_end()
return self.history
def _predict_loop(self, f, ins, batch_size=32, verbose=0, steps=None):
"""Abstract method to loop over some data in batches.
Arguments:
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
Returns:
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
num_samples = self._check_num_samples(ins, batch_size, steps, 'steps')
if verbose == 1:
if steps is not None:
progbar = Progbar(target=steps)
else:
progbar = Progbar(target=num_samples)
if steps is not None:
# Step-based predictions.
# Since we do not know how many samples
# we will see, we cannot pre-allocate
# the returned Numpy arrays.
# Instead, we store one array per batch seen
# and concatenate them upon returning.
unconcatenated_outs = []
for step in range(steps):
batch_outs = f(ins)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if step == 0:
for batch_out in batch_outs:
unconcatenated_outs.append([])
for i, batch_out in enumerate(batch_outs):
unconcatenated_outs[i].append(batch_out)
if verbose == 1:
progbar.update(step + 1)
if len(unconcatenated_outs) == 1:
return np.concatenate(unconcatenated_outs[0], axis=0)
return [
np.concatenate(unconcatenated_outs[i], axis=0)
for i in range(len(unconcatenated_outs))
]
else:
# Sample-based predictions.
outs = []
batches = _make_batches(num_samples, batch_size)
index_array = np.arange(num_samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if ins and isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
batch_outs = f(ins_batch)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if batch_index == 0:
# Pre-allocate the results arrays.
for batch_out in batch_outs:
shape = (num_samples,) + batch_out.shape[1:]
outs.append(np.zeros(shape, dtype=batch_out.dtype))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
if len(outs) == 1:
return outs[0]
return outs
def _test_loop(self, f, ins, batch_size=None, verbose=0, steps=None):
"""Abstract method to loop over some data in batches.
Arguments:
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size or `None`.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
num_samples = self._check_num_samples(ins, batch_size, steps, 'steps')
outs = []
if verbose == 1:
if steps is not None:
progbar = Progbar(target=steps)
else:
progbar = Progbar(target=num_samples)
if steps is not None:
for step in range(steps):
batch_outs = f(ins)
if isinstance(batch_outs, list):
if step == 0:
for _ in enumerate(batch_outs):
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out
else:
if step == 0:
outs.append(0.)
outs[0] += batch_outs
if verbose == 1:
progbar.update(step + 1)
for i in range(len(outs)):
outs[i] /= steps
else:
if verbose == 1:
progbar = Progbar(target=num_samples)
batches = _make_batches(num_samples, batch_size)
index_array = np.arange(num_samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
batch_outs = f(ins_batch)
if isinstance(batch_outs, list):
if batch_index == 0:
for _ in enumerate(batch_outs):
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i in range(len(outs)):
outs[i] /= num_samples
if len(outs) == 1:
return outs[0]
return outs
def _standardize_user_data(self,
x,
y,
sample_weight=None,
class_weight=None,
check_batch_axis=True,
batch_size=None):
if not hasattr(self, 'optimizer'):
raise RuntimeError('You must compile a model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
output_shapes = []
for output_shape, loss_fn in zip(self._feed_output_shapes,
self._feed_loss_fns):
if loss_fn is losses.sparse_categorical_crossentropy:
output_shapes.append(output_shape[:-1] + (1,))
else:
output_shapes.append(output_shape)
x = _standardize_input_data(
x,
self._feed_input_names,
self._feed_input_shapes,
check_batch_axis=False,
exception_prefix='input')
y = _standardize_input_data(
y,
self._feed_output_names,
output_shapes,
check_batch_axis=False,
exception_prefix='target')
sample_weights = _standardize_sample_weights(sample_weight,
self._feed_output_names)
class_weights = _standardize_class_weights(class_weight,
self._feed_output_names)
sample_weights = [
_standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
self._feed_sample_weight_modes)
]
_check_array_lengths(x, y, sample_weights)
_check_loss_and_target_compatibility(y, self._feed_loss_fns,
self._feed_output_shapes)
if self.stateful and batch_size:
if x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
def _get_deduped_metrics_names(self):
out_labels = self.metrics_names
# Rename duplicated metrics name
# (can happen with an output layer shared among multiple dataflows).
deduped_out_labels = []
for i, label in enumerate(out_labels):
new_label = label
if out_labels.count(label) > 1:
dup_idx = out_labels[:i].count(label)
new_label += '_' + str(dup_idx + 1)
deduped_out_labels.append(new_label)
return deduped_out_labels
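# Illustrative sketch (not part of the original source): the dedup above gives
# every duplicated label a 1-based occurrence suffix while unique labels are
# left untouched, e.g.
#
#     ['loss', 'dense_1_loss', 'dense_1_loss']
#     # -> ['loss', 'dense_1_loss_1', 'dense_1_loss_2']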
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
Arguments:
x: Numpy array of training data (if the model has a single input),
or list of Numpy arrays (if the model has multiple inputs).
If input layers in the model are named, you can also pass a
dictionary mapping input names to Numpy arrays.
`x` can be `None` (default) if feeding from
TensorFlow data tensors.
y: Numpy array of target (label) data
(if the model has a single output),
or list of Numpy arrays (if the model has multiple outputs).
If output layers in the model are named, you can also pass a
dictionary mapping output names to Numpy arrays.
`y` can be `None` (default) if feeding from
TensorFlow data tensors.
Can be `None` (default) if feeding from framework-native tensors.
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, it will default to 32.
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See [callbacks](/callbacks).
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling.
validation_data: tuple `(x_val, y_val)` or tuple
`(x_val, y_val, val_sample_weights)` on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data.
This will override `validation_split`.
shuffle: Boolean (whether to shuffle the training data
before each epoch) or str (for 'batch').
'batch' is a special option for dealing with the
limitations of HDF5 data; it shuffles in batch-sized chunks.
Has no effect when `steps_per_epoch` is not `None`.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class.
sample_weight: Optional Numpy array of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with input tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of unique samples in your dataset divided by
the batch size, or 1 if that cannot be determined.
validation_steps: Only relevant if `steps_per_epoch`
is specified. Total number of steps (batches of samples)
to validate before stopping.
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
Raises:
ValueError: In case of mismatch between the provided input data
and what the model expects.
"""
# Backwards compatibility
if batch_size is None and steps_per_epoch is None:
batch_size = 32
if x is None and y is None and steps_per_epoch is None:
raise ValueError('If fitting from data tensors, '
'you should specify the `steps_per_epoch` '
'argument.')
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_axis=False,
batch_size=batch_size)
# Prepare validation data.
do_validation = False
val_ins = []
if validation_data:
do_validation = True
if len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError(
'When passing validation_data, '
'it must contain 2 (x_val, y_val) '
'or 3 (x_val, y_val, val_sample_weights) '
'items, however it contains %d items' % len(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x,
val_y,
sample_weight=val_sample_weight,
check_batch_axis=False,
batch_size=batch_size)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_split and 0. < validation_split < 1.:
do_validation = True
if hasattr(x[0], 'shape'):
split_at = int(x[0].shape[0] * (1. - validation_split))
else:
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (_slice_arrays(x, 0, split_at), _slice_arrays(x, split_at))
y, val_y = (_slice_arrays(y, 0, split_at), _slice_arrays(y, split_at))
sample_weights, val_sample_weights = (_slice_arrays(
sample_weights, 0, split_at), _slice_arrays(sample_weights, split_at))
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_steps:
do_validation = True
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = [0.]
# Prepare input arrays and training function.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
f = self.train_function
# Prepare display labels.
out_labels = self._get_deduped_metrics_names()
if do_validation:
self._make_test_function()
val_f = self.test_function
callback_metrics = copy.copy(out_labels) + [
'val_' + n for n in out_labels
]
else:
val_f = None
callback_metrics = copy.copy(out_labels)
# Delegate logic to `_fit_loop`.
return self._fit_loop(
f,
ins,
out_labels=out_labels,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_f=val_f,
val_ins=val_ins,
shuffle=shuffle,
callback_metrics=callback_metrics,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
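# Illustrative usage sketch (not part of the original source). Layer sizes,
# data and import paths below are assumptions for demonstration only and may
# differ between Keras distributions:
#
#     import numpy as np
#     from keras.models import Sequential
#     from keras.layers import Dense
#
#     model = Sequential([Dense(8, activation='relu', input_shape=(4,)),
#                         Dense(1, activation='sigmoid')])
#     model.compile(optimizer='rmsprop', loss='binary_crossentropy',
#                   metrics=['accuracy'])
#     x = np.random.random((100, 4))
#     y = np.random.randint(2, size=(100, 1))
#     history = model.fit(x, y, batch_size=16, epochs=3, validation_split=0.2)
#     print(history.history['loss'])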
def evaluate(self,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
Arguments:
x: Numpy array of test data (if the model has a single input),
or list of Numpy arrays (if the model has multiple inputs).
If input layers in the model are named, you can also pass a
dictionary mapping input names to Numpy arrays.
`x` can be `None` (default) if feeding from
framework-native tensors (e.g. TensorFlow data tensors).
y: Numpy array of target (label) data
(if the model has a single output),
or list of Numpy arrays (if the model has multiple outputs).
If output layers in the model are named, you can also pass a
dictionary mapping output names to Numpy arrays.
`y` can be `None` (default) if feeding from
framework-native tensors (e.g. TensorFlow data tensors).
batch_size: Integer or `None`.
Number of samples per evaluation step.
If unspecified, `batch_size` will default to 32.
verbose: 0 or 1. Verbosity mode.
0 = silent, 1 = progress bar.
sample_weight: Optional Numpy array of weights for
the test samples, used for weighting the loss function.
You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`.
steps: Integer or `None`.
Total number of steps (batches of samples)
before declaring the evaluation round finished.
The default `None` is equal to the number of unique samples in
your dataset divided by the batch size.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid arguments.
"""
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
if x is None and y is None and steps is None:
raise ValueError('If evaluating from data tensors, '
'you should specify the `steps` '
'argument.')
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
check_batch_axis=False,
batch_size=batch_size)
# Prepare inputs, delegate logic to `_test_loop`.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
f = self.test_function
return self._test_loop(
f, ins, batch_size=batch_size, verbose=verbose, steps=steps)
def predict(self, x, batch_size=None, verbose=0, steps=None):
"""Generates output predictions for the input samples.
Computation is done in batches.
Arguments:
x: The input data, as a Numpy array
(or list of Numpy arrays if the model has multiple outputs).
batch_size: Integer. If unspecified, it will default to 32.
verbose: Verbosity mode, 0 or 1.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
"""
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
if x is None and steps is None:
raise ValueError('If predicting from data tensors, '
'you should specify the `steps` '
'argument.')
# Validate user data.
x = _standardize_input_data(
x,
self._feed_input_names,
self._feed_input_shapes,
check_batch_axis=False)
if self.stateful:
if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples. '
'Batch size: ' + str(batch_size) + '.')
# Prepare inputs, delegate logic to `_predict_loop`.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + [0.]
else:
ins = x
self._make_predict_function()
f = self.predict_function
return self._predict_loop(
f, ins, batch_size=batch_size, verbose=verbose, steps=steps)
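# Illustrative usage sketch (not part of the original source); assumes `model`,
# `x` and `y` from the `fit` sketch above:
#
#     loss, acc = model.evaluate(x, y, batch_size=32, verbose=0)
#     probs = model.predict(x, batch_size=32)   # shape (100, 1)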
def train_on_batch(self, x, y, sample_weight=None, class_weight=None):
"""Runs a single gradient update on a single batch of data.
Arguments:
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
class_weight: Optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
Returns:
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_axis=True)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y, sample_weight=None):
"""Test the model on a single batch of samples.
Arguments:
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight, check_batch_axis=True)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Arguments:
x: Input samples, as a Numpy array.
Returns:
Numpy array(s) of predictions.
"""
x = _standardize_input_data(x, self._feed_input_names,
self._feed_input_shapes)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + [0.]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
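# Illustrative sketch (not part of the original source): the *_on_batch methods
# consume exactly one batch, with no internal batching, shuffling or callbacks.
# Assumes `model`, `x` and `y` from the `fit` sketch above:
#
#     x_batch, y_batch = x[:16], y[:16]
#     train_outs = model.train_on_batch(x_batch, y_batch)
#     test_outs = model.test_on_batch(x_batch, y_batch)
#     batch_preds = model.predict_on_batch(x_batch)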
def fit_generator(self,
generator,
steps_per_epoch,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0,
**kwargs):
"""Fits the model on data yielded batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
The use of `keras.utils.Sequence` guarantees the ordering
and guarantees the single use of every input per epoch when
using `use_multiprocessing=True`.
Arguments:
generator: A generator or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data when using multiprocessing.
The output of the generator must be either
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
be equal to the number of unique samples of your dataset
divided by the batch size. Not used if using `Sequence`.
epochs: Integer, total number of iterations on the data.
verbose: Verbosity mode, 0, 1, or 2.
callbacks: List of callbacks to be called during training.
validation_data: This can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
validation_steps: Only relevant if `validation_data`
is a generator. Total number of steps (batches of samples)
to yield from `generator` before stopping.
class_weight: Dictionary mapping class indices to a weight
for the class.
max_queue_size: Maximum size for the generator queue
workers: Maximum number of processes to spin up
when using process-based threading.
use_multiprocessing: If True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
shuffle: Whether to shuffle the data at the beginning of each
epoch. Only used with instances of `Sequence`
(`keras.utils.Sequence`).
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
**kwargs: support for legacy arguments.
Returns:
A `History` object.
Example:
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
if kwargs:
raise ValueError('Unrecognized keyword arguments: ' + str(kwargs))
wait_time = 0.01 # in seconds
epoch = initial_epoch
do_validation = bool(validation_data)
self._make_train_function()
if do_validation:
self._make_test_function()
# python 2 has 'next', 3 has '__next__'
# avoid any explicit version checks
val_gen = (hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__') or
isinstance(validation_data, Sequence))
if val_gen and not validation_steps:
raise ValueError('When using a generator for validation data, '
'you must specify a value for '
'`validation_steps`.')
# Prepare display labels.
out_labels = self._get_deduped_metrics_names()
callback_metrics = out_labels + ['val_' + n for n in out_labels]
# prepare callbacks
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger(count_mode='steps')]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self:
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks.set_model(callback_model)
callbacks.set_params({
'epochs': epochs,
'steps': steps_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
if do_validation and not val_gen:
if len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError('`validation_data` should be a tuple '
'`(val_x, val_y, val_sample_weight)` '
'or `(val_x, val_y)`. Found: ' + str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x, val_y, val_sample_weight)
val_data = val_x + val_y + val_sample_weights
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_data += [0.]
for cbk in callbacks:
cbk.validation_data = val_data
is_sequence = isinstance(generator, Sequence)
if not is_sequence and use_multiprocessing and workers > 1:
logging.warning('Using a generator with `use_multiprocessing=True`'
' and multiple workers may duplicate your data.'
' Please consider using the `keras.utils.Sequence`'
' class.')
if is_sequence:
steps_per_epoch = len(generator)
enqueuer = None
try:
if is_sequence:
enqueuer = OrderedEnqueuer(
generator, use_multiprocessing=use_multiprocessing, shuffle=shuffle)
else:
enqueuer = GeneratorEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
wait_time=wait_time)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
callback_model.stop_training = False
while epoch < epochs:
callbacks.on_epoch_begin(epoch)
steps_done = 0
batch_index = 0
while steps_done < steps_per_epoch:
generator_output = next(output_generator)
if not hasattr(generator_output, '__len__'):
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
# build batch logs
batch_logs = {}
if isinstance(x, list):
batch_size = x[0].shape[0]
elif isinstance(x, dict):
batch_size = list(x.values())[0].shape[0]
else:
batch_size = x.shape[0]
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
outs = self.train_on_batch(
x, y, sample_weight=sample_weight, class_weight=class_weight)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
# Construct epoch logs.
epoch_logs = {}
batch_index += 1
steps_done += 1
# Epoch finished.
if steps_done >= steps_per_epoch and do_validation:
if val_gen:
val_outs = self.evaluate_generator(
validation_data,
validation_steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
else:
# No need for try/except because
# data has already been validated.
val_outs = self.evaluate(
val_x,
val_y,
batch_size=batch_size,
sample_weight=val_sample_weights,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
if callback_model.stop_training:
break
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if callback_model.stop_training:
break
finally:
if enqueuer is not None:
enqueuer.stop()
callbacks.on_train_end()
return self.history
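# Illustrative sketch (not part of the original source): a minimal `Sequence`
# that can be passed to `fit_generator`; shapes, batch size and the import path
# are assumptions and may differ between Keras distributions.
#
#     import numpy as np
#     from keras.utils import Sequence
#
#     class ArraySequence(Sequence):
#         def __init__(self, x, y, batch_size=32):
#             self.x, self.y, self.batch_size = x, y, batch_size
#
#         def __len__(self):
#             return int(np.ceil(len(self.x) / float(self.batch_size)))
#
#         def __getitem__(self, idx):
#             sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
#             return self.x[sl], self.y[sl]
#
#     seq = ArraySequence(x, y, batch_size=16)
#     model.fit_generator(seq, steps_per_epoch=len(seq), epochs=3)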
def evaluate_generator(self,
generator,
steps,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
**kwargs):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
Arguments:
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Not used if using `Sequence`.
max_queue_size: maximum size for the generator queue
workers: maximum number of processes to spin up
when using process-based threading.
use_multiprocessing: if True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
**kwargs: support for legacy arguments.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
if kwargs:
raise ValueError('Unrecognized keyword arguments: ' + str(kwargs))
self._make_test_function()
steps_done = 0
wait_time = 0.01
all_outs = []
batch_sizes = []
is_sequence = isinstance(generator, Sequence)
if not is_sequence and use_multiprocessing and workers > 1:
logging.warning('Using a generator with `use_multiprocessing=True`'
' and multiple workers may duplicate your data.'
' Please consider using the `keras.utils.Sequence`'
' class.')
if is_sequence:
steps = len(generator)
enqueuer = None
try:
if is_sequence:
enqueuer = OrderedEnqueuer(
generator, use_multiprocessing=use_multiprocessing)
else:
enqueuer = GeneratorEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
wait_time=wait_time)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
while steps_done < steps:
generator_output = next(output_generator)
if not hasattr(generator_output, '__len__'):
raise ValueError('Output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('Output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
outs = self.test_on_batch(x, y, sample_weight=sample_weight)
if isinstance(x, list):
batch_size = len(x[0])
elif isinstance(x, dict):
batch_size = len(list(x.values())[0])
else:
batch_size = len(x)
if batch_size == 0:
raise ValueError('Received an empty batch. '
'Batches should at least contain one item.')
all_outs.append(outs)
steps_done += 1
batch_sizes.append(batch_size)
finally:
if enqueuer is not None:
enqueuer.stop()
if not isinstance(outs, list):
return np.average(np.asarray(all_outs), weights=batch_sizes)
else:
averages = []
for i in range(len(outs)):
averages.append(
np.average([out[i] for out in all_outs], weights=batch_sizes))
return averages
def predict_generator(self,
generator,
steps,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0,
**kwargs):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
Arguments:
generator: Generator yielding batches of input samples
or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_queue_size: Maximum size for the generator queue.
Not used if using `Sequence`.
workers: Maximum number of processes to spin up
when using process-based threading.
use_multiprocessing: If `True`, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
verbose: verbosity mode, 0 or 1.
**kwargs: support for legacy arguments.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
self._make_predict_function()
steps_done = 0
wait_time = 0.01
all_outs = []
is_sequence = isinstance(generator, Sequence)
if not is_sequence and use_multiprocessing and workers > 1:
logging.warning('Using a generator with `use_multiprocessing=True`'
' and multiple workers may duplicate your data.'
' Please consider using the `keras.utils.Sequence`'
' class.')
if is_sequence:
steps = len(generator)
enqueuer = None
try:
if is_sequence:
enqueuer = OrderedEnqueuer(
generator, use_multiprocessing=use_multiprocessing)
else:
enqueuer = GeneratorEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
wait_time=wait_time)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
if verbose == 1:
progbar = Progbar(target=steps)
while steps_done < steps:
generator_output = next(output_generator)
if isinstance(generator_output, tuple):
# Compatibility with the generators
# used for training.
if len(generator_output) == 2:
x, _ = generator_output
elif len(generator_output) == 3:
x, _, _ = generator_output
else:
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
else:
# Assumes a generator that only
# yields inputs (not targets and sample weights).
x = generator_output
outs = self.predict_on_batch(x)
if not isinstance(outs, list):
outs = [outs]
if not all_outs:
for out in outs:
all_outs.append([])
for i, out in enumerate(outs):
all_outs[i].append(out)
steps_done += 1
if verbose == 1:
progbar.update(steps_done)
finally:
if enqueuer is not None:
enqueuer.stop()
if len(all_outs) == 1:
if steps_done == 1:
return all_outs[0][0]
else:
return np.concatenate(all_outs[0])
if steps_done == 1:
return [out for out in all_outs]
else:
return [np.concatenate(out) for out in all_outs]
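# Illustrative sketch (not part of the original source); reuses the
# `ArraySequence` from the `fit_generator` sketch above. Targets in the yielded
# tuples are ignored, only the inputs are consumed:
#
#     preds = model.predict_generator(seq, steps=len(seq), verbose=1)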
|
apache-2.0
|
blublud/networkx
|
networkx/algorithms/kernels.py
|
1
|
5277
|
import networkx as nx
import pandas as pd
import numpy as np
from scipy.sparse import diags, csr_matrix
'''
Calculate the various graph kernels for a graph
'''
__all__=['matrix_power_kernel','matrix_power_kernel_graph']
def matrix_power_kernel_graph(g,method='von_neumann',weight=None,nodes_from=[],nodes_to=None,**kwargs):
n2idx = {n:i for i,n in enumerate(g.nodes())}
idx_from = [n2idx[n] for n in nodes_from]
idx_to = [n2idx[n] for n in nodes_to] if nodes_to else None
return matrix_power_kernel(nx.adjacency_matrix(g,weight=weight),method,idx_from,idx_to,**kwargs)
def matrix_power_kernel(A, method='von_neumann', idx_from=[], idx_to=None, **kwargs):
U = None
if method in ['laplace', 'exp_laplace','root_page_rank']:
import scipy.sparse
diag = np.array(A.sum(axis=1).flatten().tolist()[0])
D = diags(diag,0)
with np.errstate(divide='ignore'):
inv_diag = 1 /diag
inv_diag[inv_diag == np.inf] = 0
D_inv = diags(inv_diag, 0)
L = D - A
if method == 'von_neumann':
U = __von_neumann__(A,idx_from,idx_to, **kwargs)
elif method == 'exponential':
U = __exponential__(A,idx_from,idx_to, **kwargs)
elif method == 'laplace':
U = __laplace__(L,idx_from,idx_to, **kwargs)
elif method == 'exp_laplace':
U = __exp_laplace__(L,idx_from,idx_to, **kwargs)
elif method == 'root_page_rank':
T = D_inv*L
U = __root_page_rank(T, idx_from, idx_to, **kwargs)
else:
raise Exception('Unknown method')
return U.T
def __von_neumann__(A, idx_from, idx_to, path_length=12, alpha = 1.0e-3):
'''
A: adjacency_matrix
idx_from,idx_to: lists of indices
'''
n_col = len(idx_from)
data_I = np.ones(n_col)
row_I = idx_from
col_I = np.arange(n_col)
U_I = csr_matrix((data_I,(row_I,col_I)), shape=(A.shape[0],n_col))
U = U_I
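# The loop below is a Horner-style evaluation of the truncated Neumann series:
# after k iterations U = sum_{j=0}^{k} (alpha*A)^j * U_I, which for small alpha
# approximates (I - alpha*A)^{-1} applied to the indicator columns U_I.
# (Explanatory comment added; not in the original module.)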
for i in range(1, path_length+1):
U = A.dot(U)*alpha + U_I
if idx_to:
U = U[idx_to,:]
return U
def __exponential__(A,idx_from,idx_to, path_length=12,alpha = 1.0e-3):
'''
A: adjacency_matrix
idx_from,idx_to: lists of indices
'''
n_col = len(idx_from)
data_I = np.ones(n_col)
row_I = idx_from
col_I = np.arange(n_col)
U_I = csr_matrix((data_I,(row_I,col_I)), shape=(A.shape[0],n_col))
U = U_I
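# The reverse loop below is a Horner evaluation of the truncated matrix
# exponential series: it accumulates sum_{j=0}^{path_length} (alpha*A)^j / j!
# applied to U_I, i.e. an approximation of expm(alpha*A) * U_I.
# (Explanatory comment added; not in the original module.)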
for i in range(path_length,0,-1):
U = A.dot(U)*alpha/i + U_I
if idx_to:
U = U[idx_to,:]
return U
def __laplace__(L,idx_from, idx_to, path_length=12, alpha=1.0e-3):
L = -L
n_col = len(idx_from)
data_I = np.ones(n_col)
row_I = idx_from
col_I = np.arange(n_col)
U_I = csr_matrix((data_I,(row_I,col_I)), shape=(L.shape[0],n_col))
U = U_I
for i in range(path_length,0,-1):
U = L.dot(U)*alpha/i + U_I
if idx_to:
U = U[idx_to,:]
return U
def __exp_laplace__(L,idx_from, idx_to, path_length=12, alpha=1.0e-3):
#L = -L
n_col = len(idx_from)
data_I = np.ones(n_col)
row_I = idx_from
col_I = np.arange(n_col)
U_I = csr_matrix((data_I,(row_I,col_I)), shape=(L.shape[0],n_col))
U = U_I
for i in range(path_length,0,-1):
U = L.dot(U)*alpha/i + U_I
if idx_to:
U = U[idx_to,:]
return U
def __root_page_rank(T, idx_from, idx_to, path_length=12, alpha=1.0e-3):
U = T[:, idx_from]
for i in range(path_length):
U = U + (T.dot(U))*alpha
if idx_to:
U = U[idx_to,:]
U = U*(1 - alpha)
return U
class MatrixPowerKernel:
def __init__(self,kernel='von_neumann',lmax=5,even_step=False,**kwargs):
if kernel not in ['von_neumann','exp_diffusion']:
raise Exception("Unimplemented kernel:",kernel)
self.kernel = kernel
self.lmax = lmax
self.even_step = even_step
for k in kwargs:
self.__setattr__(k,kwargs[k])
def fit(self,A,idx_from=None,idx_to=None):
'''
Compute matrix power-based graph kernel.
Params:
A: adjacency_matrix
idx_from: list of source node indices; if None, a centrality-style score over all nodes is computed.
'''
M = self.__affinity_matrix__(A)
if idx_from:
n_col = len(idx_from)
data_I = np.ones(n_col)
row_I = idx_from
col_I = np.arange(n_col)
I_Src2All = csr_matrix((data_I,(row_I,col_I)), shape=(M.shape[0],n_col))
Src2All = I_Src2All.todense()
else:
I_Src2All = np.ones(M.shape[0])
Src2All = I_Src2All
for path_length in range(self.lmax):
damping_arg = self.__damping_arg__(path_length)
if self.even_step:
Src2All = damping_arg*M.dot(M.dot(Src2All)) + I_Src2All
else:
Src2All = damping_arg*M.dot(Src2All) + I_Src2All
if idx_from and idx_to:
Src2All = Src2All[idx_to,:]
elif idx_to:#centrality Src2All is just a vector
Src2All = Src2All[idx_to]
return np.asarray(Src2All)
def fit_with_g(self,g,src=None,dst=None,weight=None):
A = nx.adjacency_matrix(g,weight=weight)
n2idx = {n:i for i,n in enumerate(g.nodes())}
idx_from = [n2idx[n] for n in src] if src else None
idx_to = [n2idx[n] for n in dst] if dst else None
return self.fit(A,idx_from,idx_to)
def __damping_arg__(self, path_length):
if self.kernel == 'von_neumann':
return self.alpha
elif self.kernel == 'exp_diffusion':
# Guard against dividing by zero on the first step (path_length == 0).
return self.alpha / max(path_length, 1)
def __affinity_matrix__(self,A):
if self.kernel == 'von_neumann':
return A
elif self.kernel == 'exp_diffusion':
return A
@staticmethod
def __I_Src2All__(M, idx_from):
n_col = len(idx_from)
data_I = np.ones(n_col)
row_I = idx_from
col_I = np.arange(n_col)
return csr_matrix((data_I,(row_I,col_I)), shape=(M.shape[0],n_col))
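if __name__ == '__main__':
    # Illustrative demo (not part of the original module); graph choice and
    # parameter values are arbitrary. Computes truncated von Neumann kernel
    # scores from node 0 to every node of the karate club graph.
    g = nx.karate_club_graph()
    scores = matrix_power_kernel_graph(g, method='von_neumann',
                                       nodes_from=[0],
                                       path_length=6, alpha=1e-2)
    print(scores.shape)              # (1, number_of_nodes)
    print(scores.toarray()[0, :5])   # kernel scores to the first five nodes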
|
bsd-3-clause
|
imaculate/scikit-learn
|
examples/decomposition/plot_pca_vs_lda.py
|
176
|
2027
|
"""
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the first 2 principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of IRIS dataset')
plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('LDA of IRIS dataset')
plt.show()
|
bsd-3-clause
|
pytroll/pyresample
|
docs/source/conf.py
|
1
|
8291
|
# -*- coding: utf-8 -*-
#
# pyresample documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 5 13:01:32 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
elif name == "inf":
return 0
else:
return Mock()
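# Illustrative note (not part of the original file): a Mock like the one above
# is typically installed for heavy optional dependencies before autodoc runs,
# e.g. (module names below are hypothetical):
#
#     MOCK_MODULES = ['pyproj', 'matplotlib']
#     for mod_name in MOCK_MODULES:
#         sys.modules[mod_name] = Mock()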
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
from pyresample import __version__ # noqa
# -- General configuration -----------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.doctest', 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx']
# DocTest Settings
# don't run regular >>> code blocks
doctest_test_doctest_blocks = ''
# setup imports so we can skip certain doctests
doctest_global_setup = '''
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
try:
import cartopy
except ImportError:
cartopy = None
try:
from mpl_toolkits.basemap import Basemap
except ImportError:
Basemap = None
'''
# Napoleon Settings (to support numpy style docs)
napoleon_numpy_docstring = True
napoleon_use_admonition_for_examples = True
napoleon_use_admonition_for_notes = True
napoleon_use_admonition_for_references = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyresample'
copyright = u'2013, Esben S. Nielsen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version = __version__.split('+')[0]
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyresampledoc'
# -- Options for LaTeX output --------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyresample.tex', u'pyresample Documentation',
u'Esben S. Nielsen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
# Intersphinx extention
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'xarray': ('https://xarray.pydata.org/en/stable', None),
'dask': ('https://docs.dask.org/en/latest', None),
'pandas': ('https://pandas.pydata.org/docs', None),
'pyresample': ('https://pyresample.readthedocs.io/en/stable', None),
'trollsift': ('https://trollsift.readthedocs.io/en/stable', None),
'trollimage': ('https://trollimage.readthedocs.io/en/stable', None),
'pyproj': ('https://pyproj4.github.io/pyproj/dev/', None),
'proj4': ('https://proj.org', None),
}
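# With the mapping above, cross-references in the documentation such as
# :class:`numpy.ndarray` or :class:`xarray.DataArray` resolve against the
# external projects' object inventories.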
|
lgpl-3.0
|
PF2-pasteur-fr/seqan
|
apps/ngs_roi/tool_shed/roi_details.py
|
18
|
3825
|
#!/usr/bin/env python
"""Generation of detailed ROI reports with larger plots.
This report generation works for hundreds of ROIs.
"""
try:
import argparse
except ImportError:
import argparse26 as argparse
import math
import os.path
import sys
import Cheetah.Template
import matplotlib.pyplot as plt
import ngs_roi.app
import ngs_roi.argparse
import ngs_roi.io
PAGE_TPL = """
<html>
<head>
<title>ROI Table</title>
<style type="text/css">
div.plot
{
float: left;
padding: 4px;
margin: 2px;
width: 420px;
}
.plot h2 { margin-top: 3px; margin-bottom: 3px; text-align: center; }
.plot img { display: block; margin: 0 auto; }
</style>
</head>
<body>
<h1>Detailed ROI Report</h1>
#for i, roi in enumerate($records)
<div class="plot">
<h2>${roi.ref}:${roi.start_pos + 1}-${roi.end_pos+1}</h2>
<a href="${href($roi)}" target="dead"><img src="plot_${i}.png" /></a>
<p>
<b>chr:start-end</b> <a href="${href($roi)}" target="dead">${roi.ref}:${roi.start_pos}-${roi.end_pos} ${roi.strand}</a>;
<b>region name</b> ${roi.region_name};
<b>region length</b> ${roi.region_length};
</p>
#if $roi.data
<p>#for j, key in enumerate($data_keys)#<b>$key:</b> ${roi.data[$j]}; #end for#</p>
#end if
</div>
#end for
<iframe name="dead" height="0" width="0"></iframe>
<div><code>$args</code></div>
</body>
</html>
"""
class DetailedRoiGenerator(ngs_roi.app.App):
"""Generate detailed ROI report.
    :ivar args: Arguments from the command line.
"""
def __init__(self, args):
self.args = args
def run(self):
"""Run report generation, return status code.
:return: integer with the result.
"""
print >>sys.stderr, 'Loading ROI'
records = ngs_roi.io.load(self.args.in_file, self.args.max_rois)
keys = records[0].data_keys
self.writeHtml(keys, records)
self.writePlots(records)
return 0
def writePlots(self, records):
COLOR = 'blue'
LINE_WIDTH = .5
LINE_STYLE = '-'
TICK_FONT_SIZE = 8
LABEL_FONT_SIZE = 10
for i, roi in enumerate(records):
file_name = 'plot_%d.png' % i
file_name = os.path.join(self.args.out_dir, file_name)
print >>sys.stderr, 'Writing plot %s' % file_name
plt.figure(figsize=(4, 2.5))
plt.gcf().subplots_adjust(bottom=0.16, left=0.15)
plt.plot(roi.points, color=COLOR, linewidth=LINE_WIDTH, linestyle=LINE_STYLE)
plt.ylim(ymin=0)
if self.args.max_value:
plt.ylim(ymax=self.args.max_value)
plt.tick_params(labelsize=TICK_FONT_SIZE)
plt.ylabel('coverage', fontsize=LABEL_FONT_SIZE, weight='semibold')
plt.xlabel('ROI beginPos', fontsize=LABEL_FONT_SIZE, weight='semibold')
plt.savefig(file_name)
def writeHtml(self, keys, records):
file_name = self.args.out_file
print >>sys.stderr, 'Writing HTML file %s' % file_name
vals = {'args': self.args, 'records': records, 'data_keys': keys,
'href': lambda x: self.buildHref(x.ref, x.start_pos, x.end_pos)}
t = Cheetah.Template.Template(PAGE_TPL, searchList=vals)
with open(file_name, 'wb') as f:
f.write(str(t))
def main():
parser = argparse.ArgumentParser(description='Plot ROI file.')
ngs_roi.argparse.addFileArguments(parser)
ngs_roi.argparse.addPlotGridArguments(parser)
ngs_roi.argparse.addLinkArguments(parser)
args = parser.parse_args()
ngs_roi.argparse.applyFileDefaults(args)
app = DetailedRoiGenerator(args)
return app.run()
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
|
keflavich/spectral-cube
|
spectral_cube/spectral_cube.py
|
2
|
165227
|
"""
A class to represent a 3-d position-position-velocity spectral cube.
"""
from __future__ import print_function, absolute_import, division
import warnings
from functools import wraps
import operator
import re
import itertools
import copy
import tempfile
import textwrap
from pathlib import PosixPath
import six
from six.moves import zip, range
import dask.array as da
import astropy.wcs
from astropy import units as u
from astropy.io.fits import PrimaryHDU, BinTableHDU, Header, Card, HDUList
from astropy.utils.console import ProgressBar
from astropy import log
from astropy import wcs
from astropy import convolution
from astropy import stats
from astropy.constants import si
from astropy.io.registry import UnifiedReadWriteMethod
import numpy as np
from radio_beam import Beam, Beams
from . import cube_utils
from . import wcs_utils
from . import spectral_axis
from .masks import (LazyMask, LazyComparisonMask, BooleanArrayMask, MaskBase,
is_broadcastable_and_smaller)
from .ytcube import ytCube
from .lower_dimensional_structures import (Projection, Slice, OneDSpectrum,
LowerDimensionalObject,
VaryingResolutionOneDSpectrum
)
from .base_class import (BaseNDClass, SpectralAxisMixinClass,
DOPPLER_CONVENTIONS, SpatialCoordMixinClass,
MaskableArrayMixinClass, MultiBeamMixinClass,
HeaderMixinClass, BeamMixinClass,
)
from .utils import (cached, warn_slow, VarianceWarning, BeamWarning,
UnsupportedIterationStrategyWarning, WCSMismatchWarning,
NotImplementedWarning, SliceWarning, SmoothingWarning,
StokesWarning, ExperimentalImplementationWarning,
                    BeamAverageWarning, NonFiniteBeamsWarning,
WCSCelestialError)
from .spectral_axis import (determine_vconv_from_ctype, get_rest_value_from_wcs,
doppler_beta, doppler_gamma, doppler_z)
from .io.core import SpectralCubeRead, SpectralCubeWrite
from distutils.version import LooseVersion
__all__ = ['BaseSpectralCube', 'SpectralCube', 'VaryingResolutionSpectralCube']
# apply_everywhere, world: do not have a valid cube to test on
__doctest_skip__ = ['BaseSpectralCube._apply_everywhere']
try:
from scipy import ndimage
scipyOK = True
except ImportError:
scipyOK = False
warnings.filterwarnings('ignore', category=wcs.FITSFixedWarning, append=True)
SIGMA2FWHM = 2. * np.sqrt(2. * np.log(2.))
# convenience structures to keep track of the reversed index
# conventions between WCS and numpy
np2wcs = {2: 0, 1: 1, 0: 2}
_NP_DOC = """
Ignores excluded mask elements.
Parameters
----------
axis : int (optional)
The axis to collapse, or None to perform a global aggregation
how : cube | slice | ray | auto
How to compute the aggregation. All strategies give the same
result, but certain strategies are more efficient depending
on data size and layout. Cube/slice/ray iterate over
decreasing subsets of the data, to conserve memory.
Default='auto'
""".replace('\n', '\n ')
def aggregation_docstring(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.__doc__ += _NP_DOC
return wrapper
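# Sketch of how the decorator is used: it appends the shared axis/how
# parameter description in _NP_DOC to a method's own docstring. The method
# name below is hypothetical:
#
#     @aggregation_docstring
#     def nanmax_like(self, axis=None, how='auto', **kwargs):
#         """Return the maximum of the cube."""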
_PARALLEL_DOC = """
Other Parameters
----------------
parallel : bool
Use joblib to parallelize the operation.
If set to ``False``, will force the use of a single core without
using ``joblib``.
num_cores : int or None
The number of cores to use when applying this function in parallel
across the cube.
use_memmap : bool
If specified, a memory mapped temporary file on disk will be
written to rather than storing the intermediate spectra in memory.
"""
def parallel_docstring(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
line1 = wrapper.__doc__.split("\n")[1]
indentation = " "*(len(line1) - len(line1.lstrip()))
try:
wrapper.__doc__ += textwrap.indent(_PARALLEL_DOC, indentation)
except AttributeError:
# python2.7
wrapper.__doc__ = textwrap.dedent(wrapper.__doc__) + _PARALLEL_DOC
return wrapper
def _apply_spectral_function(arguments, outcube, function, **kwargs):
"""
Helper function to apply a function to a spectrum.
Needs to be declared toward the top of the code to allow pickling by
joblib.
"""
(spec, includemask, ii, jj) = arguments
if np.any(includemask):
outcube[:,jj,ii] = function(spec, **kwargs)
else:
outcube[:,jj,ii] = spec
def _apply_spatial_function(arguments, outcube, function, **kwargs):
"""
Helper function to apply a function to an image.
Needs to be declared toward the top of the code to allow pickling by
joblib.
"""
(img, includemask, ii) = arguments
if np.any(includemask):
outcube[ii, :, :] = function(img, **kwargs)
else:
outcube[ii, :, :] = img
class BaseSpectralCube(BaseNDClass, MaskableArrayMixinClass,
SpectralAxisMixinClass, SpatialCoordMixinClass,
HeaderMixinClass):
def __init__(self, data, wcs, mask=None, meta=None, fill_value=np.nan,
header=None, allow_huge_operations=False, wcs_tolerance=0.0):
# Deal with metadata first because it can affect data reading
self._meta = meta or {}
# must extract unit from data before stripping it
if 'BUNIT' in self._meta:
self._unit = cube_utils.convert_bunit(self._meta["BUNIT"])
elif hasattr(data, 'unit'):
self._unit = data.unit
else:
self._unit = None
# data must not be a quantity when stored in self._data
if hasattr(data, 'unit'):
# strip the unit so that it can be treated as cube metadata
data = data.value
# TODO: mask should be oriented? Or should we assume correctly oriented here?
self._data, self._wcs = cube_utils._orient(data, wcs)
self._wcs_tolerance = wcs_tolerance
self._spectral_axis = None
self._mask = mask # specifies which elements to Nan/blank/ignore
# object or array-like object, given that WCS needs
# to be consistent with data?
#assert mask._wcs == self._wcs
self._fill_value = fill_value
self._header = Header() if header is None else header
if not isinstance(self._header, Header):
raise TypeError("If a header is given, it must be a fits.Header")
# We don't pass the spectral unit via the initializer since the user
# should be using ``with_spectral_unit`` if they want to set it.
# However, we do want to keep track of what units the spectral axis
# should be returned in, otherwise astropy's WCS can change the units,
# e.g. km/s -> m/s.
# This can be overridden with Header below
self._spectral_unit = u.Unit(self._wcs.wcs.cunit[2])
# This operation is kind of expensive?
header_specaxnum = astropy.wcs.WCS(header).wcs.spec
header_specaxunit = spectral_axis.unit_from_header(self._header,
spectral_axis_number=header_specaxnum+1)
# Allow the original header spectral axis unit to override the default
# unit
if header_specaxunit is not None:
self._spectral_unit = header_specaxunit
self._spectral_scale = spectral_axis.wcs_unit_scale(self._spectral_unit)
self.allow_huge_operations = allow_huge_operations
self._cache = {}
@property
def _is_huge(self):
return cube_utils.is_huge(self)
@property
def _new_thing_with(self):
return self._new_cube_with
def _new_cube_with(self, data=None, wcs=None, mask=None, meta=None,
fill_value=None, spectral_unit=None, unit=None,
wcs_tolerance=None, **kwargs):
data = self._data if data is None else data
if unit is None and hasattr(data, 'unit'):
if data.unit != self.unit:
raise u.UnitsError("New data unit '{0}' does not"
" match cube unit '{1}'. You can"
" override this by specifying the"
" `unit` keyword."
.format(data.unit, self.unit))
unit = data.unit
elif unit is not None:
# convert string units to Units
if not isinstance(unit, u.Unit):
unit = u.Unit(unit)
if hasattr(data, 'unit'):
if u.Unit(unit) != data.unit:
raise u.UnitsError("The specified new cube unit '{0}' "
"does not match the input unit '{1}'."
.format(unit, data.unit))
else:
data = u.Quantity(data, unit=unit, copy=False)
elif self._unit is not None:
unit = self.unit
wcs = self._wcs if wcs is None else wcs
mask = self._mask if mask is None else mask
if meta is None:
meta = {}
meta.update(self._meta)
if unit is not None:
meta['BUNIT'] = unit.to_string(format='FITS')
fill_value = self._fill_value if fill_value is None else fill_value
spectral_unit = self._spectral_unit if spectral_unit is None else u.Unit(spectral_unit)
cube = self.__class__(data=data, wcs=wcs, mask=mask, meta=meta,
fill_value=fill_value, header=self._header,
allow_huge_operations=self.allow_huge_operations,
wcs_tolerance=wcs_tolerance or self._wcs_tolerance,
**kwargs)
cube._spectral_unit = spectral_unit
cube._spectral_scale = spectral_axis.wcs_unit_scale(spectral_unit)
return cube
read = UnifiedReadWriteMethod(SpectralCubeRead)
write = UnifiedReadWriteMethod(SpectralCubeWrite)
@property
def unit(self):
""" The flux unit """
if self._unit:
return self._unit
else:
return u.one
@property
def shape(self):
""" Length of cube along each axis """
return self._data.shape
@property
def size(self):
""" Number of elements in the cube """
return self._data.size
@property
def base(self):
""" The data type 'base' of the cube - useful for, e.g., joblib """
return self._data.base
def __len__(self):
return self.shape[0]
@property
def ndim(self):
""" Dimensionality of the data """
return self._data.ndim
def __repr__(self):
s = "{1} with shape={0}".format(self.shape, self.__class__.__name__)
if self.unit is u.one:
s += ":\n"
else:
s += " and unit={0}:\n".format(self.unit)
s += (" n_x: {0:6d} type_x: {1:8s} unit_x: {2:5s}"
" range: {3:12.6f}:{4:12.6f}\n".format(self.shape[2],
self.wcs.wcs.ctype[0],
self.wcs.wcs.cunit[0],
self.longitude_extrema[0],
self.longitude_extrema[1],))
s += (" n_y: {0:6d} type_y: {1:8s} unit_y: {2:5s}"
" range: {3:12.6f}:{4:12.6f}\n".format(self.shape[1],
self.wcs.wcs.ctype[1],
self.wcs.wcs.cunit[1],
self.latitude_extrema[0],
self.latitude_extrema[1],
))
s += (" n_s: {0:6d} type_s: {1:8s} unit_s: {2:5s}"
" range: {3:12.3f}:{4:12.3f}".format(self.shape[0],
self.wcs.wcs.ctype[2],
self._spectral_unit,
self.spectral_extrema[0],
self.spectral_extrema[1],
))
return s
@property
@cached
def spectral_extrema(self):
_spectral_min = self.spectral_axis.min()
_spectral_max = self.spectral_axis.max()
return _spectral_min, _spectral_max
def apply_numpy_function(self, function, fill=np.nan,
reduce=True, how='auto',
projection=False,
unit=None,
check_endian=False,
progressbar=False,
includemask=False,
**kwargs):
"""
Apply a numpy function to the cube
Parameters
----------
function : Numpy ufunc
A numpy ufunc to apply to the cube
fill : float
The fill value to use on the data
reduce : bool
reduce indicates whether this is a reduce-like operation,
that can be accumulated one slice at a time.
sum/max/min are like this. argmax/argmin/stddev are not
how : cube | slice | ray | auto
How to compute the moment. All strategies give the same
result, but certain strategies are more efficient depending
on data size and layout. Cube/slice/ray iterate over
decreasing subsets of the data, to conserve memory.
Default='auto'
projection : bool
Return a :class:`~spectral_cube.lower_dimensional_structures.Projection` if the resulting array is 2D or a
OneDProjection if the resulting array is 1D and the sum is over both
spatial axes?
unit : None or `astropy.units.Unit`
The unit to include for the output array. For example,
`SpectralCube.max` calls
``SpectralCube.apply_numpy_function(np.max, unit=self.unit)``,
inheriting the unit from the original cube.
However, for other numpy functions, e.g. `numpy.argmax`, the return
is an index and therefore unitless.
check_endian : bool
A flag to check the endianness of the data before applying the
function. This is only needed for optimized functions, e.g. those
in the `bottleneck <https://pypi.python.org/pypi/Bottleneck>`_ package.
progressbar : bool
Show a progressbar while iterating over the slices through the
cube?
kwargs : dict
Passed to the numpy function.
Returns
-------
result : :class:`~spectral_cube.lower_dimensional_structures.Projection` or `~astropy.units.Quantity` or float
The result depends on the value of ``axis``, ``projection``, and
``unit``. If ``axis`` is None, the return will be a scalar with or
without units. If axis is an integer, the return will be a
:class:`~spectral_cube.lower_dimensional_structures.Projection` if ``projection`` is set
"""
# leave axis in kwargs to avoid overriding numpy defaults, e.g. if the
# default is axis=-1, we don't want to force it to be axis=None by
# specifying that in the function definition
axis = kwargs.get('axis', None)
if how == 'auto':
strategy = cube_utils.iterator_strategy(self, axis)
else:
strategy = how
out = None
log.debug("applying numpy function {0} with strategy {1}"
.format(function, strategy))
if strategy == 'slice' and reduce:
out = self._reduce_slicewise(function, fill, check_endian,
includemask=includemask,
progressbar=progressbar, **kwargs)
elif how == 'ray':
out = self.apply_function(function, **kwargs)
elif how not in ['auto', 'cube']:
warnings.warn("Cannot use how=%s. Using how=cube" % how,
UnsupportedIterationStrategyWarning)
if out is None:
out = function(self._get_filled_data(fill=fill,
check_endian=check_endian),
**kwargs)
if axis is None:
# return is scalar
if unit is not None:
return u.Quantity(out, unit=unit)
else:
return out
elif projection and reduce:
meta = {'collapse_axis': axis}
meta.update(self._meta)
if hasattr(axis, '__len__') and len(axis) == 2:
# if operation is over two spatial dims
if set(axis) == set((1,2)):
new_wcs = self._wcs.sub([wcs.WCSSUB_SPECTRAL])
header = self._nowcs_header
if cube_utils._has_beam(self):
bmarg = {'beam': self.beam}
elif cube_utils._has_beams(self):
bmarg = {'beams': self.unmasked_beams}
else:
bmarg = {}
return self._oned_spectrum(value=out,
wcs=new_wcs,
copy=False,
unit=unit,
header=header,
meta=meta,
spectral_unit=self._spectral_unit,
**bmarg
)
else:
warnings.warn("Averaging over a spatial and a spectral "
"dimension cannot produce a Projection "
"quantity (no units or WCS are preserved).",
SliceWarning
)
return out
else:
new_wcs = wcs_utils.drop_axis(self._wcs, np2wcs[axis])
header = self._nowcs_header
return Projection(out, copy=False, wcs=new_wcs, meta=meta,
unit=unit, header=header)
else:
return out
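    # Usage sketch (assumes ``cube`` is an already-loaded SpectralCube), mirroring
    # what ``SpectralCube.max`` does internally as described in the docstring above:
    #
    #     peak_map = cube.apply_numpy_function(np.nanmax, axis=0,
    #                                          projection=True, unit=cube.unit)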
def _reduce_slicewise(self, function, fill, check_endian,
includemask=False, progressbar=False, **kwargs):
"""
Compute a numpy aggregation by grabbing one slice at a time
"""
ax = kwargs.pop('axis', None)
full_reduce = ax is None
ax = ax or 0
if isinstance(ax, tuple):
assert len(ax) == 2 # we only work with cubes...
iterax = [x for x in range(3) if x not in ax][0]
else:
iterax = ax
log.debug("reducing slicewise with axis = {0}".format(ax))
if includemask:
planes = self._iter_mask_slices(iterax)
else:
planes = self._iter_slices(iterax, fill=fill, check_endian=check_endian)
result = next(planes)
if progressbar:
progressbar = ProgressBar(self.shape[iterax])
pbu = progressbar.update
else:
pbu = lambda: True
if isinstance(ax, tuple):
# have to make a result a list of itself, since we already "got"
# the first plane above
result = [function(result, axis=(0,1), **kwargs)]
for plane in planes:
# apply to axes 0 and 1, because we're fully reducing the plane
# to a number if we're applying over two axes
result.append(function(plane, axis=(0,1), **kwargs))
pbu()
result = np.array(result)
else:
for plane in planes:
# axis = 2 means we're stacking two planes, the previously
# computed one and the current one
result = function(np.dstack((result, plane)), axis=2, **kwargs)
pbu()
if full_reduce:
result = function(result)
return result
def get_mask_array(self):
"""
Convert the mask to a boolean numpy array
"""
return self._mask.include(data=self._data, wcs=self._wcs,
wcs_tolerance=self._wcs_tolerance)
def _naxes_dropped(self, view):
"""
Determine how many axes are being selected given a view.
(1,2) -> 2
None -> 3
1 -> 1
2 -> 1
"""
if hasattr(view,'__len__'):
return len(view)
elif view is None:
return 3
else:
return 1
@aggregation_docstring
@warn_slow
def sum(self, axis=None, how='auto', **kwargs):
"""
Return the sum of the cube, optionally over an axis.
"""
from .np_compat import allbadtonan
projection = self._naxes_dropped(axis) in (1,2)
return self.apply_numpy_function(allbadtonan(np.nansum), fill=np.nan,
how=how, axis=axis, unit=self.unit,
projection=projection, **kwargs)
@aggregation_docstring
@warn_slow
def mean(self, axis=None, how='cube', **kwargs):
"""
Return the mean of the cube, optionally over an axis.
"""
projection = self._naxes_dropped(axis) in (1,2)
if how == 'slice':
# two-pass approach: first total the # of points,
# then total the value of the points, then divide
# (a one-pass approach is possible but requires
# more sophisticated bookkeeping)
counts = self._count_nonzero_slicewise(axis=axis,
progressbar=kwargs.get('progressbar'))
ttl = self.apply_numpy_function(np.nansum, fill=np.nan, how=how,
axis=axis, unit=None,
projection=False, **kwargs)
out = ttl / counts
if projection:
if self._naxes_dropped(axis) == 1:
new_wcs = wcs_utils.drop_axis(self._wcs, np2wcs[axis])
meta = {'collapse_axis': axis}
meta.update(self._meta)
return Projection(out, copy=False, wcs=new_wcs,
meta=meta,
unit=self.unit, header=self._nowcs_header)
elif axis == (1,2):
newwcs = self._wcs.sub([wcs.WCSSUB_SPECTRAL])
if cube_utils._has_beam(self):
bmarg = {'beam': self.beam}
elif cube_utils._has_beams(self):
bmarg = {'beams': self.unmasked_beams}
else:
bmarg = {}
return self._oned_spectrum(value=out,
wcs=newwcs,
copy=False,
unit=self.unit,
spectral_unit=self._spectral_unit,
meta=self.meta,
**bmarg
)
else:
# this is a weird case, but even if projection is
# specified, we can't return a Quantity here because of WCS
# issues. `apply_numpy_function` already does this
# silently, which is unfortunate.
warnings.warn("Averaging over a spatial and a spectral "
"dimension cannot produce a Projection "
"quantity (no units or WCS are preserved).",
SliceWarning
)
return out
else:
return out
return self.apply_numpy_function(np.nanmean, fill=np.nan, how=how,
axis=axis, unit=self.unit,
projection=projection, **kwargs)
def _count_nonzero_slicewise(self, axis=None, progressbar=False):
"""
Count the number of finite pixels along an axis slicewise. This is a
helper function for the mean and std deviation slicewise iterators.
"""
counts = self.apply_numpy_function(np.sum, fill=np.nan,
how='slice', axis=axis,
unit=None,
projection=False,
progressbar=progressbar,
includemask=True)
return counts
@aggregation_docstring
@warn_slow
def std(self, axis=None, how='cube', ddof=0, **kwargs):
"""
Return the standard deviation of the cube, optionally over an axis.
Other Parameters
----------------
ddof : int
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements. By
default ``ddof`` is zero.
"""
projection = self._naxes_dropped(axis) in (1,2)
if how == 'slice':
if axis is None:
raise NotImplementedError("The overall standard deviation "
"cannot be computed in a slicewise "
"manner. Please use a "
"different strategy.")
if hasattr(axis, '__len__') and len(axis) == 2:
return self.apply_numpy_function(np.nanstd,
axis=axis,
how='slice',
projection=projection,
unit=self.unit,
**kwargs)
else:
counts = self._count_nonzero_slicewise(axis=axis)
ttl = self.apply_numpy_function(np.nansum, fill=np.nan, how='slice',
axis=axis, unit=None,
projection=False, **kwargs)
# Equivalent, but with more overhead:
# ttl = self.sum(axis=axis, how='slice').value
mean = ttl/counts
planes = self._iter_slices(axis, fill=np.nan, check_endian=False)
result = (next(planes)-mean)**2
for plane in planes:
result = np.nansum(np.dstack((result, (plane-mean)**2)), axis=2)
out = (result/(counts-ddof))**0.5
if projection:
new_wcs = wcs_utils.drop_axis(self._wcs, np2wcs[axis])
meta = {'collapse_axis': axis}
meta.update(self._meta)
return Projection(out, copy=False, wcs=new_wcs,
meta=meta,
unit=self.unit, header=self._nowcs_header)
else:
return out
# standard deviation cannot be computed as a trivial step-by-step
# process. There IS a one-pass algorithm for std dev, but it is not
# implemented, so we must force cube here. We could and should also
# implement raywise reduction
return self.apply_numpy_function(np.nanstd, fill=np.nan, how=how,
axis=axis, unit=self.unit,
projection=projection, **kwargs)
@aggregation_docstring
@warn_slow
def mad_std(self, axis=None, how='cube', **kwargs):
"""
        Use astropy's mad_std to compute the standard deviation.
"""
if int(astropy.__version__[0]) < 2:
raise NotImplementedError("mad_std requires astropy >= 2")
projection = self._naxes_dropped(axis) in (1,2)
if how == 'ray' and not hasattr(axis, '__len__'):
# no need for fill here; masked-out data are simply not included
return self.apply_numpy_function(stats.mad_std,
axis=axis,
how='ray',
unit=self.unit,
projection=projection,
ignore_nan=True,
)
elif how == 'slice' and hasattr(axis, '__len__') and len(axis) == 2:
return self.apply_numpy_function(stats.mad_std,
axis=axis,
how='slice',
projection=projection,
unit=self.unit,
fill=np.nan,
ignore_nan=True,
**kwargs)
elif how in ('ray', 'slice'):
raise NotImplementedError('Cannot run mad_std slicewise or raywise '
'unless the dimensionality is also reduced in the same direction.')
else:
return self.apply_numpy_function(stats.mad_std,
fill=np.nan,
axis=axis,
unit=self.unit,
ignore_nan=True,
how=how,
projection=projection, **kwargs)
@aggregation_docstring
@warn_slow
def max(self, axis=None, how='auto', **kwargs):
"""
Return the maximum data value of the cube, optionally over an axis.
"""
projection = self._naxes_dropped(axis) in (1,2)
return self.apply_numpy_function(np.nanmax, fill=np.nan, how=how,
axis=axis, unit=self.unit,
projection=projection, **kwargs)
@aggregation_docstring
@warn_slow
def min(self, axis=None, how='auto', **kwargs):
"""
Return the minimum data value of the cube, optionally over an axis.
"""
projection = self._naxes_dropped(axis) in (1,2)
return self.apply_numpy_function(np.nanmin, fill=np.nan, how=how,
axis=axis, unit=self.unit,
projection=projection, **kwargs)
@aggregation_docstring
@warn_slow
def argmax(self, axis=None, how='auto', **kwargs):
"""
Return the index of the maximum data value.
The return value is arbitrary if all pixels along ``axis`` are
excluded from the mask.
"""
return self.apply_numpy_function(np.nanargmax, fill=-np.inf,
reduce=False, projection=False,
how=how, axis=axis, **kwargs)
@aggregation_docstring
@warn_slow
def argmin(self, axis=None, how='auto', **kwargs):
"""
Return the index of the minimum data value.
The return value is arbitrary if all pixels along ``axis`` are
excluded from the mask
"""
return self.apply_numpy_function(np.nanargmin, fill=np.inf,
reduce=False, projection=False,
how=how, axis=axis, **kwargs)
def _argmaxmin_world(self, axis, method, **kwargs):
'''
Return the spatial or spectral index of the maximum or minimum value.
Use `argmax_world` and `argmin_world` directly.
'''
operation_name = '{}_world'.format(method)
if wcs_utils.is_pixel_axis_to_wcs_correlated(self.wcs, axis):
raise WCSCelestialError("{} requires the celestial axes"
" to be aligned along image axes."
.format(operation_name))
if method == 'argmin':
arg_pixel_plane = self.argmin(axis=axis, **kwargs)
elif method == 'argmax':
arg_pixel_plane = self.argmax(axis=axis, **kwargs)
else:
raise ValueError("`method` must be 'argmin' or 'argmax'")
# Convert to WCS coordinates.
out = cube_utils.world_take_along_axis(self, arg_pixel_plane, axis)
# Compute whether the mask has any valid data along `axis`
collapsed_mask = self.mask.include().any(axis=axis)
out[~collapsed_mask] = np.NaN
# Return a Projection.
new_wcs = wcs_utils.drop_axis(self._wcs, np2wcs[axis])
meta = {'collapse_axis': axis}
meta.update(self._meta)
return Projection(out, copy=False, wcs=new_wcs, meta=meta,
unit=out.unit, header=self._nowcs_header)
@warn_slow
def argmax_world(self, axis, **kwargs):
'''
Return the spatial or spectral index of the maximum value
along a line of sight.
Parameters
----------
axis : int
The axis to return the peak location along. e.g., `axis=0`
will return the value of the spectral axis at the peak value.
kwargs : dict
Passed to `~SpectralCube.argmax`.
'''
return self._argmaxmin_world(axis, 'argmax', **kwargs)
@warn_slow
def argmin_world(self, axis, **kwargs):
'''
Return the spatial or spectral index of the minimum value
along a line of sight.
Parameters
----------
axis : int
The axis to return the peak location along. e.g., `axis=0`
will return the value of the spectral axis at the peak value.
kwargs : dict
Passed to `~SpectralCube.argmin`.
'''
return self._argmaxmin_world(axis, 'argmin', **kwargs)
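    # Usage sketch: the world coordinate (e.g. velocity) of the brightest
    # channel along each line of sight, assuming ``cube`` is a SpectralCube:
    #
    #     vel_at_peak = cube.argmax_world(axis=0)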
def chunked(self, chunksize=1000):
"""
Not Implemented.
Iterate over chunks of valid data
"""
raise NotImplementedError()
def _get_flat_shape(self, axis):
"""
Get the shape of the array after flattening along an axis
"""
iteraxes = [0, 1, 2]
iteraxes.remove(axis)
# x,y are defined as first,second dim to iterate over
# (not x,y in pixel space...)
nx = self.shape[iteraxes[0]]
ny = self.shape[iteraxes[1]]
return nx, ny
@warn_slow
def _apply_everywhere(self, function, *args):
"""
Return a new cube with ``function`` applied to all pixels
Private because this doesn't have an obvious and easy-to-use API
Examples
--------
        >>> newcube = cube._apply_everywhere(np.add, 0.5*u.Jy)
"""
try:
test_result = function(np.ones([1,1,1])*self.unit, *args)
# First, check that function returns same # of dims?
assert test_result.ndim == 3,"Output is not 3-dimensional"
except Exception as ex:
raise AssertionError("Function could not be applied to a simple "
"cube. The error was: {0}".format(ex))
data = function(u.Quantity(self._get_filled_data(fill=self._fill_value),
self.unit, copy=False),
*args)
return self._new_cube_with(data=data, unit=data.unit)
@warn_slow
def _cube_on_cube_operation(self, function, cube, equivalencies=[], **kwargs):
"""
Apply an operation between two cubes. Inherits the metadata of the
left cube.
Parameters
----------
function : function
A function to apply to the cubes
cube : SpectralCube
Another cube to put into the function
equivalencies : list
A list of astropy equivalencies
kwargs : dict
Passed to np.testing.assert_almost_equal
"""
assert cube.shape == self.shape
if not self.unit.is_equivalent(cube.unit, equivalencies=equivalencies):
raise u.UnitsError("{0} is not equivalent to {1}"
.format(self.unit, cube.unit))
if not wcs_utils.check_equality(self.wcs, cube.wcs, warn_missing=True,
**kwargs):
warnings.warn("Cube WCSs do not match, but their shapes do",
WCSMismatchWarning)
try:
test_result = function(np.ones([1,1,1])*self.unit,
np.ones([1,1,1])*self.unit)
# First, check that function returns same # of dims?
assert test_result.shape == (1,1,1)
except Exception as ex:
raise AssertionError("Function {1} could not be applied to a "
"pair of simple "
"cube. The error was: {0}".format(ex,
function))
cube = cube.to(self.unit)
data = function(self._data, cube._data)
try:
# multiplication, division, etc. are valid inter-unit operations
unit = function(self.unit, cube.unit)
except TypeError:
# addition, subtraction are not
unit = self.unit
return self._new_cube_with(data=data, unit=unit)
def apply_function(self, function, axis=None, weights=None, unit=None,
projection=False, progressbar=False,
update_function=None, keep_shape=False, **kwargs):
"""
Apply a function to valid data along the specified axis or to the whole
cube, optionally using a weight array that is the same shape (or at
least can be sliced in the same way)
Parameters
----------
function : function
A function that can be applied to a numpy array. Does not need to
be nan-aware
axis : 1, 2, 3, or None
The axis to operate along. If None, the return is scalar.
weights : (optional) np.ndarray
An array with the same shape (or slicing abilities/results) as the
data cube
unit : (optional) `~astropy.units.Unit`
The unit of the output projection or value. Not all functions
should return quantities with units.
projection : bool
Return a projection if the resulting array is 2D?
progressbar : bool
Show a progressbar while iterating over the slices/rays through the
cube?
keep_shape : bool
If `True`, the returned object will be the same dimensionality as
the cube.
update_function : function
An alternative tracker for the progress of applying the function
to the cube data. If ``progressbar`` is ``True``, this argument is
ignored.
Returns
-------
result : :class:`~spectral_cube.lower_dimensional_structures.Projection` or `~astropy.units.Quantity` or float
The result depends on the value of ``axis``, ``projection``, and
``unit``. If ``axis`` is None, the return will be a scalar with or
without units. If axis is an integer, the return will be a
:class:`~spectral_cube.lower_dimensional_structures.Projection` if ``projection`` is set
"""
if axis is None:
out = function(self.flattened(), **kwargs)
if unit is not None:
return u.Quantity(out, unit=unit)
else:
return out
if hasattr(axis, '__len__'):
raise NotImplementedError("`apply_function` does not support "
"function application across multiple "
"axes. Try `apply_numpy_function`.")
# determine the output array shape
nx, ny = self._get_flat_shape(axis)
nz = self.shape[axis] if keep_shape else 1
# allocate memory for output array
out = np.empty([nz, nx, ny]) * np.nan
if progressbar:
progressbar = ProgressBar(nx*ny)
pbu = progressbar.update
elif update_function is not None:
pbu = update_function
else:
pbu = lambda: True
# iterate over "lines of sight" through the cube
for y, x, slc in self._iter_rays(axis):
# acquire the flattened, valid data for the slice
data = self.flattened(slc, weights=weights)
if len(data) != 0:
result = function(data, **kwargs)
if hasattr(result, 'value'):
# store result in array
out[:, y, x] = result.value
else:
out[:, y, x] = result
pbu()
if not keep_shape:
out = out[0, :, :]
if projection and axis in (0, 1, 2):
new_wcs = wcs_utils.drop_axis(self._wcs, np2wcs[axis])
meta = {'collapse_axis': axis}
meta.update(self._meta)
return Projection(out, copy=False, wcs=new_wcs, meta=meta,
unit=unit, header=self._nowcs_header)
else:
return out
def _iter_rays(self, axis=None):
"""
Iterate over view corresponding to lines-of-sight through a cube
along the specified axis
"""
ny, nx = self._get_flat_shape(axis)
for y in range(ny):
for x in range(nx):
# create length-1 view for each position
slc = [slice(y, y + 1), slice(x, x + 1), ]
# create a length-N slice (all-inclusive) along the selected axis
slc.insert(axis, slice(None))
yield y, x, tuple(slc)
def _iter_slices(self, axis, fill=np.nan, check_endian=False):
"""
Iterate over the cube one slice at a time,
replacing masked elements with fill
"""
view = [slice(None)] * 3
for x in range(self.shape[axis]):
view[axis] = x
yield self._get_filled_data(view=tuple(view), fill=fill,
check_endian=check_endian)
def _iter_mask_slices(self, axis):
"""
Iterate over the cube one slice at a time,
replacing masked elements with fill
"""
view = [slice(None)] * 3
for x in range(self.shape[axis]):
view[axis] = x
yield self._mask.include(data=self._data,
view=tuple(view),
wcs=self._wcs,
wcs_tolerance=self._wcs_tolerance,
)
def flattened(self, slice=(), weights=None):
"""
Return a slice of the cube giving only the valid data (i.e., removing
bad values)
Parameters
----------
slice: 3-tuple
A length-3 tuple of view (or any equivalent valid slice of a
cube)
weights: (optional) np.ndarray
An array with the same shape (or slicing abilities/results) as the
data cube
"""
data = self._mask._flattened(data=self._data, wcs=self._wcs, view=slice)
if isinstance(data, da.Array):
# Quantity does not work well with lazily evaluated data with an
            # unknown shape (which is the case when doing boolean indexing of arrays)
data = self._compute(data)
if weights is not None:
weights = self._mask._flattened(data=weights, wcs=self._wcs, view=slice)
return u.Quantity(data * weights, self.unit, copy=False)
else:
return u.Quantity(data, self.unit, copy=False)
def median(self, axis=None, iterate_rays=False, **kwargs):
"""
Compute the median of an array, optionally along an axis.
Ignores excluded mask elements.
Parameters
----------
axis : int (optional)
The axis to collapse
iterate_rays : bool
Iterate over individual rays? This mode is slower but can save RAM
costs, which may be extreme for large cubes
Returns
-------
med : ndarray
The median
"""
try:
from bottleneck import nanmedian
bnok = True
except ImportError:
bnok = False
# slicewise median is nonsense, must force how = 'cube'
# bottleneck.nanmedian does not allow axis to be a list or tuple
if bnok and not iterate_rays and not isinstance(axis, (list, tuple)):
log.debug("Using bottleneck nanmedian")
result = self.apply_numpy_function(nanmedian, axis=axis,
projection=True, unit=self.unit,
how='cube', check_endian=True,
**kwargs)
elif hasattr(np, 'nanmedian') and not iterate_rays:
log.debug("Using numpy nanmedian")
result = self.apply_numpy_function(np.nanmedian, axis=axis,
projection=True, unit=self.unit,
how='cube',**kwargs)
else:
log.debug("Using numpy median iterating over rays")
result = self.apply_function(np.median, projection=True, axis=axis,
unit=self.unit, **kwargs)
return result
def percentile(self, q, axis=None, iterate_rays=False, **kwargs):
"""
Return percentiles of the data.
Parameters
----------
q : float
The percentile to compute
axis : int, or None
Which axis to compute percentiles over
iterate_rays : bool
Iterate over individual rays? This mode is slower but can save RAM
costs, which may be extreme for large cubes
"""
if hasattr(np, 'nanpercentile') and not iterate_rays:
result = self.apply_numpy_function(np.nanpercentile, q=q,
axis=axis, projection=True,
unit=self.unit, how='cube',
**kwargs)
else:
result = self.apply_function(np.percentile, q=q, axis=axis,
projection=True, unit=self.unit,
**kwargs)
return result
def with_mask(self, mask, inherit_mask=True, wcs_tolerance=None):
"""
Return a new SpectralCube instance that contains a composite mask of
the current SpectralCube and the new ``mask``. Values of the mask that
are ``True`` will be *included* (masks are analogous to numpy boolean
index arrays, they are the inverse of the ``.mask`` attribute of a numpy
masked array).
Parameters
----------
mask : :class:`~spectral_cube.masks.MaskBase` instance, or boolean numpy array
The mask to apply. If a boolean array is supplied,
it will be converted into a mask, assuming that
`True` values indicate included elements.
inherit_mask : bool (optional, default=True)
If True, combines the provided mask with the
mask currently attached to the cube
wcs_tolerance : None or float
The tolerance of difference in WCS parameters between the cube and
the mask. Defaults to `self._wcs_tolerance` (which itself defaults
to 0.0) if unspecified
Returns
-------
new_cube : :class:`SpectralCube`
A cube with the new mask applied.
Notes
-----
This operation returns a view into the data, and not a copy.
"""
if isinstance(mask, np.ndarray):
if not is_broadcastable_and_smaller(mask.shape, self._data.shape):
raise ValueError("Mask shape is not broadcastable to data shape: "
"%s vs %s" % (mask.shape, self._data.shape))
mask = BooleanArrayMask(mask, self._wcs, shape=self._data.shape)
if self._mask is not None and inherit_mask:
new_mask = np.bitwise_and(self._mask, mask)
else:
new_mask = mask
new_mask._validate_wcs(new_data=self._data, new_wcs=self._wcs,
wcs_tolerance=wcs_tolerance or self._wcs_tolerance)
return self._new_cube_with(mask=new_mask, wcs_tolerance=wcs_tolerance)
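    # Usage sketch (assumes ``cube`` has brightness units of K and that, as the
    # LazyComparisonMask import above suggests, comparison operators on the
    # cube produce masks):
    #
    #     bright = cube.with_mask(cube > 0.5 * u.K)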
def __getitem__(self, view):
# Need to allow self[:], self[:,:]
if isinstance(view, (slice,int,np.int64)):
view = (view, slice(None), slice(None))
elif len(view) == 2:
view = view + (slice(None),)
elif len(view) > 3:
raise IndexError("Too many indices")
meta = {}
meta.update(self._meta)
slice_data = [(s.start, s.stop, s.step)
if hasattr(s,'start') else s
for s in view]
if 'slice' in meta:
meta['slice'].append(slice_data)
else:
meta['slice'] = [slice_data]
intslices = [2-ii for ii,s in enumerate(view) if not hasattr(s,'start')]
if intslices:
if len(intslices) > 1:
if 2 in intslices:
raise NotImplementedError("1D slices along non-spectral "
"axes are not yet implemented.")
newwcs = self._wcs.sub([a
for a in (1,2,3)
if a not in [x+1 for x in intslices]])
if cube_utils._has_beam(self):
bmarg = {'beam': self.beam}
elif cube_utils._has_beams(self):
bmarg = {'beams': self.beams}
else:
bmarg = {}
return self._oned_spectrum(value=self._data[view],
wcs=newwcs,
copy=False,
unit=self.unit,
spectral_unit=self._spectral_unit,
mask=self.mask[view] if self.mask is not None else None,
meta=meta,
**bmarg
)
# only one element, so drop an axis
newwcs = wcs_utils.drop_axis(self._wcs, intslices[0])
header = self._nowcs_header
if intslices[0] == 0:
# celestial: can report the wavelength/frequency of the axis
header['CRVAL3'] = self.spectral_axis[intslices[0]].value
header['CDELT3'] = self.wcs.sub([wcs.WCSSUB_SPECTRAL]).wcs.cdelt[0]
header['CUNIT3'] = self._spectral_unit.to_string(format='FITS')
return Slice(value=self.filled_data[view],
mask=self.mask[view] if self.mask is not None else None,
wcs=newwcs,
copy=False,
unit=self.unit,
header=header,
meta=meta)
newmask = self._mask[view] if self._mask is not None else None
newwcs = wcs_utils.slice_wcs(self._wcs, view, shape=self.shape)
return self._new_cube_with(data=self._data[view],
wcs=newwcs,
mask=newmask,
meta=meta)
@property
def unitless(self):
"""Return a copy of self with unit set to None"""
newcube = self._new_cube_with()
newcube._unit = None
return newcube
def with_spectral_unit(self, unit, velocity_convention=None,
rest_value=None):
"""
Returns a new Cube with a different Spectral Axis unit
Parameters
----------
unit : :class:`~astropy.units.Unit`
Any valid spectral unit: velocity, (wave)length, or frequency.
Only vacuum units are supported.
velocity_convention : 'relativistic', 'radio', or 'optical'
The velocity convention to use for the output velocity axis.
Required if the output type is velocity. This can be either one
of the above strings, or an `astropy.units` equivalency.
rest_value : :class:`~astropy.units.Quantity`
A rest wavelength or frequency with appropriate units. Required if
output type is velocity. The cube's WCS should include this
already if the *input* type is velocity, but the WCS's rest
wavelength/frequency can be overridden with this parameter.
.. note: This must be the rest frequency/wavelength *in vacuum*,
even if your cube has air wavelength units
"""
newwcs,newmeta = self._new_spectral_wcs(unit=unit,
velocity_convention=velocity_convention,
rest_value=rest_value)
if self._mask is not None:
newmask = self._mask.with_spectral_unit(unit,
velocity_convention=velocity_convention,
rest_value=rest_value)
newmask._wcs = newwcs
else:
newmask = None
cube = self._new_cube_with(wcs=newwcs, mask=newmask, meta=newmeta,
spectral_unit=unit)
return cube
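    # Usage sketch: convert a frequency-axis cube to radio velocity relative
    # to a rest frequency (the rest frequency below is only an example value):
    #
    #     vcube = cube.with_spectral_unit(u.km / u.s,
    #                                     velocity_convention='radio',
    #                                     rest_value=230.538 * u.GHz)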
@cube_utils.slice_syntax
def unmasked_data(self, view):
"""
Return a view of the subset of the underlying data,
ignoring the mask.
Returns
-------
data : Quantity instance
The unmasked data
"""
values = self._data[view]
# Astropy Quantities don't play well with dask arrays with shape ()
if isinstance(values, da.Array) and values.shape == ():
values = values.compute()
return u.Quantity(values, self.unit, copy=False)
def unmasked_copy(self):
"""
Return a copy of the cube with no mask (i.e., all data included)
"""
newcube = self._new_cube_with()
newcube._mask = None
return newcube
@cached
def _pix_cen(self):
"""
Offset of every pixel from the origin, along each direction
Returns
-------
tuple of spectral_offset, y_offset, x_offset, each 3D arrays
describing the distance from the origin
Notes
-----
These arrays are broadcast, and are not memory intensive
Each array is in the units of the corresponding wcs.cunit, but
this is implicit (e.g., they are not astropy Quantity arrays)
"""
# Start off by extracting the world coordinates of the pixels
_, lat, lon = self.world[0, :, :]
spectral, _, _ = self.world[:, 0, 0]
spectral -= spectral[0] # offset from first pixel
# Convert to radians
lon = np.radians(lon)
lat = np.radians(lat)
# Find the dx and dy arrays
from astropy.coordinates.angle_utilities import angular_separation
dx = angular_separation(lon[:, :-1], lat[:, :-1],
lon[:, 1:], lat[:, :-1])
dy = angular_separation(lon[:-1, :], lat[:-1, :],
lon[1:, :], lat[1:, :])
# Find the cumulative offset - need to add a zero at the start
x = np.zeros(self._data.shape[1:])
y = np.zeros(self._data.shape[1:])
x[:, 1:] = np.cumsum(np.degrees(dx), axis=1)
y[1:, :] = np.cumsum(np.degrees(dy), axis=0)
if isinstance(self._data, da.Array):
x, y, spectral = da.broadcast_arrays(x[None,:,:], y[None,:,:], spectral[:,None,None])
# NOTE: we need to rechunk these to the actual data size, otherwise
# the resulting arrays have a single chunk which can cause issues with
# da.store (which writes data out in chunks)
return (spectral.rechunk(self._data.chunksize),
y.rechunk(self._data.chunksize),
x.rechunk(self._data.chunksize))
else:
x, y, spectral = np.broadcast_arrays(x[None,:,:], y[None,:,:], spectral[:,None,None])
return spectral, y, x
@cached
def _pix_size_slice(self, axis):
"""
Return the size of each pixel along any given direction. Assumes
pixels have equal size. Also assumes that the spectral and spatial
directions are separable, which is enforced throughout this code.
Parameters
----------
axis : 0, 1, or 2
The axis along which to compute the pixel size
Returns
-------
Pixel size in units of either degrees or the appropriate spectral unit
"""
if axis == 0:
# note that self._spectral_scale is required here because wcs
# forces into units of m, m/s, or Hz
return np.abs(self.wcs.pixel_scale_matrix[2,2]) * self._spectral_scale
elif axis in (1,2):
# the pixel size is a projection. I think the pixel_scale_matrix
# must be symmetric, such that psm[axis,:]**2 == psm[:,axis]**2
return np.sum(self.wcs.pixel_scale_matrix[2-axis,:]**2)**0.5
else:
raise ValueError("Cubes have 3 axes.")
@cached
def _pix_size(self):
"""
Return the size of each pixel along each direction, in world units
Returns
-------
dv, dy, dx : tuple of 3D arrays
The extent of each pixel along each direction
Notes
-----
These arrays are broadcast, and are not memory intensive
Each array is in the units of the corresponding wcs.cunit, but
this is implicit (e.g., they are not astropy Quantity arrays)
"""
# First, scale along x direction
xpix = np.linspace(-0.5, self._data.shape[2] - 0.5, self._data.shape[2] + 1)
ypix = np.linspace(0., self._data.shape[1] - 1, self._data.shape[1])
xpix, ypix = np.meshgrid(xpix, ypix)
zpix = np.zeros(xpix.shape)
lon, lat, _ = self._wcs.all_pix2world(xpix, ypix, zpix, 0)
# Convert to radians
lon = np.radians(lon)
lat = np.radians(lat)
# Find the dx and dy arrays
from astropy.coordinates.angle_utilities import angular_separation
dx = angular_separation(lon[:, :-1], lat[:, :-1],
lon[:, 1:], lat[:, :-1])
# Next, scale along y direction
xpix = np.linspace(0., self._data.shape[2] - 1, self._data.shape[2])
ypix = np.linspace(-0.5,
self._data.shape[1] - 0.5,
self._data.shape[1] + 1)
xpix, ypix = np.meshgrid(xpix, ypix)
zpix = np.zeros(xpix.shape)
lon, lat, _ = self._wcs.all_pix2world(xpix, ypix, zpix, 0)
# Convert to radians
lon = np.radians(lon)
lat = np.radians(lat)
# Find the dx and dy arrays
from astropy.coordinates.angle_utilities import angular_separation
dy = angular_separation(lon[:-1, :], lat[:-1, :],
lon[1:, :], lat[1:, :])
# Next, spectral coordinates
zpix = np.linspace(-0.5, self._data.shape[0] - 0.5,
self._data.shape[0] + 1)
xpix = np.zeros(zpix.shape)
ypix = np.zeros(zpix.shape)
_, _, spectral = self._wcs.all_pix2world(xpix, ypix, zpix, 0)
# Take spectral units into account
# order of operations here is crucial! If this is done after
# broadcasting, the full array size is allocated, which is bad!
dspectral = np.diff(spectral) * self._spectral_scale
dx = np.abs(np.degrees(dx.reshape(1, dx.shape[0], dx.shape[1])))
dy = np.abs(np.degrees(dy.reshape(1, dy.shape[0], dy.shape[1])))
dspectral = np.abs(dspectral.reshape(-1, 1, 1))
dx, dy, dspectral = np.broadcast_arrays(dx, dy, dspectral)
return dspectral, dy, dx
def moment(self, order=0, axis=0, how='auto'):
"""
Compute moments along the spectral axis.
Moments are defined as follows, where :math:`I` is the intensity in a
channel and :math:`x` is the spectral coordinate:
Moment 0:
        .. math:: M_0 = \\int I dx
Moment 1:
.. math:: M_1 = \\frac{\\int I x dx}{M_0}
Moment N:
.. math:: M_N = \\frac{\\int I (x - M_1)^N dx}{M_0}
.. warning:: Note that these follow the mathematical definitions of
moments, and therefore the second moment will return a
variance map. To get linewidth maps, you can instead use
the :meth:`~SpectralCube.linewidth_fwhm` or
:meth:`~SpectralCube.linewidth_sigma` methods.
Parameters
----------
order : int
The order of the moment to take. Default=0
axis : int
The axis along which to compute the moment. Default=0
how : cube | slice | ray | auto
How to compute the moment. All strategies give the same
result, but certain strategies are more efficient depending
on data size and layout. Cube/slice/ray iterate over
decreasing subsets of the data, to conserve memory.
Default='auto'
Returns
-------
map [, wcs]
The moment map (numpy array) and, if wcs=True, the WCS object
describing the map
Notes
-----
Generally, how='cube' is fastest for small cubes that easily
fit into memory. how='slice' is best for most larger datasets.
how='ray' is probably only a good idea for very large cubes
whose data are contiguous over the axis of the moment map.
For the first moment, the result for axis=1, 2 is the angular
offset *relative to the cube face*. For axis=0, it is the
*absolute* velocity/frequency of the first moment.
"""
if axis == 0 and order == 2:
warnings.warn("Note that the second moment returned will be a "
"variance map. To get a linewidth map, use the "
"SpectralCube.linewidth_fwhm() or "
"SpectralCube.linewidth_sigma() methods instead.",
VarianceWarning)
from ._moments import (moment_slicewise, moment_cubewise,
moment_raywise, moment_auto)
dispatch = dict(slice=moment_slicewise,
cube=moment_cubewise,
ray=moment_raywise,
auto=moment_auto)
if how not in dispatch:
            raise ValueError("Invalid how. Must be in %s" %
sorted(list(dispatch.keys())))
out = dispatch[how](self, order, axis)
# apply units
if order == 0:
if axis == 0 and self._spectral_unit is not None:
axunit = unit = self._spectral_unit
else:
axunit = unit = u.Unit(self._wcs.wcs.cunit[np2wcs[axis]])
out = u.Quantity(out, self.unit * axunit, copy=False)
else:
if axis == 0 and self._spectral_unit is not None:
unit = self._spectral_unit ** max(order, 1)
else:
unit = u.Unit(self._wcs.wcs.cunit[np2wcs[axis]]) ** max(order, 1)
out = u.Quantity(out, unit, copy=False)
# special case: for order=1, axis=0, you usually want
# the absolute velocity and not the offset
if order == 1 and axis == 0:
out += self.world[0, :, :][0]
new_wcs = wcs_utils.drop_axis(self._wcs, np2wcs[axis])
meta = {'moment_order': order,
'moment_axis': axis,
'moment_method': how}
meta.update(self._meta)
return Projection(out, copy=False, wcs=new_wcs, meta=meta,
header=self._nowcs_header)
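    # Usage sketch: integrated-intensity and intensity-weighted velocity maps
    # (assumes the spectral axis is already in velocity units):
    #
    #     mom0 = cube.moment(order=0)
    #     mom1 = cube.moment(order=1)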
def moment0(self, axis=0, how='auto'):
"""
Compute the zeroth moment along an axis.
See :meth:`moment`.
"""
return self.moment(axis=axis, order=0, how=how)
def moment1(self, axis=0, how='auto'):
"""
Compute the 1st moment along an axis.
For an explanation of the ``axis`` and ``how`` parameters, see :meth:`moment`.
"""
return self.moment(axis=axis, order=1, how=how)
def moment2(self, axis=0, how='auto'):
"""
Compute the 2nd moment along an axis.
For an explanation of the ``axis`` and ``how`` parameters, see :meth:`moment`.
"""
return self.moment(axis=axis, order=2, how=how)
def linewidth_sigma(self, how='auto'):
"""
Compute a (sigma) linewidth map along the spectral axis.
For an explanation of the ``how`` parameter, see :meth:`moment`.
"""
with np.errstate(invalid='ignore'):
with warnings.catch_warnings():
warnings.simplefilter("ignore", VarianceWarning)
return np.sqrt(self.moment2(how=how))
def linewidth_fwhm(self, how='auto'):
"""
Compute a (FWHM) linewidth map along the spectral axis.
For an explanation of the ``how`` parameter, see :meth:`moment`.
"""
return self.linewidth_sigma() * SIGMA2FWHM
@property
def spectral_axis(self):
"""
A `~astropy.units.Quantity` array containing the central values of
each channel along the spectral axis.
"""
return self.world[:, 0, 0][0].ravel()
@property
def velocity_convention(self):
"""
The `~astropy.units.equivalencies` that describes the spectral axis
"""
return spectral_axis.determine_vconv_from_ctype(self.wcs.wcs.ctype[self.wcs.wcs.spec])
def closest_spectral_channel(self, value):
"""
Find the index of the closest spectral channel to the specified
spectral coordinate.
Parameters
----------
value : :class:`~astropy.units.Quantity`
The value of the spectral coordinate to search for.
"""
# TODO: we have to not compute this every time
spectral_axis = self.spectral_axis
try:
value = value.to(spectral_axis.unit, equivalencies=u.spectral())
except u.UnitsError:
if value.unit.is_equivalent(u.Hz, equivalencies=u.spectral()):
if spectral_axis.unit.is_equivalent(u.m / u.s):
raise u.UnitsError("Spectral axis is in velocity units and "
"'value' is in frequency-equivalent units "
"- use SpectralCube.with_spectral_unit "
"first to convert the cube to frequency-"
"equivalent units, or search for a "
"velocity instead")
else:
raise u.UnitsError("Unexpected spectral axis units: {0}".format(spectral_axis.unit))
elif value.unit.is_equivalent(u.m / u.s):
if spectral_axis.unit.is_equivalent(u.Hz, equivalencies=u.spectral()):
raise u.UnitsError("Spectral axis is in frequency-equivalent "
"units and 'value' is in velocity units "
"- use SpectralCube.with_spectral_unit "
"first to convert the cube to frequency-"
"equivalent units, or search for a "
"velocity instead")
else:
raise u.UnitsError("Unexpected spectral axis units: {0}".format(spectral_axis.unit))
else:
raise u.UnitsError("'value' should be in frequency equivalent or velocity units (got {0})".format(value.unit))
# TODO: optimize the next line - just brute force for now
return np.argmin(np.abs(spectral_axis - value))
def spectral_slab(self, lo, hi):
"""
Extract a new cube between two spectral coordinates
Parameters
----------
lo, hi : :class:`~astropy.units.Quantity`
The lower and upper spectral coordinate for the slab range. The
units should be compatible with the units of the spectral axis.
If the spectral axis is in frequency-equivalent units and you
want to select a range in velocity, or vice-versa, you should
first use :meth:`~spectral_cube.SpectralCube.with_spectral_unit`
to convert the units of the spectral axis.
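Examples
--------
An illustrative sketch, assuming ``cube`` is an existing `SpectralCube`
whose header defines a rest frequency:
>>> import astropy.units as u  # doctest: +SKIP
>>> vcube = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio')  # doctest: +SKIP
>>> slab = vcube.spectral_slab(-50 * u.km / u.s, 50 * u.km / u.s)  # doctest: +SKIP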
"""
# Find range of values for spectral axis
ilo = self.closest_spectral_channel(lo)
ihi = self.closest_spectral_channel(hi)
if ilo > ihi:
ilo, ihi = ihi, ilo
ihi += 1
# Create WCS slab
wcs_slab = self._wcs.deepcopy()
wcs_slab.wcs.crpix[2] -= ilo
# Create mask slab
if self._mask is None:
mask_slab = None
else:
try:
mask_slab = self._mask[ilo:ihi, :, :]
except NotImplementedError:
warnings.warn("Mask slicing not implemented for "
"{0} - dropping mask".
format(self._mask.__class__.__name__),
NotImplementedWarning
)
mask_slab = None
# Create new spectral cube
slab = self._new_cube_with(data=self._data[ilo:ihi], wcs=wcs_slab,
mask=mask_slab)
# TODO: we could change the WCS to give a spectral axis in the
# correct units as requested - so if the initial cube is in Hz and we
# request a range in km/s, we could adjust the WCS to be in km/s
# instead
return slab
def minimal_subcube(self, spatial_only=False):
"""
Return the minimum enclosing subcube where the mask is valid
Parameters
----------
spatial_only: bool
Only compute the minimal subcube in the spatial dimensions
"""
if self._mask is not None:
return self[self.subcube_slices_from_mask(self._mask,
spatial_only=spatial_only)]
else:
return self[:]
def subcube_from_mask(self, region_mask):
"""
Given a mask, return the minimal subcube that encloses the mask
Parameters
----------
region_mask: `~spectral_cube.masks.MaskBase` or boolean `numpy.ndarray`
The mask with appropriate WCS or an ndarray with matched
coordinates
"""
return self[self.subcube_slices_from_mask(region_mask)]
def subcube_slices_from_mask(self, region_mask, spatial_only=False):
"""
Given a mask, return the slices corresponding to the minimum subcube
that encloses the mask
Parameters
----------
region_mask: `~spectral_cube.masks.MaskBase` or boolean `numpy.ndarray`
The mask with appropriate WCS or an ndarray with matched
coordinates
spatial_only: bool
Return only slices that affect the spatial dimensions; the spectral
dimension will be left unchanged
"""
if not scipyOK:
raise ImportError("Scipy could not be imported: this function won't work.")
if isinstance(region_mask, np.ndarray):
if is_broadcastable_and_smaller(region_mask.shape, self.shape):
region_mask = BooleanArrayMask(region_mask, self._wcs)
else:
raise ValueError("Mask shape does not match cube shape.")
include = region_mask.include(self._data, self._wcs,
wcs_tolerance=self._wcs_tolerance)
if not include.any():
return (slice(0),)*3
slices = ndimage.find_objects(np.broadcast_arrays(include,
self._data)[0])[0]
if spatial_only:
slices = (slice(None), slices[1], slices[2])
return tuple(slices)
def subcube(self, xlo='min', xhi='max', ylo='min', yhi='max', zlo='min',
zhi='max', rest_value=None):
"""
Extract a sub-cube spatially and spectrally.
When spatial WCS dimensions are given as an `~astropy.units.Quantity`,
the spatial coordinates of the 'lo' and 'hi' corners are solved together.
This minimizes WCS variations due to the sky curvature when slicing from
a large (>1 deg) image.
Parameters
----------
[xyz]lo/[xyz]hi : int or :class:`~astropy.units.Quantity` or ``min``/``max``
The endpoints to extract. If given as a quantity, will be
interpreted as World coordinates. If given as a string or
int, will be interpreted as pixel coordinates.
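Examples
--------
A short sketch, assuming ``cube`` is an existing `SpectralCube`; integer
limits are pixel indices, `~astropy.units.Quantity` limits are world
coordinates:
>>> small = cube.subcube(xlo=0, xhi=32, ylo=0, yhi=32)  # doctest: +SKIP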
"""
dims = {'x': 2,
'y': 1,
'z': 0}
limit_dict = {}
limit_dict['zlo'] = 0 if zlo == 'min' else zlo
limit_dict['zhi'] = self.shape[0] if zhi == 'max' else zhi
# Specific warning for slicing a frequency axis with a velocity or
# vice/versa
if ((hasattr(zlo, 'unit') and not
zlo.unit.is_equivalent(self.spectral_axis.unit)) or
(hasattr(zhi, 'unit') and not
zhi.unit.is_equivalent(self.spectral_axis.unit))):
raise u.UnitsError("Spectral units are not equivalent to the "
"spectral slice. Use `.with_spectral_unit` "
"to convert to equivalent units first")
# Solve for the spatial pixel indices together
limit_dict_spat = wcs_utils.find_spatial_pixel_index(self, xlo, xhi, ylo, yhi)
limit_dict.update(limit_dict_spat)
# Handle the z (spectral) axis. This shouldn't change
# much spatially, so solve one at a time
# Track if the z axis values had units. Will need to make a +1 correction below
united = []
for lim in limit_dict:
if 'z' not in lim:
continue
limval = limit_dict[lim]
if hasattr(limval, 'unit'):
united.append(lim)
dim = dims[lim[0]]
sl = [slice(0,1)]*2
sl.insert(dim, slice(None))
sl = tuple(sl)
spine = self.world[sl][dim]
val = np.argmin(np.abs(limval-spine))
if limval > spine.max() or limval < spine.min():
log.warning("The limit {0} is out of bounds."
" Using min/max instead.".format(lim))
limit_dict[lim] = val
# Check spectral axis ordering.
hi,lo = limit_dict['zhi'], limit_dict['zlo']
if hi < lo:
# must have high > low
limit_dict['zhi'], limit_dict['zlo'] = lo, hi
if 'zhi' in united:
# End-inclusive indexing: need to add one for the high slice
# Only do this for converted values, not for pixel values
# (i.e., if the xlo/ylo/zlo value had units)
limit_dict['zhi'] += 1
for xx in 'zyx':
if limit_dict[xx+'hi'] == limit_dict[xx+'lo']:
# I think this should be unreachable now
raise ValueError("The slice in the {0} direction will remove "
"all elements. If you want a single-channel "
"slice, you need a different approach."
.format(xx))
slices = [slice(limit_dict[xx+'lo'], limit_dict[xx+'hi'])
for xx in 'zyx']
slices = tuple(slices)
log.debug('slices: {0}'.format(slices))
return self[slices]
def subcube_from_ds9region(self, ds9_region, allow_empty=False):
"""
Extract a masked subcube from a ds9 region
(only functions on celestial dimensions)
Parameters
----------
ds9_region: str
The DS9 region(s) to extract
allow_empty: bool
If this is False, an exception will be raised if the region
contains no overlap with the cube
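Examples
--------
An illustrative sketch, assuming ``cube`` is an existing `SpectralCube`;
the coordinates in the region string are placeholders:
>>> region_str = "fk5; circle(19:23:43.9, +14:30:34.2, 1')"  # doctest: +SKIP
>>> subcube = cube.subcube_from_ds9region(region_str)  # doctest: +SKIP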
"""
import regions
if isinstance(ds9_region, six.string_types):
region_list = regions.DS9Parser(ds9_region).shapes.to_regions()
else:
raise TypeError("{0} should be a DS9 string".format(ds9_region))
return self.subcube_from_regions(region_list, allow_empty)
def subcube_from_crtfregion(self, crtf_region, allow_empty=False):
"""
Extract a masked subcube from a CRTF region.
Parameters
----------
crtf_region: str
The CRTF region(s) string to extract
allow_empty: bool
If this is False, an exception will be raised if the region
contains no overlap with the cube
"""
import regions
if isinstance(crtf_region, six.string_types):
region_list = regions.CRTFParser(crtf_region).shapes.to_regions()
else:
raise TypeError("{0} should be a CRTF string".format(crtf_region))
return self.subcube_from_regions(region_list, allow_empty)
def subcube_from_regions(self, region_list, allow_empty=False):
"""
Extract a masked subcube from a list of ``regions.Region`` object
(only functions on celestial dimensions)
Parameters
----------
region_list: ``regions.Region`` list
The region(s) to extract
allow_empty: bool, optional
If this is False, an exception will be raised if the region
contains no overlap with the cube. Default is False.
"""
import regions
# Convert every region to a `regions.PixelRegion` object.
regs = []
for x in region_list:
if isinstance(x, regions.SkyRegion):
regs.append(x.to_pixel(self.wcs.celestial))
elif isinstance(x, regions.PixelRegion):
regs.append(x)
else:
raise TypeError("'{}' should be `regions.Region` object".format(x))
# List of regions are converted to a `regions.CompoundPixelRegion` object.
compound_region = _regionlist_to_single_region(regs)
# Compound mask of all the regions.
mask = compound_region.to_mask()
# Collecting frequency/velocity range, velocity type and rest frequency
# of each region.
ranges = [x.meta.get('range', None) for x in regs]
veltypes = [x.meta.get('veltype', None) for x in regs]
restfreqs = [x.meta.get('restfreq', None) for x in regs]
xlo, xhi, ylo, yhi = mask.bbox.ixmin, mask.bbox.ixmax, mask.bbox.iymin, mask.bbox.iymax
# Negative indices will do bad things, like wrap around the cube
# If xhi/yhi are negative, there is not overlap
if (xhi < 0) or (yhi < 0):
raise ValueError("Region is outside of cube.")
if xlo < 0:
xlo = 0
if ylo < 0:
ylo = 0
# If None, then the whole spectral range of the cube is selected.
if None in ranges:
subcube = self.subcube(xlo=xlo, ylo=ylo, xhi=xhi, yhi=yhi)
else:
ranges = self._velocity_freq_conversion_regions(ranges, veltypes, restfreqs)
zlo = min([x[0] for x in ranges])
zhi = max([x[1] for x in ranges])
slab = self.spectral_slab(zlo, zhi)
subcube = slab.subcube(xlo=xlo, ylo=ylo, xhi=xhi, yhi=yhi)
if any(dim == 0 for dim in subcube.shape):
if allow_empty:
warnings.warn("The derived subset is empty: the region does not"
" overlap with the cube (but allow_empty=True).")
else:
raise ValueError("The derived subset is empty: the region does not"
" overlap with the cube.")
# cropping the mask from top left corner so that it fits the subcube.
maskarray = mask.data[:subcube.shape[1], :subcube.shape[2]].astype('bool')
masked_subcube = subcube.with_mask(BooleanArrayMask(maskarray, subcube.wcs, shape=subcube.shape))
# by using ceil / floor above, we potentially introduced a NaN buffer
# that we can now crop out
return masked_subcube.minimal_subcube(spatial_only=True)
def _velocity_freq_conversion_regions(self, ranges, veltypes, restfreqs):
"""
Makes the spectral range of the regions compatible with the spectral
convention of the cube.
Parameters
----------
ranges: list of `~astropy.units.Quantity`
The spectral range (the minimum and maximum limits along the spectral
axis) of each ``regions.Region`` object.
veltypes: List of `str`
The velocity convention used by each region. Each element should be one
of {'RADIO' | 'OPTICAL' | 'Z' | 'BETA' | 'GAMMA' | 'RELATIVISTIC' | None}.
An element may be `None` if the region's velocity convention is unknown,
in which case the cube's convention is assumed.
restfreqs: List of `~astropy.units.Quantity`
The rest frequency of each region.
"""
header = self.wcs.to_header()
# Obtaining rest frequency of the cube in GHz.
restfreq_cube = get_rest_value_from_wcs(self.wcs).to("GHz",
equivalencies=u.spectral())
CTYPE3 = header['CTYPE3']
veltype_cube = determine_vconv_from_ctype(CTYPE3)
veltype_equivalencies = dict(RADIO=u.doppler_radio,
OPTICAL=u.doppler_optical,
Z=doppler_z,
BETA=doppler_beta,
GAMMA=doppler_gamma,
RELATIVISTIC=u.doppler_relativistic
)
final_ranges = []
for range, veltype, restfreq in zip(ranges, veltypes, restfreqs):
if restfreq is None:
restfreq = restfreq_cube
restfreq = restfreq.to("GHz", equivalencies=u.spectral())
if veltype not in veltype_equivalencies and veltype is not None:
raise ValueError("Spectral Cube doesn't support {} this type of"
"velocity".format(veltype))
veltype = veltype_equivalencies.get(veltype, veltype_cube)
# Because there is chance that the veltype and rest frequency
# of the region may not be the same as that of cube, we convert it
# to frequency and then convert to the spectral unit of the cube.
freq_range = (u.Quantity(range).to("GHz",
equivalencies=veltype(restfreq)))
final_ranges.append(freq_range.to(header['CUNIT3'],
equivalencies=veltype_cube(restfreq_cube)))
return final_ranges
def _val_to_own_unit(self, value, operation='compare', tofrom='to',
keepunit=False):
"""
Given a value, check if it has a unit. If it does, convert to the
cube's unit. If it doesn't, raise an exception.
"""
if isinstance(value, SpectralCube):
if self.unit.is_equivalent(value.unit):
return value
else:
return value.to(self.unit)
elif hasattr(value, 'unit'):
if keepunit:
return value.to(self.unit)
else:
return value.to(self.unit).value
else:
raise ValueError("Can only {operation} cube objects {tofrom}"
" SpectralCubes or Quantities with "
"a unit attribute."
.format(operation=operation, tofrom=tofrom))
def __gt__(self, value):
"""
Return a LazyMask representing the inequality
Parameters
----------
value : number
The threshold
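Examples
--------
A short sketch of the typical use, building a mask from the comparison and
applying it with ``with_mask`` (the ``1.3 * u.K`` threshold assumes a cube
in Kelvin):
>>> import astropy.units as u  # doctest: +SKIP
>>> masked_cube = cube.with_mask(cube > 1.3 * u.K)  # doctest: +SKIP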
"""
value = self._val_to_own_unit(value)
return LazyComparisonMask(operator.gt, value, data=self._data, wcs=self._wcs)
def __ge__(self, value):
value = self._val_to_own_unit(value)
return LazyComparisonMask(operator.ge, value, data=self._data, wcs=self._wcs)
def __le__(self, value):
value = self._val_to_own_unit(value)
return LazyComparisonMask(operator.le, value, data=self._data, wcs=self._wcs)
def __lt__(self, value):
value = self._val_to_own_unit(value)
return LazyComparisonMask(operator.lt, value, data=self._data, wcs=self._wcs)
def __eq__(self, value):
value = self._val_to_own_unit(value)
return LazyComparisonMask(operator.eq, value, data=self._data, wcs=self._wcs)
def __hash__(self):
return id(self)
def __ne__(self, value):
value = self._val_to_own_unit(value)
return LazyComparisonMask(operator.ne, value, data=self._data, wcs=self._wcs)
def __add__(self, value):
if isinstance(value, SpectralCube):
return self._cube_on_cube_operation(operator.add, value)
else:
value = self._val_to_own_unit(value, operation='add', tofrom='from',
keepunit=True)
return self._apply_everywhere(operator.add, value)
def __sub__(self, value):
if isinstance(value, SpectralCube):
return self._cube_on_cube_operation(operator.sub, value)
else:
value = self._val_to_own_unit(value, operation='subtract',
tofrom='from', keepunit=True)
return self._apply_everywhere(operator.sub, value)
def __mul__(self, value):
if isinstance(value, SpectralCube):
return self._cube_on_cube_operation(operator.mul, value)
else:
return self._apply_everywhere(operator.mul, value)
def __truediv__(self, value):
return self.__div__(value)
def __div__(self, value):
if isinstance(value, SpectralCube):
return self._cube_on_cube_operation(operator.truediv, value)
else:
return self._apply_everywhere(operator.truediv, value)
def __pow__(self, value):
if isinstance(value, SpectralCube):
return self._cube_on_cube_operation(operator.pow, value)
else:
return self._apply_everywhere(operator.pow, value)
def to_yt(self, spectral_factor=1.0, nprocs=None, **kwargs):
"""
Convert a spectral cube to a yt object that can be further analyzed in
yt.
Parameters
----------
spectral_factor : float, optional
Factor by which to stretch the spectral axis. If set to 1, one pixel
in spectral coordinates is equivalent to one pixel in spatial
coordinates.
If using yt 3.0 or later, additional keyword arguments will be passed
onto yt's ``FITSDataset`` constructor. See the yt documentation
(http://yt-project.org/docs/3.0/examining/loading_data.html?#fits-data)
for details on options for reading FITS data.
"""
import yt
if (('dev' in yt.__version__) or
(LooseVersion(yt.__version__) >= LooseVersion('3.0'))):
# yt has updated their FITS data set so that only the SpectralCube
# variant takes spectral_factor
try:
from yt.frontends.fits.api import SpectralCubeFITSDataset as FITSDataset
except ImportError:
from yt.frontends.fits.api import FITSDataset
from yt.units.unit_object import UnitParseError
data = self._get_filled_data(fill=0.)
if isinstance(data, da.Array):
# Note that >f8 can cause issues with yt, and for visualization
# we don't really need the full 64-bit of floating point
# precision, so we cast to float32.
data = data.astype(np.float32).compute()
hdu = PrimaryHDU(data, header=self.wcs.to_header())
units = str(self.unit.to_string())
hdu.header["BUNIT"] = units
hdu.header["BTYPE"] = "flux"
ds = FITSDataset(hdu, nprocs=nprocs,
spectral_factor=spectral_factor, **kwargs)
# Check to make sure the units are legit
try:
ds.quan(1.0,units)
except UnitParseError:
raise RuntimeError("The unit %s was not parsed by yt. " % units+
"Check to make sure it is correct.")
else:
from yt.mods import load_uniform_grid
data = {'flux': self._get_filled_data(fill=0.).transpose()}
nz, ny, nx = self.shape
if nprocs is None:
nprocs = 1
bbox = np.array([[0.5,float(nx)+0.5],
[0.5,float(ny)+0.5],
[0.5,spectral_factor*float(nz)+0.5]])
ds = load_uniform_grid(data, [nx,ny,nz], 1., bbox=bbox,
nprocs=nprocs, periodicity=(False, False,
False))
return ytCube(self, ds, spectral_factor=spectral_factor)
def to_glue(self, name=None, glue_app=None, dataset=None, start_gui=True):
"""
Send data to a new or existing Glue application
Parameters
----------
name : str or None
The name of the dataset within Glue. If None, defaults to
'SpectralCube'. If a dataset with the given name already exists,
a new dataset with "_" appended will be added instead.
glue_app : GlueApplication or None
A glue application to send the data to. If this is not specified,
a new glue application will be started if one does not already
exist for this cube. Otherwise, the data will be sent to the
existing glue application, `self._glue_app`.
dataset : glue.core.Data or None
An existing Data object to add the cube to. This is a good way
to compare cubes with the same dimensions. Supersedes ``glue_app``
start_gui : bool
Start the GUI when this is run. Set to `False` for testing.
"""
if name is None:
name = 'SpectralCube'
from glue.app.qt import GlueApplication
from glue.core import DataCollection, Data
from glue.core.coordinates import coordinates_from_header
try:
from glue.viewers.image.qt.data_viewer import ImageViewer
except ImportError:
from glue.viewers.image.qt.viewer_widget import ImageWidget as ImageViewer
if dataset is not None:
if name in [d.label for d in dataset.components]:
name = name+"_"
dataset[name] = self
else:
result = Data(label=name)
result.coords = coordinates_from_header(self.header)
result.add_component(self, name)
if glue_app is None:
if hasattr(self,'_glue_app'):
glue_app = self._glue_app
else:
# Start a new glue session. This will quit when done.
# I don't think the return statement is ever reached, based on
# past attempts [@ChrisBeaumont - chime in here if you'd like]
dc = DataCollection([result])
#start Glue
ga = self._glue_app = GlueApplication(dc)
self._glue_viewer = ga.new_data_viewer(ImageViewer,
data=result)
if start_gui:
self._glue_app.start()
return self._glue_app
glue_app.add_datasets(glue_app.data_collection, result)
def to_pvextractor(self):
"""
Open the cube in a quick viewer written in matplotlib that allows you
to create PV extractions within the GUI
"""
from pvextractor.gui import PVSlicer
return PVSlicer(self)
def to_ds9(self, ds9id=None, newframe=False):
"""
Send the data to ds9 (this will create a copy in memory)
Parameters
----------
ds9id: None or string
The DS9 session ID. If 'None', a new one will be created.
To find your ds9 session ID, open the ds9 menu option
File:XPA:Information and look for the XPA_METHOD string, e.g.
``XPA_METHOD: 86ab2314:60063``. You would then call this
function as ``cube.to_ds9('86ab2314:60063')``
newframe: bool
Send the cube to a new frame or to the current frame?
"""
try:
import ds9
except ImportError:
import pyds9 as ds9
if ds9id is None:
dd = ds9.DS9(start=True)
else:
dd = ds9.DS9(target=ds9id, start=False)
if newframe:
dd.set('frame new')
dd.set_pyfits(self.hdulist)
return dd
@property
def header(self):
log.debug("Creating header")
header = super(BaseSpectralCube, self).header
# Preserve the cube's spectral units
# (if CUNIT3 is not in the header, it is whatever that type's default unit is)
if 'CUNIT3' in header and self._spectral_unit != u.Unit(header['CUNIT3']):
header['CDELT3'] *= self._spectral_scale
header['CRVAL3'] *= self._spectral_scale
header['CUNIT3'] = self._spectral_unit.to_string(format='FITS')
return header
@property
def hdu(self):
"""
HDU version of self
"""
log.debug("Creating HDU")
hdu = PrimaryHDU(self.filled_data[:].value, header=self.header)
return hdu
@property
def hdulist(self):
return HDUList(self.hdu)
@warn_slow
def to(self, unit, equivalencies=()):
"""
Return the cube converted to the given unit (assuming it is equivalent).
If conversion was required, this will be a copy; otherwise, the original cube is returned.
"""
if not isinstance(unit, u.Unit):
unit = u.Unit(unit)
if unit == self.unit:
# No copying
return self
# Create the tuple of unit conversions needed.
factor = cube_utils.bunit_converters(self, unit, equivalencies=equivalencies)
# special case: array in equivalencies
# (I don't think this should have to be special cased, but I don't know
# how to manipulate broadcasting rules any other way)
if hasattr(factor, '__len__') and len(factor) == len(self):
return self._new_cube_with(data=self._data*factor[:,None,None],
unit=unit)
else:
return self._new_cube_with(data=self._data*factor,
unit=unit)
def find_lines(self, velocity_offset=None, velocity_convention=None,
rest_value=None, **kwargs):
"""
Using astroquery's splatalogue interface, search for lines within the
spectral band. See `astroquery.splatalogue.Splatalogue` for
information on keyword arguments
Parameters
----------
velocity_offset : u.km/u.s equivalent
An offset by which the spectral axis should be shifted before
searching splatalogue. This value will be *added* to the velocity,
so if you want to redshift a spectrum, make this value positive,
and if you want to un-redshift it, make this value negative.
velocity_convention : 'radio', 'optical', 'relativistic'
The doppler convention to pass to `with_spectral_unit`
rest_value : u.GHz equivalent
The rest frequency (or wavelength or energy) to be passed to
`with_spectral_unit`
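Examples
--------
An illustrative sketch, assuming ``cube`` is an existing `SpectralCube`;
this requires `astroquery` and a network connection, and the species name
is only an example:
>>> lines = cube.find_lines(chemical_name=' CO ')  # doctest: +SKIP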
"""
warnings.warn("The line-finding routine is experimental. Please "
"report bugs on the Issues page: "
"https://github.com/radio-astro-tools/spectral-cube/issues",
ExperimentalImplementationWarning
)
from astroquery.splatalogue import Splatalogue
if velocity_convention in DOPPLER_CONVENTIONS:
velocity_convention = DOPPLER_CONVENTIONS[velocity_convention]
if velocity_offset is not None:
newspecaxis = self.with_spectral_unit(u.km/u.s,
velocity_convention=velocity_convention,
rest_value=rest_value).spectral_axis
spectral_axis = (newspecaxis + velocity_offset).to(u.GHz,
velocity_convention(rest_value))
else:
spectral_axis = self.spectral_axis.to(u.GHz)
numin,numax = spectral_axis.min(), spectral_axis.max()
log.log(19, "Min/max frequency: {0},{1}".format(numin, numax))
result = Splatalogue.query_lines(numin, numax, **kwargs)
return result
@warn_slow
def reproject(self, header, order='bilinear', use_memmap=False,
filled=True):
"""
Spatially reproject the cube into a new header. Fills the data with
the cube's ``fill_value`` to replace bad values before reprojection.
If you want to reproject a cube both spatially and spectrally, you need
to use `spectral_interpolate` as well.
.. warning::
The current implementation of ``reproject`` requires that the whole
cube be loaded into memory. Issue #506 notes that this is a
problem, and it is on our to-do list to fix.
Parameters
----------
header : `astropy.io.fits.Header`
A header specifying a cube in valid WCS
order : int or str, optional
The order of the interpolation. This can be either one of the following
strings:
* 'nearest-neighbor'
* 'bilinear'
* 'biquadratic'
* 'bicubic'
or an integer. A value of ``0`` indicates nearest neighbor
interpolation.
use_memmap : bool
If specified, a memory mapped temporary file on disk will be
written to rather than storing the intermediate spectra in memory.
filled : bool
Fill the masked values with the cube's fill value before
reprojection? Note that setting ``filled=False`` will use the raw data
array, which avoids loading the full masked cube into memory and can be
useful for very large cubes.
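Examples
--------
An illustrative sketch reprojecting onto the spatial grid of a second,
hypothetical cube:
>>> other = SpectralCube.read('other_cube.fits')  # doctest: +SKIP
>>> regridded = cube.reproject(other.header)  # doctest: +SKIP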
"""
try:
from reproject.version import version
except ImportError:
raise ImportError("Requires the reproject package to be"
" installed.")
# Need version > 0.2 to work with cubes, >= 0.5 for memmap
from distutils.version import LooseVersion
if LooseVersion(version) < "0.5":
raise Warning("Requires version >=0.5 of reproject. The current "
"version is: {}".format(version))
elif LooseVersion(version) >= "0.6":
reproj_kwargs = {}
else:
reproj_kwargs = {'independent_celestial_slices': True}
from reproject import reproject_interp
# TODO: Find the minimal subcube that contains the header and only reproject that
# (see FITS_tools.regrid_cube for a guide on how to do this)
newwcs = wcs.WCS(header)
shape_out = tuple([header['NAXIS{0}'.format(i + 1)] for i in
range(header['NAXIS'])][::-1])
if filled:
data = self.unitless_filled_data[:]
else:
data = self._data
if use_memmap:
if data.dtype.itemsize not in (4,8):
raise ValueError("Data must be float32 or float64 to be "
"reprojected. Other data types need some "
"kind of additional memory handling.")
# note: requires reproject from December 2018 or later
outarray = np.memmap(filename='output.np', mode='w+',
shape=tuple(shape_out),
dtype='float64' if data.dtype.itemsize == 8 else 'float32')
else:
outarray = None
newcube, newcube_valid = reproject_interp((data,
self.header),
newwcs,
output_array=outarray,
shape_out=shape_out,
order=order,
**reproj_kwargs)
return self._new_cube_with(data=newcube,
wcs=newwcs,
mask=BooleanArrayMask(newcube_valid.astype('bool'),
newwcs),
meta=self.meta,
)
@parallel_docstring
def spatial_smooth_median(self, ksize, update_function=None, **kwargs):
"""
Smooth the image in each spatial-spatial plane of the cube using a median filter.
Parameters
----------
ksize : int
Size of the median filter (scipy.ndimage.filters.median_filter)
update_function : method
Method that is called to update an external progressbar
If provided, it disables the default `astropy.utils.console.ProgressBar`
kwargs : dict
Passed to the convolve function
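Examples
--------
A short sketch applying a 3-pixel median filter to each channel of an
existing ``cube``:
>>> smoothed = cube.spatial_smooth_median(3)  # doctest: +SKIP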
"""
if not scipyOK:
raise ImportError("Scipy could not be imported: this function won't work.")
def _msmooth_image(im, **kwargs):
return ndimage.filters.median_filter(im, size=ksize, **kwargs)
newcube = self.apply_function_parallel_spatial(_msmooth_image,
**kwargs)
return newcube
@parallel_docstring
def spatial_smooth(self, kernel,
convolve=convolution.convolve,
**kwargs):
"""
Smooth the image in each spatial-spatial plane of the cube.
Parameters
----------
kernel : `~astropy.convolution.Kernel2D`
A 2D kernel from astropy
convolve : function
The astropy convolution function to use, either
`astropy.convolution.convolve` or
`astropy.convolution.convolve_fft`
kwargs : dict
Passed to the convolve function
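Examples
--------
A short sketch smoothing each channel of an existing ``cube`` with a
Gaussian kernel of 2-pixel standard deviation:
>>> from astropy.convolution import Gaussian2DKernel  # doctest: +SKIP
>>> smoothed = cube.spatial_smooth(Gaussian2DKernel(x_stddev=2))  # doctest: +SKIP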
"""
def _gsmooth_image(img, **kwargs):
"""
Helper function to smooth an image
"""
return convolve(img, kernel, normalize_kernel=True, **kwargs)
newcube = self.apply_function_parallel_spatial(_gsmooth_image,
**kwargs)
return newcube
@parallel_docstring
def spectral_smooth_median(self, ksize,
use_memmap=True,
verbose=0,
num_cores=None,
**kwargs):
"""
Smooth the cube along the spectral dimension
Parameters
----------
ksize : int
Size of the median filter (scipy.ndimage.filters.median_filter)
verbose : int
Verbosity level to pass to joblib
kwargs : dict
Not used at the moment.
"""
if not scipyOK:
raise ImportError("Scipy could not be imported: this function won't work.")
return self.apply_function_parallel_spectral(ndimage.filters.median_filter,
size=ksize,
verbose=verbose,
num_cores=num_cores,
use_memmap=use_memmap,
**kwargs)
def _apply_function_parallel_base(self,
iteration_data,
function,
applicator,
num_cores=None,
verbose=0,
use_memmap=True,
parallel=False,
memmap_dir=None,
update_function=None,
**kwargs
):
"""
Apply a function in parallel using the ``applicator`` function. The
function will be performed on data with masked values replaced with the
cube's fill value.
Parameters
----------
iteration_data : generator
The data to be iterated over in the format expected by ``applicator``
function : function
The function to apply in the spectral dimension. It must take
two arguments: an array representing a spectrum and a boolean array
representing the mask. It may also accept ``**kwargs``. The
function must return an object with the same shape as the input
spectrum.
applicator : function
Either ``_apply_spatial_function`` or ``_apply_spectral_function``,
a tool to handle the iteration data and send it to the ``function``
appropriately.
num_cores : int or None
The number of cores to use if running in parallel. Should be >1 if
``parallel==True`` and cannot be >1 if ``parallel==False``
verbose : int
Verbosity level to pass to joblib
use_memmap : bool
If specified, a memory mapped temporary file on disk will be
written to rather than storing the intermediate spectra in memory.
parallel : bool
If set to ``False``, will force the use of a single thread instead
of using ``joblib``.
update_function : function
A callback function to call on each iteration of the application.
It should not accept any arguments. For example, this can be
``Progressbar.update`` or some function that prints a status
report. The function *must* be picklable if ``parallel==True``.
kwargs : dict
Passed to ``function``
"""
if use_memmap:
ntf = tempfile.NamedTemporaryFile(dir=memmap_dir)
outcube = np.memmap(ntf, mode='w+', shape=self.shape, dtype=float)
else:
if self._is_huge and not self.allow_huge_operations:
raise ValueError("Applying a function without ``use_memmap`` "
"requires loading the whole array into "
"memory *twice*, which can overload the "
"machine's memory for large cubes. Either "
"set ``use_memmap=True`` or set "
"``cube.allow_huge_operations=True`` to "
"override this restriction.")
outcube = np.empty(shape=self.shape, dtype=float)
if num_cores == 1 and parallel:
warnings.warn("parallel=True was specified but num_cores=1. "
"Joblib will be used to run the task with a "
"single thread.")
elif num_cores is not None and num_cores > 1 and not parallel:
raise ValueError("parallel execution was not requested, but "
"multiple cores were: these are incompatible "
"options. Either specify num_cores=1 or "
"parallel=True")
if parallel and use_memmap:
# it is not possible to run joblib parallelization without memmap
try:
import joblib
from joblib._parallel_backends import MultiprocessingBackend
from joblib import register_parallel_backend, parallel_backend
from joblib import Parallel, delayed
if update_function is not None:
# https://stackoverflow.com/questions/38483874/intermediate-results-from-joblib
class MultiCallback:
def __init__(self, *callbacks):
self.callbacks = [cb for cb in callbacks if cb]
def __call__(self, out):
for cb in self.callbacks:
cb(out)
class Callback_Backend(MultiprocessingBackend):
def callback(self, result):
update_function()
# Overload apply_async and set callback=self.callback
def apply_async(self, func, callback=None):
cbs = MultiCallback(callback, self.callback)
return super().apply_async(func, cbs)
joblib.register_parallel_backend('custom',
Callback_Backend,
make_default=True)
Parallel(n_jobs=num_cores,
verbose=verbose,
max_nbytes=None)(delayed(applicator)(arg, outcube,
function,
**kwargs)
for arg in iteration_data)
except ImportError:
if num_cores is not None and num_cores > 1:
warnings.warn("Could not import joblib. Will run in serial.",
ImportWarning)
parallel = False
# this isn't an else statement because we want to catch the case where
# the above clause fails on ImportError
if not parallel or not use_memmap:
if update_function is not None:
pbu = update_function
elif verbose > 0:
progressbar = ProgressBar(self.shape[1]*self.shape[2])
pbu = progressbar.update
else:
pbu = object
for arg in iteration_data:
applicator(arg, outcube, function, **kwargs)
pbu()
# TODO: do something about the mask?
newcube = self._new_cube_with(data=outcube, wcs=self.wcs,
mask=self.mask, meta=self.meta,
fill_value=self.fill_value)
return newcube
def apply_function_parallel_spatial(self,
function,
num_cores=None,
verbose=0,
use_memmap=True,
parallel=True,
**kwargs
):
"""
Apply a function in parallel along the spatial dimension. The
function will be performed on data with masked values replaced with the
cube's fill value.
Parameters
----------
function : function
The function to apply in the spatial dimension. It must take
two arguments: an array representing an image and a boolean array
representing the mask. It may also accept ``**kwargs``. The
function must return an object with the same shape as the input
spectrum.
num_cores : int or None
The number of cores to use if running in parallel
verbose : int
Verbosity level to pass to joblib
use_memmap : bool
If specified, a memory mapped temporary file on disk will be
written to rather than storing the intermediate spectra in memory.
parallel : bool
If set to ``False``, will force the use of a single core without
using ``joblib``.
kwargs : dict
Passed to ``function``
"""
shape = self.shape
data = self.unitless_filled_data
# 'images' is a generator
# the boolean check will skip the function for bad spectra
images = ((data[ii,:,:],
self.mask.include(view=(ii, slice(None), slice(None))),
ii,
)
for ii in range(shape[0]))
return self._apply_function_parallel_base(images, function,
applicator=_apply_spatial_function,
verbose=verbose,
parallel=parallel,
num_cores=num_cores,
use_memmap=use_memmap,
**kwargs)
def apply_function_parallel_spectral(self,
function,
num_cores=None,
verbose=0,
use_memmap=True,
parallel=True,
**kwargs
):
"""
Apply a function in parallel along the spectral dimension. The
function will be performed on data with masked values replaced with the
cube's fill value.
Parameters
----------
function : function
The function to apply in the spectral dimension. It must take
two arguments: an array representing a spectrum and a boolean array
representing the mask. It may also accept ``**kwargs``. The
function must return an object with the same shape as the input
spectrum.
num_cores : int or None
The number of cores to use if running in parallel
verbose : int
Verbosity level to pass to joblib
use_memmap : bool
If specified, a memory mapped temporary file on disk will be
written to rather than storing the intermediate spectra in memory.
parallel : bool
If set to ``False``, will force the use of a single core without
using ``joblib``.
kwargs : dict
Passed to ``function``
"""
shape = self.shape
data = self.unitless_filled_data
# 'spectra' is a generator
# the boolean check will skip the function for bad spectra
# TODO: should spatial good/bad be cached?
spectra = ((data[:,jj,ii],
self.mask.include(view=(slice(None), jj, ii)),
ii, jj,
)
for jj in range(shape[1])
for ii in range(shape[2]))
return self._apply_function_parallel_base(iteration_data=spectra,
function=function,
applicator=_apply_spectral_function,
use_memmap=use_memmap,
parallel=parallel,
verbose=verbose,
num_cores=num_cores,
**kwargs
)
@parallel_docstring
def sigma_clip_spectrally(self, threshold, verbose=0, use_memmap=True,
num_cores=None, **kwargs):
"""
Run astropy's sigma clipper along the spectral axis, converting all bad
(excluded) values to NaN.
Parameters
----------
threshold : float
The ``sigma`` parameter in `astropy.stats.sigma_clip`, which refers
to the number of sigma above which to cut.
verbose : int
Verbosity level to pass to joblib
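Examples
--------
A short sketch clipping values more than 3 sigma from the rest of each
spectrum in an existing ``cube``:
>>> clipped = cube.sigma_clip_spectrally(3)  # doctest: +SKIP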
"""
return self.apply_function_parallel_spectral(stats.sigma_clip,
sigma=threshold,
axis=0, # changes behavior of sigmaclip
num_cores=num_cores,
use_memmap=use_memmap,
verbose=verbose,
**kwargs)
@parallel_docstring
def spectral_smooth(self, kernel,
convolve=convolution.convolve,
verbose=0,
use_memmap=True,
num_cores=None,
**kwargs):
"""
Smooth the cube along the spectral dimension
Note that the mask is left unchanged in this operation.
Parameters
----------
kernel : `~astropy.convolution.Kernel1D`
A 1D kernel from astropy
convolve : function
The astropy convolution function to use, either
`astropy.convolution.convolve` or
`astropy.convolution.convolve_fft`
verbose : int
Verbosity level to pass to joblib
kwargs : dict
Passed to the convolve function
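Examples
--------
A short sketch smoothing each spectrum of an existing ``cube`` with a
Gaussian kernel whose standard deviation is 2 channels:
>>> from astropy.convolution import Gaussian1DKernel  # doctest: +SKIP
>>> smoothed = cube.spectral_smooth(Gaussian1DKernel(2))  # doctest: +SKIP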
"""
if isinstance(kernel.array, u.Quantity):
raise u.UnitsError("The convolution kernel should be defined "
"without a unit.")
return self.apply_function_parallel_spectral(convolve,
kernel=kernel,
normalize_kernel=True,
num_cores=num_cores,
use_memmap=use_memmap,
verbose=verbose,
**kwargs)
def spectral_interpolate(self, spectral_grid,
suppress_smooth_warning=False,
fill_value=None,
update_function=None):
"""Resample the cube spectrally onto a specific grid
Parameters
----------
spectral_grid : array
An array of the spectral positions to regrid onto
suppress_smooth_warning : bool
If `False` (the default), a warning will be raised when interpolating
onto a grid that does not Nyquist sample the existing grid. Set this
to `True` if you have already appropriately smoothed the data.
fill_value : float
Value for extrapolated spectral values that lie outside of
the spectral range defined in the original data. The
default is to use the nearest spectral channel in the
cube.
update_function : method
Method that is called to update an external progressbar
If provided, it disables the default `astropy.utils.console.ProgressBar`
Returns
-------
cube : SpectralCube
"""
inaxis = self.spectral_axis.to(spectral_grid.unit)
indiff = np.mean(np.diff(inaxis))
outdiff = np.mean(np.diff(spectral_grid))
# account for reversed axes
if outdiff < 0:
spectral_grid = spectral_grid[::-1]
outdiff = np.mean(np.diff(spectral_grid))
outslice = slice(None, None, -1)
else:
outslice = slice(None, None, 1)
cubedata = self.filled_data
specslice = slice(None) if indiff >= 0 else slice(None, None, -1)
inaxis = inaxis[specslice]
indiff = np.mean(np.diff(inaxis))
# insanity checks
if indiff < 0 or outdiff < 0:
raise ValueError("impossible.")
assert np.all(np.diff(spectral_grid) > 0)
assert np.all(np.diff(inaxis) > 0)
np.testing.assert_allclose(np.diff(spectral_grid), outdiff,
err_msg="Output grid must be linear")
if outdiff > 2 * indiff and not suppress_smooth_warning:
warnings.warn("Input grid has too small a spacing. The data should "
"be smoothed prior to resampling.",
SmoothingWarning
)
newcube = np.empty([spectral_grid.size, self.shape[1], self.shape[2]],
dtype=cubedata[:1, 0, 0].dtype)
newmask = np.empty([spectral_grid.size, self.shape[1], self.shape[2]],
dtype='bool')
yy,xx = np.indices(self.shape[1:])
if update_function is None:
pb = ProgressBar(xx.size)
update_function = pb.update
for ix, iy in (zip(xx.flat, yy.flat)):
mask = self.mask.include(view=(specslice, iy, ix))
if any(mask):
newcube[outslice,iy,ix] = \
np.interp(spectral_grid.value, inaxis.value,
cubedata[specslice,iy,ix].value,
left=fill_value, right=fill_value)
if all(mask):
newmask[:,iy,ix] = True
else:
interped = np.interp(spectral_grid.value,
inaxis.value, mask) > 0
newmask[outslice,iy,ix] = interped
else:
newmask[:, iy, ix] = False
newcube[:, iy, ix] = np.NaN
update_function()
newwcs = self.wcs.deepcopy()
newwcs.wcs.crpix[2] = 1
newwcs.wcs.crval[2] = spectral_grid[0].value if outslice.step > 0 \
else spectral_grid[-1].value
newwcs.wcs.cunit[2] = spectral_grid.unit.to_string('FITS')
newwcs.wcs.cdelt[2] = outdiff.value if outslice.step > 0 \
else -outdiff.value
newwcs.wcs.set()
newbmask = BooleanArrayMask(newmask, wcs=newwcs)
newcube = self._new_cube_with(data=newcube, wcs=newwcs, mask=newbmask,
meta=self.meta,
fill_value=self.fill_value)
return newcube
@warn_slow
def convolve_to(self, beam, convolve=convolution.convolve_fft, update_function=None, **kwargs):
"""
Convolve each channel in the cube to a specified beam
.. warning::
The current implementation of ``convolve_to`` creates an in-memory
copy of the whole cube to store the convolved data. Issue #506
notes that this is a problem, and it is on our to-do list to fix.
Parameters
----------
beam : `radio_beam.Beam`
The beam to convolve to
convolve : function
The astropy convolution function to use, either
`astropy.convolution.convolve` or
`astropy.convolution.convolve_fft`
update_function : method
Method that is called to update an external progressbar
If provided, it disables the default `astropy.utils.console.ProgressBar`
kwargs : dict
Keyword arguments to pass to the convolution function
Returns
-------
cube : `SpectralCube`
A SpectralCube with a single ``beam``
"""
# Check if the beams are the same.
if beam == self.beam:
warnings.warn("The given beam is identical to the current beam. "
"Skipping convolution.")
return self
pixscale = wcs.utils.proj_plane_pixel_area(self.wcs.celestial)**0.5*u.deg
convolution_kernel = beam.deconvolve(self.beam).as_kernel(pixscale)
# See #631: kwargs get passed within self.apply_function_parallel_spatial
def convfunc(img, **kwargs):
return convolve(img, convolution_kernel, normalize_kernel=True,
**kwargs)
newcube = self.apply_function_parallel_spatial(convfunc,
**kwargs).with_beam(beam)
return newcube
def mask_channels(self, goodchannels):
"""
Helper function to mask out channels. This function is equivalent to
adding a mask with ``cube[view]`` where ``view`` is broadcastable to
the cube shape, but it accepts 1D arrays that are not normally
broadcastable.
Parameters
----------
goodchannels : array
A 1D boolean array declaring which channels should be kept.
Returns
-------
cube : `SpectralCube`
A cube with the specified channels masked
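Examples
--------
A short sketch masking out the first five channels of an existing ``cube``:
>>> import numpy as np  # doctest: +SKIP
>>> good = np.ones(cube.shape[0], dtype=bool)  # doctest: +SKIP
>>> good[:5] = False  # doctest: +SKIP
>>> masked_cube = cube.mask_channels(good)  # doctest: +SKIP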
"""
goodchannels = np.asarray(goodchannels, dtype='bool')
if goodchannels.ndim != 1:
raise ValueError("goodchannels mask must be one-dimensional")
if goodchannels.size != self.shape[0]:
raise ValueError("goodchannels must have a length equal to the "
"cube's spectral dimension.")
return self.with_mask(goodchannels[:,None,None])
@warn_slow
def downsample_axis(self, factor, axis, estimator=np.nanmean,
truncate=False, use_memmap=True, progressbar=True):
"""
Downsample the cube by averaging over *factor* pixels along an axis.
Crops right side if the shape is not a multiple of factor.
The WCS will be 'downsampled' by the specified factor as well.
If the downsample factor is odd, there will be an offset in the WCS.
There is both an in-memory and a memory-mapped implementation; the
default is to use the memory-mapped version. Technically, the 'large
data' warning doesn't apply when using the memory-mapped version, but
the warning is still there anyway.
Parameters
----------
factor : int
The factor to downsample by
axis : int
The axis to downsample along
estimator : function
defaults to mean. You can downsample by summing or
something else if you want a different estimator
(e.g., downsampling error: you want to sum & divide by sqrt(n))
truncate : bool
Whether to truncate the last chunk or average over a smaller number.
e.g., if you downsample [1,2,3,4] by a factor of 3, you could get either
[2] or [2,4] if truncate is True or False, respectively.
use_memmap : bool
Use a memory map on disk to avoid loading the whole cube into memory
(several times)? If set, the warning about large cubes can be ignored
(though you still have to override the warning)
progressbar : bool
Include a progress bar? Only works with ``use_memmap=True``
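Examples
--------
A short sketch averaging every two spectral channels of an existing
``cube``:
>>> smaller = cube.downsample_axis(factor=2, axis=0)  # doctest: +SKIP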
"""
def makeslice(startpoint,axis=axis,step=factor):
# make empty slices
view = [slice(None) for ii in range(self.ndim)]
# then fill the appropriate slice
view[axis] = slice(startpoint,None,step)
return tuple(view)
# size of the dimension of interest
xs = self.shape[axis]
if not use_memmap:
if xs % int(factor) != 0:
if truncate:
view = [slice(None) for ii in range(self.ndim)]
view[axis] = slice(None,xs-(xs % int(factor)))
view = tuple(view)
crarr = self.unitless_filled_data[view]
mask = self.mask[view].include()
else:
extension_shape = list(self.shape)
extension_shape[axis] = (factor - xs % int(factor))
extension = np.empty(extension_shape) * np.nan
crarr = np.concatenate((self.unitless_filled_data[:],
extension), axis=axis)
extension[:] = 0
mask = np.concatenate((self.mask.include(), extension), axis=axis)
else:
crarr = self.unitless_filled_data[:]
mask = self.mask.include()
# The extra braces here are crucial: We're adding an extra dimension so we
# can average across it
stacked_array = np.concatenate([[crarr[makeslice(ii)]]
for ii in range(factor)])
dsarr = estimator(stacked_array, axis=0)
if not isinstance(mask, np.ndarray):
raise TypeError("Mask is of wrong data type")
stacked_mask = np.concatenate([[mask[makeslice(ii)]] for ii in
range(factor)])
mask = np.any(stacked_mask, axis=0)
else:
def makeslice_local(startpoint, axis=axis, nsteps=factor):
# make empty slices
view = [slice(None) for ii in range(self.ndim)]
# then fill the appropriate slice
view[axis] = slice(startpoint,startpoint+nsteps,1)
return tuple(view)
newshape = list(self.shape)
newshape[axis] = (newshape[axis]//factor +
((1-int(truncate)) * (xs % int(factor) != 0)))
newshape = tuple(newshape)
if progressbar:
progressbar = ProgressBar
else:
progressbar = lambda x: x
# Create a view that will add a blank newaxis at the right spot
view_newaxis = [slice(None) for ii in range(self.ndim)]
view_newaxis[axis] = None
view_newaxis = tuple(view_newaxis)
ntf = tempfile.NamedTemporaryFile()
dsarr = np.memmap(ntf, mode='w+', shape=newshape, dtype=float)
ntf2 = tempfile.NamedTemporaryFile()
mask = np.memmap(ntf2, mode='w+', shape=newshape, dtype=bool)
for ii in progressbar(range(newshape[axis])):
view_fulldata = makeslice_local(ii*factor)
view_newdata = makeslice_local(ii, nsteps=1)
to_average = self.unitless_filled_data[view_fulldata]
to_anyfy = self.mask[view_fulldata].include()
dsarr[view_newdata] = estimator(to_average, axis)[view_newaxis]
mask[view_newdata] = np.any(to_anyfy, axis).astype('bool')[view_newaxis]
# the slice should just start at zero; we had factor//2 here earlier,
# and that was an error that probably half-compensated for an error in
# wcs_utils
view = makeslice(0)
newwcs = wcs_utils.slice_wcs(self.wcs, view, shape=self.shape)
newwcs._naxis = list(self.shape)
# this is an assertion to ensure that the WCS produced is valid
# (this is basically a regression test for #442)
assert newwcs[:, slice(None), slice(None)]
assert len(newwcs._naxis) == 3
return self._new_cube_with(data=dsarr, wcs=newwcs,
mask=BooleanArrayMask(mask, wcs=newwcs))
def plot_channel_maps(self, nx, ny, channels, contourkwargs={}, output_file=None,
fig=None, fig_smallest_dim_inches=8, decimals=3, zoom=1,
textcolor=None, cmap='gray_r', tighten=False,
textxloc=0.5, textyloc=0.9,
savefig_kwargs={}, **kwargs):
"""
Make channel maps from a spectral cube
Parameters
----------
nx, ny : int
Number of sub-plots in the x and y direction
channels : list
List of channels to show
cmap : str
The name of a colormap to use for the ``imshow`` colors
contourkwargs : dict
Keyword arguments passed to ``contour``
textcolor : None or str
Color of the label text to overlay. If ``None``, will be
determined automatically. If ``'notext'``, no text will be added.
textxloc : float
textyloc : float
Text label X,Y-location in axis fraction units
output_file : str
Name of the output file to save the figure to (passed to ``savefig``)
fig : matplotlib figure
The figure object to plot onto. Will be overridden to enforce a
specific aspect ratio.
fig_smallest_dim_inches : float
The size of the smallest dimension (either width or height) of the
figure in inches. The other dimension will be selected based on
the aspect ratio of the data: it cannot be a free parameter.
decimals : int, optional
Number of decimal places to show in spectral value
zoom : int, optional
How much to zoom in. In future versions of this function, the
pointing center will be customizable.
tighten : bool
Call ``plt.tight_layout()`` after plotting?
savefig_kwargs : dict
Keyword arguments to pass to ``savefig`` (e.g.,
``bbox_inches='tight'``)
kwargs : dict
Passed to ``imshow``
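Examples
--------
An illustrative sketch plotting a 2x2 grid of channel maps from an
existing ``cube`` (the channel numbers are placeholders):
>>> axes = cube.plot_channel_maps(2, 2, [10, 15, 20, 25])  # doctest: +SKIP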
"""
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
cmap = getattr(plt.cm, cmap)
if len(channels) != nx * ny:
raise ValueError("Number of channels should be equal to nx * ny")
# Read in spectral cube and get spectral axis
spectral_axis = self.spectral_axis
sizey, sizex = self.shape[1:]
cenx = sizex / 2.
ceny = sizey / 2.
aspect_ratio = self.shape[2]/float(self.shape[1])
gridratio = ny / float(nx) * aspect_ratio
if gridratio > 1:
ysize = fig_smallest_dim_inches*gridratio
xsize = fig_smallest_dim_inches
else:
xsize = fig_smallest_dim_inches*gridratio
ysize = fig_smallest_dim_inches
if fig is None:
fig = plt.figure(figsize=(xsize, ysize))
else:
fig.set_figheight(ysize)
fig.set_figwidth(xsize)
# unclear if needed
#fig.subplots_adjust(margin,margin,1.-margin,1.-margin,0.,0.)
axis_list = []
gs = GridSpec(ny, nx, figure=fig, hspace=0, wspace=0)
for ichannel, channel in enumerate(channels):
slc = self[channel,:,:]
ax = plt.subplot(gs[ichannel], projection=slc.wcs)
im = ax.imshow(slc.value, origin='lower', cmap=cmap, **kwargs)
if contourkwargs:
ax.contour(slc.value, **contourkwargs)
ax.set_xlim(cenx - cenx / zoom, cenx + cenx / zoom)
ax.set_ylim(ceny - ceny / zoom, ceny + ceny / zoom)
if textcolor != 'notext':
if textcolor is None:
# determine average image color and set textcolor to opposite
# (this is a bit hacky and there is _definitely_ a better way
# to do this)
avgcolor = im.cmap(im.norm(im.get_array())).mean(axis=(0,1))
totalcolor = avgcolor[:3].sum()
if totalcolor > 0.5:
textcolor = 'w'
else:
textcolor = 'k'
ax.tick_params(color=textcolor)
ax.set_title(("{0:." + str(decimals) + "f}").format(spectral_axis[channel]),
x=textxloc, y=textyloc, color=textcolor)
# only label bottom-left panel with locations
if (ichannel != nx*(ny-1)):
ax.coords[0].set_ticklabel_position('')
ax.coords[1].set_ticklabel_position('')
ax.tick_params(direction='in')
axis_list.append(ax)
if tighten:
plt.tight_layout()
if output_file is not None:
fig.savefig(output_file, **savefig_kwargs)
return axis_list
class SpectralCube(BaseSpectralCube, BeamMixinClass):
__name__ = "SpectralCube"
_oned_spectrum = OneDSpectrum
def __new__(cls, *args, **kwargs):
if kwargs.pop('use_dask', False):
from .dask_spectral_cube import DaskSpectralCube
return super().__new__(DaskSpectralCube)
else:
return super().__new__(cls)
def __init__(self, data, wcs, mask=None, meta=None, fill_value=np.nan,
header=None, allow_huge_operations=False, beam=None,
wcs_tolerance=0.0, use_dask=False, **kwargs):
super(SpectralCube, self).__init__(data=data, wcs=wcs, mask=mask,
meta=meta, fill_value=fill_value,
header=header,
allow_huge_operations=allow_huge_operations,
wcs_tolerance=wcs_tolerance,
**kwargs)
# Beam loading must happen *after* WCS is read
if beam is None:
beam = cube_utils.try_load_beam(self.header)
else:
if not isinstance(beam, Beam):
raise TypeError("beam must be a radio_beam.Beam object.")
# Allow setting the beam attribute even if there is no beam defined
# Accessing `SpectralCube.beam` without a beam defined raises a
# `NoBeamError` with an informative message.
self.beam = beam
if beam is not None:
self._meta['beam'] = beam
self._header.update(beam.to_header_keywords())
def _new_cube_with(self, **kwargs):
beam = kwargs.pop('beam', None)
if 'beam' in self._meta and beam is None:
beam = self._beam
newcube = super(SpectralCube, self)._new_cube_with(beam=beam, **kwargs)
return newcube
_new_cube_with.__doc__ = BaseSpectralCube._new_cube_with.__doc__
def with_beam(self, beam):
'''
Attach a beam object to the `~SpectralCube`.
Parameters
----------
beam : `~radio_beam.Beam`
`Beam` object defining the resolution element of the
`~SpectralCube`.
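Examples
--------
A short sketch attaching a circular 1-arcsecond beam to an existing
``cube`` (the beam parameters are placeholders):
>>> import astropy.units as u  # doctest: +SKIP
>>> from radio_beam import Beam  # doctest: +SKIP
>>> beam = Beam(major=1 * u.arcsec, minor=1 * u.arcsec, pa=0 * u.deg)  # doctest: +SKIP
>>> new_cube = cube.with_beam(beam)  # doctest: +SKIP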
'''
if not isinstance(beam, Beam):
raise TypeError("beam must be a radio_beam.Beam object.")
meta = self._meta.copy()
meta['beam'] = beam
header = self._header.copy()
header.update(beam.to_header_keywords())
newcube = self._new_cube_with(meta=meta, beam=beam)
return newcube
class VaryingResolutionSpectralCube(BaseSpectralCube, MultiBeamMixinClass):
"""
A variant of the SpectralCube class that has PSF (beam) information on a
per-channel basis.
"""
__name__ = "VaryingResolutionSpectralCube"
_oned_spectrum = VaryingResolutionOneDSpectrum
def __new__(cls, *args, **kwargs):
if kwargs.pop('use_dask', False):
from .dask_spectral_cube import DaskVaryingResolutionSpectralCube
return super().__new__(DaskVaryingResolutionSpectralCube)
else:
return super().__new__(cls)
def __init__(self, *args, **kwargs):
"""
Create a SpectralCube with an associated beam table. The new
VaryingResolutionSpectralCube will have a ``beams`` attribute and a
``beam_threshold`` attribute as described below. It will perform some
additional checks when trying to perform analysis across image frames.
Three new keyword arguments are accepted:
Other Parameters
----------------
beam_table : `numpy.recarray`
A table of beam major and minor axes in arcseconds and position
angles, with labels BMAJ, BMIN, BPA
beams : list
A list of `radio_beam.Beam` objects
beam_threshold : float or dict
The fractional threshold above which beams are considered
different. A dictionary may be used with entries 'area', 'major',
'minor', 'pa' so that you can specify a different fractional
threshold for each of these. For example, if you want to check
only that the areas are the same, and not worry about the shape
(which might be a bad idea...), you could set
``beam_threshold={'area':0.01, 'major':1.5, 'minor':1.5,
'pa':5.0}``
"""
# these types of cube are undefined without the radio_beam package
beam_table = kwargs.pop('beam_table', None)
beams = kwargs.pop('beams', None)
beam_threshold = kwargs.pop('beam_threshold', 0.01)
if (beam_table is None and beams is None):
raise ValueError(
"Must give either a beam table or a list of beams to "
"initialize a VaryingResolutionSpectralCube")
super(VaryingResolutionSpectralCube, self).__init__(*args, **kwargs)
if isinstance(beam_table, BinTableHDU):
beam_data_table = beam_table.data
else:
beam_data_table = beam_table
if beam_table is not None:
# CASA beam tables are in arcsec, and that's what we support
beams = Beams(major=u.Quantity(beam_data_table['BMAJ'], u.arcsec),
minor=u.Quantity(beam_data_table['BMIN'], u.arcsec),
pa=u.Quantity(beam_data_table['BPA'], u.deg),
meta=[{key: row[key] for key in beam_data_table.names
if key not in ('BMAJ','BPA', 'BMIN')}
for row in beam_data_table],
)
goodbeams = beams.isfinite
# track which, if any, beams are masked for later use
self.goodbeams_mask = goodbeams
if not all(goodbeams):
warnings.warn("There were {0} non-finite beams; layers with "
"non-finite beams will be masked out.".format(
np.count_nonzero(np.logical_not(goodbeams))),
NonFiniteBeamsWarning
)
beam_mask = BooleanArrayMask(goodbeams[:,None,None],
wcs=self._wcs,
shape=self.shape,
)
if not is_broadcastable_and_smaller(beam_mask.shape,
self._data.shape):
# this should never be allowed to happen
raise ValueError("Beam mask shape is not broadcastable to data shape: "
"%s vs %s" % (beam_mask.shape, self._data.shape))
assert beam_mask.shape == self.shape
new_mask = np.bitwise_and(self._mask, beam_mask)
new_mask._validate_wcs(new_data=self._data, new_wcs=self._wcs)
self._mask = new_mask
if (len(beams) != self.shape[0]):
raise ValueError("Beam list must have same size as spectral "
"dimension")
self.beams = beams
self.beam_threshold = beam_threshold
def __getitem__(self, view):
# Need to allow self[:], self[:,:]
if isinstance(view, (slice,int,np.int64)):
view = (view, slice(None), slice(None))
elif len(view) == 2:
view = view + (slice(None),)
elif len(view) > 3:
raise IndexError("Too many indices")
meta = {}
meta.update(self._meta)
slice_data = [(s.start, s.stop, s.step)
if hasattr(s,'start') else s
for s in view]
if 'slice' in meta:
meta['slice'].append(slice_data)
else:
meta['slice'] = [slice_data]
# intslices identifies the slices that are given by integers, i.e.
# indices. Other slices are slice objects, e.g. obj[5:10], and have
# 'start' attributes.
intslices = [2-ii for ii,s in enumerate(view) if not hasattr(s,'start')]
# for beams, we care only about the first slice, independent of its
# type
specslice = view[0]
if intslices:
if len(intslices) > 1:
if 2 in intslices:
raise NotImplementedError("1D slices along non-spectral "
"axes are not yet implemented.")
newwcs = self._wcs.sub([a
for a in (1,2,3)
if a not in [x+1 for x in intslices]])
if cube_utils._has_beam(self):
bmarg = {'beam': self.beam}
elif cube_utils._has_beams(self):
bmarg = {'beams': self.unmasked_beams[specslice]}
else:
bmarg = {}
return self._oned_spectrum(value=self._data[view],
wcs=newwcs,
copy=False,
unit=self.unit,
spectral_unit=self._spectral_unit,
mask=self.mask[view],
meta=meta,
goodbeams_mask=self.goodbeams_mask[specslice]
if hasattr(self, '_goodbeams_mask')
else None,
**bmarg
)
# only one element, so drop an axis
newwcs = wcs_utils.drop_axis(self._wcs, intslices[0])
header = self._nowcs_header
# Slice objects know how to parse Beam objects stored in the
# metadata
# A 2D slice with a VRSC should not be allowed along a
# position-spectral axis
if not isinstance(self.unmasked_beams[specslice], Beam):
raise AttributeError("2D slices along a spectral axis are not "
"allowed for "
"VaryingResolutionSpectralCubes. Convolve"
" to a common resolution with "
"`convolve_to` before attempting "
"position-spectral slicing.")
meta['beam'] = self.unmasked_beams[specslice]
return Slice(value=self.filled_data[view],
wcs=newwcs,
copy=False,
unit=self.unit,
header=header,
meta=meta)
newmask = self._mask[view] if self._mask is not None else None
newwcs = wcs_utils.slice_wcs(self._wcs, view, shape=self.shape)
newwcs._naxis = list(self.shape)
# this is an assertion to ensure that the WCS produced is valid
# (this is basically a regression test for #442)
assert newwcs[:, slice(None), slice(None)]
assert len(newwcs._naxis) == 3
return self._new_cube_with(data=self._data[view],
wcs=newwcs,
mask=newmask,
beams=self.unmasked_beams[specslice],
meta=meta)
def spectral_slab(self, lo, hi):
"""
Extract a new cube between two spectral coordinates
Parameters
----------
lo, hi : :class:`~astropy.units.Quantity`
The lower and upper spectral coordinate for the slab range. The
units should be compatible with the units of the spectral axis.
If the spectral axis is in frequency-equivalent units and you
want to select a range in velocity, or vice-versa, you should
first use :meth:`~spectral_cube.SpectralCube.with_spectral_unit`
to convert the units of the spectral axis.
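        Example (illustrative sketch; assumes ``cube`` is a spectral cube whose
        spectral axis is in velocity units and ``u`` is ``astropy.units``)::
            subcube = cube.spectral_slab(-50 * u.km / u.s, 50 * u.km / u.s)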
"""
# Find range of values for spectral axis
ilo = self.closest_spectral_channel(lo)
ihi = self.closest_spectral_channel(hi)
if ilo > ihi:
ilo, ihi = ihi, ilo
ihi += 1
# Create WCS slab
wcs_slab = self._wcs.deepcopy()
wcs_slab.wcs.crpix[2] -= ilo
# Create mask slab
if self._mask is None:
mask_slab = None
else:
try:
mask_slab = self._mask[ilo:ihi, :, :]
except NotImplementedError:
warnings.warn("Mask slicing not implemented for "
"{0} - dropping mask".
format(self._mask.__class__.__name__),
NotImplementedWarning
)
mask_slab = None
# Create new spectral cube
slab = self._new_cube_with(data=self._data[ilo:ihi], wcs=wcs_slab,
beams=self.unmasked_beams[ilo:ihi],
mask=mask_slab)
return slab
def _new_cube_with(self, goodbeams_mask=None, **kwargs):
beams = kwargs.pop('beams', self.unmasked_beams)
beam_threshold = kwargs.pop('beam_threshold', self.beam_threshold)
VRSC = VaryingResolutionSpectralCube
newcube = super(VRSC, self)._new_cube_with(beams=beams,
beam_threshold=beam_threshold,
**kwargs)
if goodbeams_mask is not None:
newcube.goodbeams_mask = goodbeams_mask
assert hasattr(newcube, '_goodbeams_mask')
else:
newcube.goodbeams_mask = np.isfinite(newcube.beams)
assert hasattr(newcube, '_goodbeams_mask')
return newcube
_new_cube_with.__doc__ = BaseSpectralCube._new_cube_with.__doc__
def _check_beam_areas(self, threshold, mean_beam, mask=None):
"""
Check that the beam areas are the same to within some threshold
"""
if mask is not None:
assert len(mask) == len(self.unmasked_beams)
mask = np.array(mask, dtype='bool')
else:
mask = np.ones(len(self.unmasked_beams), dtype='bool')
qtys = dict(sr=self.unmasked_beams.sr,
major=self.unmasked_beams.major.to(u.deg),
minor=self.unmasked_beams.minor.to(u.deg),
# position angles are not really comparable
#pa=u.Quantity([bm.pa for bm in self.unmasked_beams], u.deg),
)
errormessage = ""
for (qtyname, qty) in (qtys.items()):
minv = qty[mask].min()
maxv = qty[mask].max()
mn = getattr(mean_beam, qtyname)
maxdiff = (np.max(np.abs(u.Quantity((maxv-mn, minv-mn))))/mn).decompose()
if isinstance(threshold, dict):
th = threshold[qtyname]
else:
th = threshold
if maxdiff > th:
errormessage += ("Beam {2}s differ by up to {0}x, which is greater"
" than the threshold {1}\n".format(maxdiff,
threshold,
qtyname
))
if errormessage != "":
raise ValueError(errormessage)
def __getattribute__(self, attrname):
"""
For any functions that operate over the spectral axis, perform beam
sameness checks before performing the operation to avoid unexpected
results
"""
# short name to avoid long lines below
VRSC = VaryingResolutionSpectralCube
# what about apply_numpy_function, apply_function? since they're
# called by some of these, maybe *only* those should be wrapped to
# avoid redundant calls
if attrname in ('moment', 'apply_numpy_function', 'apply_function',
'apply_function_parallel_spectral'):
origfunc = super(VRSC, self).__getattribute__(attrname)
return self._handle_beam_areas_wrapper(origfunc)
else:
return super(VRSC, self).__getattribute__(attrname)
@property
def header(self):
header = super(VaryingResolutionSpectralCube, self).header
# this indicates to CASA that there is a beam table
header['CASAMBM'] = True
return header
@property
def hdu(self):
raise ValueError("For VaryingResolutionSpectralCube's, use hdulist "
"instead of hdu.")
@property
def hdulist(self):
"""
HDUList version of self
"""
hdu = PrimaryHDU(self.filled_data[:].value, header=self.header)
from .cube_utils import beams_to_bintable
# use unmasked beams because, even if the beam is masked out, we should
# write it
bmhdu = beams_to_bintable(self.unmasked_beams)
return HDUList([hdu, bmhdu])
@warn_slow
def convolve_to(self, beam, allow_smaller=False,
convolve=convolution.convolve_fft,
update_function=None,
**kwargs):
"""
Convolve each channel in the cube to a specified beam
.. warning::
The current implementation of ``convolve_to`` creates an in-memory
copy of the whole cube to store the convolved data. Issue #506
notes that this is a problem, and it is on our to-do list to fix.
.. warning::
            Note that if there is any misalignment between the cube's spatial
pixel axes and the WCS's spatial axes *and* the beams are not
round, the convolution kernels used here may be incorrect. Be wary
in such cases!
Parameters
----------
beam : `radio_beam.Beam`
The beam to convolve to
allow_smaller : bool
If the specified target beam is smaller than the beam in a channel
in any dimension and this is ``False``, it will raise an exception.
convolve : function
The astropy convolution function to use, either
`astropy.convolution.convolve` or
`astropy.convolution.convolve_fft`
update_function : method
            Method that is called to update an external progressbar.
If provided, it disables the default `astropy.utils.console.ProgressBar`
kwargs : dict
Keyword arguments to pass to the convolution function
Returns
-------
cube : `SpectralCube`
A SpectralCube with a single ``beam``
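        Example (illustrative sketch; assumes ``cube`` is a
        VaryingResolutionSpectralCube, ``Beam`` is ``radio_beam.Beam`` and
        ``u`` is ``astropy.units``)::
            target = Beam(major=2 * u.arcsec, minor=2 * u.arcsec, pa=0 * u.deg)
            common_cube = cube.convolve_to(target)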
"""
if ((self.wcs.celestial.wcs.get_pc()[0,1] != 0 or
self.wcs.celestial.wcs.get_pc()[1,0] != 0)):
warnings.warn("The beams will produce convolution kernels "
"that are not aware of any misaligment "
"between pixel and world coordinates, "
"and there are off-diagonal elements of the "
"WCS spatial transformation matrix. "
"Unexpected results are likely.",
BeamWarning
)
pixscale = wcs.utils.proj_plane_pixel_area(self.wcs.celestial)**0.5*u.deg
convolution_kernels = []
for bm,valid in zip(self.unmasked_beams, self.goodbeams_mask):
if not valid:
# just skip masked-out beams
convolution_kernels.append(None)
continue
elif beam == bm:
# Point response when beams are equal, don't convolve.
convolution_kernels.append(None)
continue
try:
cb = beam.deconvolve(bm)
ck = cb.as_kernel(pixscale)
convolution_kernels.append(ck)
except ValueError:
if allow_smaller:
convolution_kernels.append(None)
else:
raise
if update_function is None:
pb = ProgressBar(self.shape[0])
update_function = pb.update
newdata = np.empty(self.shape)
for ii,kernel in enumerate(convolution_kernels):
# load each image from a slice to avoid loading whole cube into
# memory
img = self[ii,:,:].filled_data[:]
            # The kernel is None when the channel's beam is masked out, when the
            # target beam equals the channel beam, or when `allow_smaller` is True
            # and deconvolution failed; only the equal-beam case is really a valid
            # no-op, the others simply copy the data through unchanged.
if kernel is None:
newdata[ii, :, :] = img
else:
# See #631: kwargs get passed within self.apply_function_parallel_spatial
newdata[ii, :, :] = convolve(img, kernel,
normalize_kernel=True,
**kwargs)
update_function()
newcube = SpectralCube(data=newdata, wcs=self.wcs, mask=self.mask,
meta=self.meta, fill_value=self.fill_value,
header=self.header,
allow_huge_operations=self.allow_huge_operations,
beam=beam,
wcs_tolerance=self._wcs_tolerance)
return newcube
@warn_slow
def to(self, unit, equivalencies=()):
"""
Return the cube converted to the given unit (assuming it is equivalent).
        If conversion was required, this will be a copy; otherwise the original
        cube is returned without copying.
"""
if not isinstance(unit, u.Unit):
unit = u.Unit(unit)
if unit == self.unit:
# No copying
return self
# Create the tuple of unit conversions needed.
factor = cube_utils.bunit_converters(self, unit, equivalencies=equivalencies)
factor = np.array(factor)
# special case: array in equivalencies
# (I don't think this should have to be special cased, but I don't know
# how to manipulate broadcasting rules any other way)
if hasattr(factor, '__len__') and len(factor) == len(self):
return self._new_cube_with(data=self._data*factor[:,None,None],
unit=unit)
else:
return self._new_cube_with(data=self._data*factor,
unit=unit)
def mask_channels(self, goodchannels):
"""
Helper function to mask out channels. This function is equivalent to
adding a mask with ``cube[view]`` where ``view`` is broadcastable to
the cube shape, but it accepts 1D arrays that are not normally
broadcastable. Additionally, for `VaryingResolutionSpectralCube` s,
the beams in the bad channels will not be checked when averaging,
convolving, and doing other operations that are multibeam-aware.
Parameters
----------
goodchannels : array
A 1D boolean array declaring which channels should be kept.
Returns
-------
cube : `SpectralCube`
A cube with the specified channels masked
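        Example (illustrative sketch; keeps only the first ten channels of
        ``cube``)::
            good = np.zeros(cube.shape[0], dtype=bool)
            good[:10] = True
            masked_cube = cube.mask_channels(good)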
"""
goodchannels = np.asarray(goodchannels, dtype='bool')
if goodchannels.ndim != 1:
raise ValueError("goodchannels mask must be one-dimensional")
if goodchannels.size != self.shape[0]:
raise ValueError("goodchannels must have a length equal to the "
"cube's spectral dimension.")
cube = self.with_mask(goodchannels[:,None,None])
cube.goodbeams_mask = np.logical_and(goodchannels, self.goodbeams_mask)
return cube
def spectral_interpolate(self, *args, **kwargs):
raise AttributeError("VaryingResolutionSpectralCubes can't be "
"spectrally interpolated. Convolve to a "
"common resolution with `convolve_to` before "
"attempting spectral interpolation.")
def spectral_smooth(self, *args, **kwargs):
raise AttributeError("VaryingResolutionSpectralCubes can't be "
"spectrally smoothed. Convolve to a "
"common resolution with `convolve_to` before "
"attempting spectral smoothed.")
def _regionlist_to_single_region(region_list):
"""
Recursively merge a region list into a single compound region
"""
import regions
if len(region_list) == 1:
return region_list[0]
left = _regionlist_to_single_region(region_list[:len(region_list)//2])
right = _regionlist_to_single_region(region_list[len(region_list)//2:])
return regions.CompoundPixelRegion(left, right, operator.or_)
|
bsd-3-clause
|
alexsavio/scikit-learn
|
sklearn/utils/tests/test_linear_assignment.py
|
421
|
1349
|
# Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
|
bsd-3-clause
|
pkruskal/scikit-learn
|
examples/model_selection/plot_precision_recall.py
|
249
|
6150
|
"""
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
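As a small worked example (purely illustrative numbers): with 8 true positives,
2 false positives and 4 false negatives, :math:`P = 8/(8+2) = 0.8`,
:math:`R = 8/(8+4) \\approx 0.67` and
:math:`F1 = 2 \\times (0.8 \\times 0.67)/(0.8 + 0.67) \\approx 0.73`.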
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute the micro-averaged Precision-Recall curve and its average precision
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i in range(n_classes):
plt.plot(recall[i], precision[i],
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
looooo/paraBEM
|
examples/plots/2d_elements.py
|
1
|
1479
|
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import paraBEM
from paraBEM.pan2d import doublet_2_0, source_2_0, doublet_2_0_v
from paraBEM.utils import check_path
pnt1 = paraBEM.PanelVector2(-1, 0)
pnt2 = paraBEM.PanelVector2(1, 0)
source = paraBEM.Panel2([pnt1, pnt2])
y = np.linspace(-3, 3, 100)
val = [source_2_0(paraBEM.Vector2(yi, 8), source) for yi in y]
plt.plot(y, val)
val = [source_2_0(paraBEM.Vector2(yi, 0.01), source) for yi in y]
plt.plot(y, val)
val = [source_2_0(paraBEM.Vector2(yi, 0.0), source) for yi in y]
plt.plot(y, val)
val = [source_2_0(paraBEM.Vector2(yi, 3), source) for yi in y]
plt.plot(y, val)
plt.savefig(check_path("results/2d/source.png"))
plt.close()
y = np.linspace(-3, 3, 100)
val = [doublet_2_0(paraBEM.Vector2(yi, 7), source) for yi in y]
plt.plot(y, val)
val = [doublet_2_0(paraBEM.Vector2(yi, 0.01), source) for yi in y]
plt.plot(y, val)
val = [doublet_2_0(paraBEM.Vector2(yi, 0.0), source) for yi in y]
plt.plot(y, val)
val = [doublet_2_0(paraBEM.Vector2(yi, 3), source) for yi in y]
plt.plot(y, val)
plt.savefig(check_path("results/2d/doublet.png"))
plt.close()
y = np.linspace(-3, 3, 100)
val = [doublet_2_0_v(paraBEM.Vector2(yi, 7), source).x for yi in y]
plt.plot(y, val)
val = [doublet_2_0_v(paraBEM.Vector2(yi, 0.2), source).x for yi in y]
plt.plot(y, val)
val = [doublet_2_0_v(paraBEM.Vector2(yi, 3), source).x for yi in y]
plt.plot(y, val)
plt.savefig(check_path("results/2d/doublet_v.png"))
|
gpl-3.0
|
czhengsci/pymatgen
|
pymatgen/io/abinit/flows.py
|
3
|
116124
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
A Flow is a container for Works, and works consist of tasks.
Flows are the final objects that can be dumped directly to a pickle file on disk
Flows are executed using abirun (abipy).
"""
from __future__ import unicode_literals, division, print_function
import os
import sys
import time
import collections
import warnings
import shutil
import copy
import tempfile
import numpy as np
from pprint import pprint
from six.moves import map, StringIO
from tabulate import tabulate
from pydispatch import dispatcher
from collections import OrderedDict
from monty.collections import as_set, dict2namedtuple
from monty.string import list_strings, is_string, make_banner
from monty.operator import operator_from_str
from monty.io import FileLock
from monty.pprint import draw_tree
from monty.termcolor import cprint, colored, cprint_map, get_terminal_size
from monty.inspect import find_top_pyfile
from monty.dev import deprecated
from monty.json import MSONable
from pymatgen.util.serialization import pmg_pickle_load, pmg_pickle_dump, pmg_serialize
from pymatgen.core.units import Memory
from pymatgen.util.io_utils import AtomicFile
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
from . import wrappers
from .nodes import Status, Node, NodeError, NodeResults, Dependency, GarbageCollector, check_spectator
from .tasks import ScfTask, DdkTask, DdeTask, TaskManager, FixQueueCriticalError
from .utils import File, Directory, Editor
from .abiinspect import yaml_read_irred_perts
from .works import NodeContainer, Work, BandStructureWork, PhononWork, BecWork, G0W0Work, QptdmWork, DteWork
from .events import EventsParser # autodoc_event_handlers
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"Flow",
"G0W0WithQptdmFlow",
"bandstructure_flow",
"g0w0_flow",
"phonon_flow",
]
class FlowResults(NodeResults):
JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
#JSON_SCHEMA["properties"] = {
# "queries": {"type": "string", "required": True},
#}
@classmethod
def from_node(cls, flow):
"""Initialize an instance from a Work instance."""
new = super(FlowResults, cls).from_node(flow)
# Will put all files found in outdir in GridFs
d = {os.path.basename(f): f for f in flow.outdir.list_filepaths()}
# Add the pickle file.
d["pickle"] = flow.pickle_file if flow.pickle_protocol != 0 else (flow.pickle_file, "t")
new.add_gridfs_files(**d)
return new
class FlowError(NodeError):
"""Base Exception for :class:`Node` methods"""
class Flow(Node, NodeContainer, MSONable):
"""
    This object is a container of works. Its main task is managing the
    possible inter-dependencies among the works and the creation of
dynamic workflows that are generated by callbacks registered by the user.
.. attributes::
creation_date: String with the creation_date
pickle_protocol: Protocol for Pickle database (default: -1 i.e. latest protocol)
Important methods for constructing flows:
.. methods::
register_work: register (add) a work to the flow
        register_task: register a work that contains only this task and return the work
allocate: propagate the workdir and manager of the flow to all the registered tasks
build:
build_and_pickle_dump:
"""
VERSION = "0.1"
PICKLE_FNAME = "__AbinitFlow__.pickle"
Error = FlowError
Results = FlowResults
@classmethod
def from_inputs(cls, workdir, inputs, manager=None, pickle_protocol=-1, task_class=ScfTask,
work_class=Work, remove=False):
"""
Construct a simple flow from a list of inputs. The flow contains a single Work with
tasks whose class is given by task_class.
.. warning::
Don't use this interface if you have dependencies among the tasks.
Args:
workdir: String specifying the directory where the works will be produced.
inputs: List of inputs.
manager: :class:`TaskManager` object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
pickle_protocol: Pickle protocol version used for saving the status of the object.
-1 denotes the latest version supported by the python interpreter.
task_class: The class of the :class:`Task`.
work_class: The class of the :class:`Work`.
remove: attempt to remove working directory `workdir` if directory already exists.
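        Example (sketch only; ``scf_input`` and ``nscf_input`` stand for
        already-built input objects)::
            flow = Flow.from_inputs("flow_dir", inputs=[scf_input, nscf_input])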
"""
if not isinstance(inputs, (list, tuple)): inputs = [inputs]
flow = cls(workdir, manager=manager, pickle_protocol=pickle_protocol, remove=remove)
work = work_class()
for inp in inputs:
work.register(inp, task_class=task_class)
flow.register_work(work)
return flow.allocate()
@classmethod
def as_flow(cls, obj):
"""Convert obj into a Flow. Accepts filepath, dict, or Flow object."""
if isinstance(obj, cls): return obj
if is_string(obj):
return cls.pickle_load(obj)
elif isinstance(obj, collections.Mapping):
return cls.from_dict(obj)
else:
raise TypeError("Don't know how to convert type %s into a Flow" % type(obj))
def __init__(self, workdir, manager=None, pickle_protocol=-1, remove=False):
"""
Args:
workdir: String specifying the directory where the works will be produced.
if workdir is None, the initialization of the working directory
is performed by flow.allocate(workdir).
manager: :class:`TaskManager` object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
pickle_protocol: Pickle protocol version used for saving the status of the object.
-1 denotes the latest version supported by the python interpreter.
remove: attempt to remove working directory `workdir` if directory already exists.
"""
super(Flow, self).__init__()
if workdir is not None:
if remove and os.path.exists(workdir): shutil.rmtree(workdir)
self.set_workdir(workdir)
self.creation_date = time.asctime()
if manager is None: manager = TaskManager.from_user_config()
self.manager = manager.deepcopy()
# List of works.
self._works = []
self._waited = 0
# List of callbacks that must be executed when the dependencies reach S_OK
self._callbacks = []
# Install default list of handlers at the flow level.
# Users can override the default list by calling flow.install_event_handlers in the script.
# Example:
#
# # flow level (common case)
# flow.install_event_handlers(handlers=my_handlers)
#
# # task level (advanced mode)
# flow[0][0].install_event_handlers(handlers=my_handlers)
#
self.install_event_handlers()
self.pickle_protocol = int(pickle_protocol)
# ID used to access mongodb
self._mongo_id = None
# Save the location of the script used to generate the flow.
# This trick won't work if we are running with nosetests, py.test etc
pyfile = find_top_pyfile()
if "python" in pyfile or "ipython" in pyfile: pyfile = "<" + pyfile + ">"
self.set_pyfile(pyfile)
# TODO
# Signal slots: a dictionary with the list
# of callbacks indexed by node_id and SIGNAL_TYPE.
# When the node changes its status, it broadcast a signal.
# The flow is listening to all the nodes of the calculation
# [node_id][SIGNAL] = list_of_signal_handlers
#self._sig_slots = slots = {}
#for work in self:
# slots[work] = {s: [] for s in work.S_ALL}
#for task in self.iflat_tasks():
# slots[task] = {s: [] for s in work.S_ALL}
@pmg_serialize
def as_dict(self, **kwargs):
"""
JSON serialization, note that we only need to save
a string with the working directory since the object will be
reconstructed from the pickle file located in workdir
"""
return {"workdir": self.workdir}
# This is needed for fireworks.
to_dict = as_dict
@classmethod
def from_dict(cls, d, **kwargs):
"""Reconstruct the flow from the pickle file."""
return cls.pickle_load(d["workdir"], **kwargs)
@classmethod
def temporary_flow(cls, manager=None):
"""Return a Flow in a temporary directory. Useful for unit tests."""
return cls(workdir=tempfile.mkdtemp(), manager=manager)
def set_workdir(self, workdir, chroot=False):
"""
Set the working directory. Cannot be set more than once unless chroot is True
"""
if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
# Directories with (input|output|temporary) data.
self.workdir = os.path.abspath(workdir)
self.indir = Directory(os.path.join(self.workdir, "indata"))
self.outdir = Directory(os.path.join(self.workdir, "outdata"))
self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
self.wdir = Directory(self.workdir)
def reload(self):
"""
Reload the flow from the pickle file. Used when we are monitoring the flow
executed by the scheduler. In this case, indeed, the flow might have been changed
by the scheduler and we have to reload the new flow in memory.
"""
        new = self.__class__.pickle_load(self.workdir)
        # Rebinding `self` would be a no-op; copy the reloaded state into this instance.
        self.__dict__.update(new.__dict__)
@classmethod
def pickle_load(cls, filepath, spectator_mode=True, remove_lock=False):
"""
Loads the object from a pickle file and performs initial setup.
Args:
            filepath: Filename or directory name. If filepath is a directory, we
scan the directory tree starting from filepath and we
read the first pickle database. Raise RuntimeError if multiple
databases are found.
spectator_mode: If True, the nodes of the flow are not connected by signals.
This option is usually used when we want to read a flow
in read-only mode and we want to avoid callbacks that can change the flow.
remove_lock:
True to remove the file lock if any (use it carefully).
"""
if os.path.isdir(filepath):
# Walk through each directory inside path and find the pickle database.
for dirpath, dirnames, filenames in os.walk(filepath):
fnames = [f for f in filenames if f == cls.PICKLE_FNAME]
if fnames:
if len(fnames) == 1:
filepath = os.path.join(dirpath, fnames[0])
break # Exit os.walk
else:
err_msg = "Found multiple databases:\n %s" % str(fnames)
raise RuntimeError(err_msg)
else:
err_msg = "Cannot find %s inside directory %s" % (cls.PICKLE_FNAME, filepath)
raise ValueError(err_msg)
if remove_lock and os.path.exists(filepath + ".lock"):
try:
os.remove(filepath + ".lock")
except:
pass
with FileLock(filepath):
with open(filepath, "rb") as fh:
flow = pmg_pickle_load(fh)
# Check if versions match.
if flow.VERSION != cls.VERSION:
msg = ("File flow version %s != latest version %s\n."
"Regenerate the flow to solve the problem " % (flow.VERSION, cls.VERSION))
warnings.warn(msg)
flow.set_spectator_mode(spectator_mode)
# Recompute the status of each task since tasks that
# have been submitted previously might be completed.
flow.check_status()
return flow
@classmethod
def pickle_loads(cls, s):
"""Reconstruct the flow from a string."""
strio = StringIO()
strio.write(s)
strio.seek(0)
flow = pmg_pickle_load(strio)
return flow
def __len__(self):
return len(self.works)
def __iter__(self):
return self.works.__iter__()
def __getitem__(self, slice):
return self.works[slice]
def set_pyfile(self, pyfile):
"""
Set the path of the python script used to generate the flow.
.. Example:
flow.set_pyfile(__file__)
"""
# TODO: Could use a frame hack to get the caller outside abinit
# so that pyfile is automatically set when we __init__ it!
self._pyfile = os.path.abspath(pyfile)
@property
def pyfile(self):
"""
Absolute path of the python script used to generate the flow. Set by `set_pyfile`
"""
try:
return self._pyfile
except AttributeError:
return None
@property
def pid_file(self):
"""The path of the pid file created by PyFlowScheduler."""
return os.path.join(self.workdir, "_PyFlowScheduler.pid")
@property
def has_scheduler(self):
"""True if there's a scheduler running the flow."""
return os.path.exists(self.pid_file)
def check_pid_file(self):
"""
This function checks if we are already running the :class:`Flow` with a :class:`PyFlowScheduler`.
Raises: Flow.Error if the pid file of the scheduler exists.
"""
if not os.path.exists(self.pid_file):
return 0
self.show_status()
raise self.Error("""\n\
pid_file
%s
already exists. There are two possibilities:
1) There's an another instance of PyFlowScheduler running
2) The previous scheduler didn't exit in a clean way
To solve case 1:
Kill the previous scheduler (use 'kill pid' where pid is the number reported in the file)
Then you can restart the new scheduler.
To solve case 2:
Remove the pid_file and restart the scheduler.
Exiting""" % self.pid_file)
@property
def pickle_file(self):
"""The path of the pickle file."""
return os.path.join(self.workdir, self.PICKLE_FNAME)
@property
def mongo_id(self):
return self._mongo_id
@mongo_id.setter
def mongo_id(self, value):
if self.mongo_id is not None:
raise RuntimeError("Cannot change mongo_id %s" % self.mongo_id)
self._mongo_id = value
def mongodb_upload(self, **kwargs):
from abiflows.core.scheduler import FlowUploader
FlowUploader().upload(self, **kwargs)
def validate_json_schema(self):
"""Validate the JSON schema. Return list of errors."""
errors = []
for work in self:
for task in work:
if not task.get_results().validate_json_schema():
errors.append(task)
if not work.get_results().validate_json_schema():
errors.append(work)
if not self.get_results().validate_json_schema():
errors.append(self)
return errors
def get_mongo_info(self):
"""
Return a JSON dictionary with information on the flow.
Mainly used for constructing the info section in `FlowEntry`.
The default implementation is empty. Subclasses must implement it
"""
return {}
def mongo_assimilate(self):
"""
This function is called by client code when the flow is completed
Return a JSON dictionary with the most important results produced
by the flow. The default implementation is empty. Subclasses must implement it
"""
return {}
@property
def works(self):
"""List of :class:`Work` objects contained in self.."""
return self._works
@property
def all_ok(self):
"""True if all the tasks in works have reached `S_OK`."""
return all(work.all_ok for work in self)
@property
def num_tasks(self):
"""Total number of tasks"""
return len(list(self.iflat_tasks()))
@property
def errored_tasks(self):
"""List of errored tasks."""
etasks = []
for status in [self.S_ERROR, self.S_QCRITICAL, self.S_ABICRITICAL]:
etasks.extend(list(self.iflat_tasks(status=status)))
return set(etasks)
@property
def num_errored_tasks(self):
"""The number of tasks whose status is `S_ERROR`."""
return len(self.errored_tasks)
@property
def unconverged_tasks(self):
"""List of unconverged tasks."""
return list(self.iflat_tasks(status=self.S_UNCONVERGED))
@property
def num_unconverged_tasks(self):
"""The number of tasks whose status is `S_UNCONVERGED`."""
return len(self.unconverged_tasks)
@property
def status_counter(self):
"""
Returns a :class:`Counter` object that counts the number of tasks with
given status (use the string representation of the status as key).
"""
# Count the number of tasks with given status in each work.
counter = self[0].status_counter
for work in self[1:]:
counter += work.status_counter
return counter
@property
def ncores_reserved(self):
"""
Returns the number of cores reserved in this moment.
A core is reserved if the task is not running but
we have submitted the task to the queue manager.
"""
return sum(work.ncores_reserved for work in self)
@property
def ncores_allocated(self):
"""
Returns the number of cores allocated in this moment.
A core is allocated if it's running a task or if we have
submitted a task to the queue manager but the job is still pending.
"""
return sum(work.ncores_allocated for work in self)
@property
def ncores_used(self):
"""
Returns the number of cores used in this moment.
A core is used if there's a job that is running on it.
"""
return sum(work.ncores_used for work in self)
@property
def has_chrooted(self):
"""
Returns a string that evaluates to True if we have changed
        the workdir for visualization purposes e.g. we are using sshfs
        to mount the remote directory where the `Flow` is located.
The string gives the previous workdir of the flow.
"""
try:
return self._chrooted_from
except AttributeError:
return ""
def chroot(self, new_workdir):
"""
        Change the workdir of the :class:`Flow`. Mainly used for
allowing the user to open the GUI on the local host
and access the flow from remote via sshfs.
.. note::
Calling this method will make the flow go in read-only mode.
"""
self._chrooted_from = self.workdir
self.set_workdir(new_workdir, chroot=True)
for i, work in enumerate(self):
new_wdir = os.path.join(self.workdir, "w" + str(i))
work.chroot(new_wdir)
def groupby_status(self):
"""
        Returns an ordered dictionary mapping the task status to
the list of named tuples (task, work_index, task_index).
"""
Entry = collections.namedtuple("Entry", "task wi ti")
d = collections.defaultdict(list)
for task, wi, ti in self.iflat_tasks_wti():
d[task.status].append(Entry(task, wi, ti))
# Sort keys according to their status.
return OrderedDict([(k, d[k]) for k in sorted(list(d.keys()))])
def groupby_task_class(self):
"""
Returns a dictionary mapping the task class to the list of tasks in the flow
"""
# Find all Task classes
class2tasks = OrderedDict()
for task in self.iflat_tasks():
cls = task.__class__
if cls not in class2tasks: class2tasks[cls] = []
class2tasks[cls].append(task)
return class2tasks
def iflat_nodes(self, status=None, op="==", nids=None):
"""
        Generator that produces a flat sequence of nodes.
if status is not None, only the tasks with the specified status are selected.
nids is an optional list of node identifiers used to filter the nodes.
"""
nids = as_set(nids)
if status is None:
if not (nids and self.node_id not in nids):
yield self
for work in self:
if nids and work.node_id not in nids: continue
yield work
for task in work:
if nids and task.node_id not in nids: continue
yield task
else:
# Get the operator from the string.
op = operator_from_str(op)
# Accept Task.S_FLAG or string.
status = Status.as_status(status)
if not (nids and self.node_id not in nids):
if op(self.status, status): yield self
for wi, work in enumerate(self):
if nids and work.node_id not in nids: continue
if op(work.status, status): yield work
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if op(task.status, status): yield task
def node_from_nid(self, nid):
"""Return the node in the `Flow` with the given `nid` identifier"""
for node in self.iflat_nodes():
if node.node_id == nid: return node
raise ValueError("Cannot find node with node id: %s" % nid)
def iflat_tasks_wti(self, status=None, op="==", nids=None):
"""
Generator to iterate over all the tasks of the `Flow`.
Yields:
(task, work_index, task_index)
If status is not None, only the tasks whose status satisfies
the condition (task.status op status) are selected
status can be either one of the flags defined in the :class:`Task` class
(e.g Task.S_OK) or a string e.g "S_OK"
nids is an optional list of node identifiers used to filter the tasks.
"""
return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=True)
def iflat_tasks(self, status=None, op="==", nids=None):
"""
Generator to iterate over all the tasks of the :class:`Flow`.
If status is not None, only the tasks whose status satisfies
the condition (task.status op status) are selected
status can be either one of the flags defined in the :class:`Task` class
(e.g Task.S_OK) or a string e.g "S_OK"
nids is an optional list of node identifiers used to filter the tasks.
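        Example (illustrative; ``flow`` is a :class:`Flow` instance)::
            for task in flow.iflat_tasks(status="S_OK"):
                print(task)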
"""
return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=False)
def _iflat_tasks_wti(self, status=None, op="==", nids=None, with_wti=True):
"""
        Generator that produces a flat sequence of tasks.
if status is not None, only the tasks with the specified status are selected.
nids is an optional list of node identifiers used to filter the tasks.
Returns:
(task, work_index, task_index) if with_wti is True else task
"""
nids = as_set(nids)
if status is None:
for wi, work in enumerate(self):
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if with_wti:
yield task, wi, ti
else:
yield task
else:
# Get the operator from the string.
op = operator_from_str(op)
# Accept Task.S_FLAG or string.
status = Status.as_status(status)
for wi, work in enumerate(self):
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if op(task.status, status):
if with_wti:
yield task, wi, ti
else:
yield task
def abivalidate_inputs(self):
"""
Run ABINIT in dry mode to validate all the inputs of the flow.
Return:
(isok, tuples)
isok is True if all inputs are ok.
            tuples is a list of `namedtuple` objects, one for each task in the flow.
Each namedtuple has the following attributes:
retcode: Return code. 0 if OK.
log_file: log file of the Abinit run, use log_file.read() to access its content.
stderr_file: stderr file of the Abinit run. use stderr_file.read() to access its content.
Raises:
`RuntimeError` if executable is not in $PATH.
"""
if not self.allocated:
self.allocate()
isok, tuples = True, []
for task in self.iflat_tasks():
t = task.input.abivalidate()
if t.retcode != 0: isok = False
tuples.append(t)
return isok, tuples
def check_dependencies(self):
"""Test the dependencies of the nodes for possible deadlocks."""
deadlocks = []
for task in self.iflat_tasks():
for dep in task.deps:
if dep.node.depends_on(task):
deadlocks.append((task, dep.node))
if deadlocks:
lines = ["Detect wrong list of dependecies that will lead to a deadlock:"]
lines.extend(["%s <--> %s" % nodes for nodes in deadlocks])
raise RuntimeError("\n".join(lines))
def find_deadlocks(self):
"""
This function detects deadlocks
Return:
named tuple with the tasks grouped in: deadlocks, runnables, running
"""
        # Find jobs that can be submitted and the jobs that are already in the queue.
runnables = []
for work in self:
runnables.extend(work.fetch_alltasks_to_run())
runnables.extend(list(self.iflat_tasks(status=self.S_SUB)))
# Running jobs.
running = list(self.iflat_tasks(status=self.S_RUN))
# Find deadlocks.
err_tasks = self.errored_tasks
deadlocked = []
if err_tasks:
for task in self.iflat_tasks():
if any(task.depends_on(err_task) for err_task in err_tasks):
deadlocked.append(task)
return dict2namedtuple(deadlocked=deadlocked, runnables=runnables, running=running)
def check_status(self, **kwargs):
"""
Check the status of the works in self.
Args:
show: True to show the status of the flow.
kwargs: keyword arguments passed to show_status
"""
for work in self:
work.check_status()
if kwargs.pop("show", False):
self.show_status(**kwargs)
@property
def status(self):
"""The status of the :class:`Flow` i.e. the minimum of the status of its tasks and its works"""
return min(work.get_all_status(only_min=True) for work in self)
#def restart_unconverged_tasks(self, max_nlauch, excs):
# nlaunch = 0
# for task in self.unconverged_tasks:
# try:
# logger.info("Flow will try restart task %s" % task)
# fired = task.restart()
# if fired:
# nlaunch += 1
# max_nlaunch -= 1
# if max_nlaunch == 0:
# logger.info("Restart: too many jobs in the queue, returning")
# self.pickle_dump()
# return nlaunch, max_nlaunch
# except task.RestartError:
# excs.append(straceback())
# return nlaunch, max_nlaunch
def fix_abicritical(self):
"""
This function tries to fix critical events originating from ABINIT.
Returns the number of tasks that have been fixed.
"""
count = 0
for task in self.iflat_tasks(status=self.S_ABICRITICAL):
count += task.fix_abicritical()
return count
def fix_queue_critical(self):
"""
This function tries to fix critical events originating from the queue submission system.
Returns the number of tasks that have been fixed.
"""
count = 0
for task in self.iflat_tasks(status=self.S_QCRITICAL):
logger.info("Will try to fix task %s" % str(task))
try:
print(task.fix_queue_critical())
count += 1
except FixQueueCriticalError:
logger.info("Not able to fix task %s" % task)
return count
def show_info(self, **kwargs):
"""
Print info on the flow i.e. total number of tasks, works, tasks grouped by class.
Example:
Task Class Number
------------ --------
ScfTask 1
NscfTask 1
ScrTask 2
SigmaTask 6
"""
stream = kwargs.pop("stream", sys.stdout)
lines = [str(self)]
app = lines.append
app("Number of works: %d, total number of tasks: %s" % (len(self), self.num_tasks) )
app("Number of tasks with a given class:\n")
# Build Table
data = [[cls.__name__, len(tasks)]
for cls, tasks in self.groupby_task_class().items()]
app(str(tabulate(data, headers=["Task Class", "Number"])))
stream.write("\n".join(lines))
def show_summary(self, **kwargs):
"""
Print a short summary with the status of the flow and a counter task_status --> number_of_tasks
Args:
stream: File-like object, Default: sys.stdout
Example:
Status Count
--------- -------
Completed 10
<Flow, node_id=27163, workdir=flow_gwconv_ecuteps>, num_tasks=10, all_ok=True
"""
stream = kwargs.pop("stream", sys.stdout)
stream.write("\n")
table = list(self.status_counter.items())
s = tabulate(table, headers=["Status", "Count"])
stream.write(s + "\n")
stream.write("\n")
stream.write("%s, num_tasks=%s, all_ok=%s\n" % (str(self), self.num_tasks, self.all_ok))
stream.write("\n")
def show_status(self, **kwargs):
"""
Report the status of the works and the status of the different tasks on the specified stream.
Args:
stream: File-like object, Default: sys.stdout
nids: List of node identifiers. By defaults all nodes are shown
wslice: Slice object used to select works.
verbose: Verbosity level (default 0). > 0 to show only the works that are not finalized.
"""
stream = kwargs.pop("stream", sys.stdout)
nids = as_set(kwargs.pop("nids", None))
wslice = kwargs.pop("wslice", None)
verbose = kwargs.pop("verbose", 0)
wlist = None
if wslice is not None:
            # Convert the slice into the list of selected work indices.
            wlist = list(range(len(self)))[wslice]
#has_colours = stream_has_colours(stream)
has_colours = True
red = "red" if has_colours else None
for i, work in enumerate(self):
if nids and work.node_id not in nids: continue
print("", file=stream)
cprint_map("Work #%d: %s, Finalized=%s" % (i, work, work.finalized), cmap={"True": "green"}, file=stream)
            if wlist is not None and i not in wlist: continue
if verbose == 0 and work.finalized:
print(" Finalized works are not shown. Use verbose > 0 to force output.", file=stream)
continue
headers = ["Task", "Status", "Queue", "MPI|Omp|Gb",
"Warn|Com", "Class", "Sub|Rest|Corr", "Time",
"Node_ID"]
table = []
tot_num_errors = 0
for task in work:
if nids and task.node_id not in nids: continue
task_name = os.path.basename(task.name)
# FIXME: This should not be done here.
# get_event_report should be called only in check_status
# Parse the events in the main output.
report = task.get_event_report()
# Get time info (run-time or time in queue or None)
stime = None
timedelta = task.datetimes.get_runtime()
if timedelta is not None:
stime = str(timedelta) + "R"
else:
timedelta = task.datetimes.get_time_inqueue()
if timedelta is not None:
stime = str(timedelta) + "Q"
events = "|".join(2*["NA"])
if report is not None:
events = '{:>4}|{:>3}'.format(*map(str, (
report.num_warnings, report.num_comments)))
para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (
task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("Gb"))))
task_info = list(map(str, [task.__class__.__name__,
(task.num_launches, task.num_restarts, task.num_corrections), stime, task.node_id]))
qinfo = "None"
if task.queue_id is not None:
qname = str(task.qname)
if not verbose:
qname = qname[:min(5, len(qname))]
qinfo = str(task.queue_id) + "@" + qname
if task.status.is_critical:
tot_num_errors += 1
task_name = colored(task_name, red)
if has_colours:
table.append([task_name, task.status.colored, qinfo,
para_info, events] + task_info)
                else:
                    table.append([task_name, str(task.status), qinfo,
                                  para_info, events] + task_info)
# Print table and write colorized line with the total number of errors.
print(tabulate(table, headers=headers, tablefmt="grid"), file=stream)
if tot_num_errors:
cprint("Total number of errors: %d" % tot_num_errors, "red", file=stream)
print("", file=stream)
if self.all_ok:
cprint("\nall_ok reached\n", "green", file=stream)
def show_events(self, status=None, nids=None):
"""
        Print the Abinit events (ERRORS, WARNINGS, COMMENTS) to stdout
Args:
            status: if not None, only the tasks with this status are selected
nids: optional list of node identifiers used to filter the tasks.
"""
nrows, ncols = get_terminal_size()
for task in self.iflat_tasks(status=status, nids=nids):
report = task.get_event_report()
if report:
print(make_banner(str(task), width=ncols, mark="="))
print(report)
#report = report.filter_types()
def show_corrections(self, status=None, nids=None):
"""
Show the corrections applied to the flow at run-time.
Args:
            status: if not None, only the tasks with this status are selected.
nids: optional list of node identifiers used to filter the tasks.
Return: The number of corrections found.
"""
nrows, ncols = get_terminal_size()
count = 0
for task in self.iflat_tasks(status=status, nids=nids):
if task.num_corrections == 0: continue
count += 1
print(make_banner(str(task), width=ncols, mark="="))
for corr in task.corrections:
pprint(corr)
if not count: print("No correction found.")
return count
def show_history(self, status=None, nids=None, full_history=False, metadata=False):
"""
Print the history of the flow to stdout.
Args:
            status: if not None, only the tasks with this status are selected
full_history: Print full info set, including nodes with an empty history.
nids: optional list of node identifiers used to filter the tasks.
metadata: print history metadata (experimental)
"""
nrows, ncols = get_terminal_size()
works_done = []
        # Loop over the tasks and show the history of each work that is not yet in works_done
for task in self.iflat_tasks(status=status, nids=nids):
work = task.work
if work not in works_done:
works_done.append(work)
if work.history or full_history:
cprint(make_banner(str(work), width=ncols, mark="="), **work.status.color_opts)
print(work.history.to_string(metadata=metadata))
if task.history or full_history:
cprint(make_banner(str(task), width=ncols, mark="="), **task.status.color_opts)
print(task.history.to_string(metadata=metadata))
# Print the history of the flow.
if self.history or full_history:
cprint(make_banner(str(self), width=ncols, mark="="), **self.status.color_opts)
print(self.history.to_string(metadata=metadata))
def show_inputs(self, varnames=None, nids=None, wslice=None, stream=sys.stdout):
"""
Print the input of the tasks to the given stream.
Args:
varnames:
List of Abinit variables. If not None, only the variable in varnames
are selected and printed.
nids:
List of node identifiers. By defaults all nodes are shown
wslice:
Slice object used to select works.
stream:
File-like object, Default: sys.stdout
"""
if varnames is not None:
# Build dictionary varname --> [(task1, value), (task2, value), ...]
varnames = [s.strip() for s in list_strings(varnames)]
dlist = collections.defaultdict(list)
for task in self.select_tasks(nids=nids, wslice=wslice):
dstruct = task.input.structure.as_dict(fmt="abivars")
for vname in varnames:
value = task.input.get(vname, None)
if value is None: # maybe in structure?
value = dstruct.get(vname, None)
if value is not None:
dlist[vname].append((task, value))
for vname in varnames:
tv_list = dlist[vname]
if not tv_list:
stream.write("[%s]: Found 0 tasks with this variable\n" % vname)
else:
stream.write("[%s]: Found %s tasks with this variable\n" % (vname, len(tv_list)))
for i, (task, value) in enumerate(tv_list):
stream.write(" %s --> %s\n" % (str(value), task))
stream.write("\n")
else:
lines = []
for task in self.select_tasks(nids=nids, wslice=wslice):
s = task.make_input(with_header=True)
# Add info on dependencies.
if task.deps:
s += "\n\nDependencies:\n" + "\n".join(str(dep) for dep in task.deps)
else:
s += "\n\nDependencies: None"
lines.append(2*"\n" + 80 * "=" + "\n" + s + 2*"\n")
stream.writelines(lines)
def listext(self, ext, stream=sys.stdout):
"""
Print to the given `stream` a table with the list of the output files
with the given `ext` produced by the flow.
"""
nodes_files = []
for node in self.iflat_nodes():
filepath = node.outdir.has_abiext(ext)
if filepath:
nodes_files.append((node, File(filepath)))
if nodes_files:
print("Found %s files with extension `%s` produced by the flow" % (len(nodes_files), ext), file=stream)
table = [[f.relpath, "%.2f" % (f.get_stat().st_size / 1024**2),
node.node_id, node.__class__.__name__]
for node, f in nodes_files]
print(tabulate(table, headers=["File", "Size [Mb]", "Node_ID", "Node Class"]), file=stream)
else:
print("No output file with extension %s has been produced by the flow" % ext, file=stream)
def select_tasks(self, nids=None, wslice=None, task_class=None):
"""
Return a list with a subset of tasks.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
task_class: String or class used to select tasks. Ignored if None.
.. note::
nids and wslice are mutually exclusive.
If no argument is provided, the full list of tasks is returned.
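        Example (illustrative; selects all tasks of class ScfTask)::
            scf_tasks = flow.select_tasks(task_class="ScfTask")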
"""
if nids is not None:
assert wslice is None
tasks = self.tasks_from_nids(nids)
elif wslice is not None:
tasks = []
for work in self[wslice]:
tasks.extend([t for t in work])
else:
# All tasks selected if no option is provided.
tasks = list(self.iflat_tasks())
# Filter by task class
if task_class is not None:
tasks = [t for t in tasks if t.isinstance(task_class)]
return tasks
def get_task_scfcycles(self, nids=None, wslice=None, task_class=None, exclude_ok_tasks=False):
"""
        Return a list of (task, scfcycle) tuples for all the tasks in the flow with a SCF algorithm,
e.g. electronic GS-SCF iteration, DFPT-SCF iterations etc.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
task_class: String or class used to select tasks. Ignored if None.
exclude_ok_tasks: True if only running tasks should be considered.
Returns:
            List of ``(task, scf_cycle)`` tuples, where ``scf_cycle`` is a `ScfCycle` subclass instance.
"""
select_status = [self.S_RUN] if exclude_ok_tasks else [self.S_RUN, self.S_OK]
tasks_cycles = []
for task in self.select_tasks(nids=nids, wslice=wslice):
            # Filter out tasks that don't match the selected status or have no SCF cycle.
if task.status not in select_status or task.cycle_class is None:
continue
if task_class is not None and not task.isinstance(task_class):
continue
try:
cycle = task.cycle_class.from_file(task.output_file.path)
if cycle is not None:
tasks_cycles.append((task, cycle))
except Exception:
# This is intentionally ignored because from_file can fail for several reasons.
pass
return tasks_cycles
def show_tricky_tasks(self, verbose=0):
"""
Print list of tricky tasks i.e. tasks that have been restarted or
launched more than once or tasks with corrections.
Args:
verbose: Verbosity level. If > 0, task history and corrections (if any) are printed.
"""
nids, tasks = [], []
for task in self.iflat_tasks():
if task.num_launches > 1 or any(n > 0 for n in (task.num_restarts, task.num_corrections)):
nids.append(task.node_id)
tasks.append(task)
if not nids:
cprint("Everything's fine, no tricky tasks found", color="green")
else:
self.show_status(nids=nids)
if not verbose:
print("Use --verbose to print task history.")
return
for nid, task in zip(nids, tasks):
cprint(repr(task), **task.status.color_opts)
self.show_history(nids=[nid], full_history=False, metadata=False)
#if task.num_restarts:
# self.show_restarts(nids=[nid])
if task.num_corrections:
self.show_corrections(nids=[nid])
def inspect(self, nids=None, wslice=None, **kwargs):
"""
Inspect the tasks (SCF iterations, Structural relaxation ...) and
produces matplotlib plots.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
kwargs: keyword arguments passed to `task.inspect` method.
.. note::
nids and wslice are mutually exclusive.
If nids and wslice are both None, all tasks in self are inspected.
Returns:
List of `matplotlib` figures.
"""
figs = []
for task in self.select_tasks(nids=nids, wslice=wslice):
if hasattr(task, "inspect"):
fig = task.inspect(**kwargs)
if fig is None:
cprint("Cannot inspect Task %s" % task, color="blue")
else:
figs.append(fig)
else:
cprint("Task %s does not provide an inspect method" % task, color="blue")
return figs
def get_results(self, **kwargs):
results = self.Results.from_node(self)
results.update(self.get_dict_for_mongodb_queries())
return results
def get_dict_for_mongodb_queries(self):
"""
This function returns a dictionary with the attributes that will be
put in the mongodb document to facilitate the query.
Subclasses may want to replace or extend the default behaviour.
"""
d = {}
return d
# TODO
all_structures = [task.input.structure for task in self.iflat_tasks()]
all_pseudos = [task.input.pseudos for task in self.iflat_tasks()]
def look_before_you_leap(self):
"""
This method should be called before running the calculation to make
sure that the most important requirements are satisfied.
Return:
List of strings with inconsistencies/errors.
"""
errors = []
try:
self.check_dependencies()
except self.Error as exc:
errors.append(str(exc))
if self.has_db:
try:
self.manager.db_connector.get_collection()
except Exception as exc:
errors.append("""
ERROR while trying to connect to the MongoDB database:
Exception:
%s
Connector:
%s
""" % (exc, self.manager.db_connector))
return "\n".join(errors)
@property
def has_db(self):
"""True if flow uses `MongoDB` to store the results."""
return self.manager.has_db
def db_insert(self):
"""
        Insert results in the `MongoDB` database.
"""
assert self.has_db
# Connect to MongoDb and get the collection.
coll = self.manager.db_connector.get_collection()
print("Mongodb collection %s with count %d", coll, coll.count())
start = time.time()
for work in self:
for task in work:
results = task.get_results()
pprint(results)
results.update_collection(coll)
results = work.get_results()
pprint(results)
results.update_collection(coll)
print("MongoDb update done in %s [s]" % time.time() - start)
results = self.get_results()
pprint(results)
results.update_collection(coll)
# Update the pickle file to save the mongo ids.
self.pickle_dump()
for d in coll.find():
pprint(d)
def tasks_from_nids(self, nids):
"""
Return the list of tasks associated to the given list of node identifiers (nids).
.. note::
Invalid ids are ignored
"""
if not isinstance(nids, collections.Iterable): nids = [nids]
n2task = {task.node_id: task for task in self.iflat_tasks()}
return [n2task[n] for n in nids if n in n2task]
def wti_from_nids(self, nids):
"""Return the list of (w, t) indices from the list of node identifiers nids."""
return [task.pos for task in self.tasks_from_nids(nids)]
def open_files(self, what="o", status=None, op="==", nids=None, editor=None):
"""
Open the files of the flow inside an editor (command line interface).
Args:
what: string with the list of characters selecting the file type
Possible choices:
i ==> input_file,
o ==> output_file,
f ==> files_file,
j ==> job_file,
l ==> log_file,
e ==> stderr_file,
q ==> qout_file,
all ==> all files.
status: if not None, only the tasks with this status are selected
op: status operator. Requires status. A task is selected
if task.status op status evaluates to true.
nids: optional list of node identifiers used to filter the tasks.
editor: Select the editor. None to use the default editor ($EDITOR shell env var)
"""
# Build list of files to analyze.
files = []
for task in self.iflat_tasks(status=status, op=op, nids=nids):
lst = task.select_files(what)
if lst:
files.extend(lst)
return Editor(editor=editor).edit_files(files)
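# Usage sketch (assumes $EDITOR is set and `flow` is a built Flow):
#
#     flow.open_files(what="ol", status=flow.S_ERROR)   # output + log files of errored tasks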
def parse_timing(self, nids=None):
"""
Parse the timer data in the main output file(s) of Abinit.
Requires timopt /= 0 in the input file (usually timopt = -1)
Args:
nids: optional list of node identifiers used to filter the tasks.
Return: :class:`AbinitTimerParser` instance, None if error.
"""
# Get the list of output files according to nids.
paths = [task.output_file.path for task in self.iflat_tasks(nids=nids)]
# Parse data.
from .abitimer import AbinitTimerParser
parser = AbinitTimerParser()
read_ok = parser.parse(paths)
if read_ok:
return parser
return None
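# Usage sketch (assumes the Abinit runs were performed with timopt != 0, e.g. timopt = -1):
#
#     parser = flow.parse_timing()
#     if parser is None:
#         print("Could not parse timing sections")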
def show_abierrors(self, nids=None, stream=sys.stdout):
"""
Write to the given stream the list of ABINIT errors for all tasks whose status is S_ABICRITICAL.
Args:
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout
"""
lines = []
app = lines.append
for task in self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids):
header = "=== " + task.qout_file.path + "==="
app(header)
report = task.get_event_report()
if report is not None:
app("num_errors: %s, num_warnings: %s, num_comments: %s" % (
report.num_errors, report.num_warnings, report.num_comments))
app("*** ERRORS ***")
app("\n".join(str(e) for e in report.errors))
app("*** BUGS ***")
app("\n".join(str(b) for b in report.bugs))
else:
app("get_envent_report returned None!")
app("=" * len(header) + 2*"\n")
return stream.writelines(lines)
def show_qouts(self, nids=None, stream=sys.stdout):
"""
Write to the given stream the content of the queue output file for all tasks whose status is S_QCRITICAL.
Args:
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout
"""
lines = []
for task in self.iflat_tasks(status=self.S_QCRITICAL, nids=nids):
header = "=== " + task.qout_file.path + "==="
lines.append(header)
if task.qout_file.exists:
with open(task.qout_file.path, "rt") as fh:
lines += fh.readlines()
else:
lines.append("File does not exist!")
lines.append("=" * len(header) + 2*"\n")
return stream.writelines(lines)
def debug(self, status=None, nids=None):
"""
This method is usually used when the flow didn't complete successfully.
It analyzes the files produced by the tasks to facilitate debugging.
Info is printed to stdout.
Args:
status: If not None, only the tasks with this status are selected
nids: optional list of node identifiers used to filter the tasks.
"""
nrows, ncols = get_terminal_size()
# Test for scheduler exceptions first.
sched_excfile = os.path.join(self.workdir, "_exceptions")
if os.path.exists(sched_excfile):
with open(sched_excfile, "r") as fh:
cprint("Found exceptions raised by the scheduler", "red")
cprint(fh.read(), color="red")
return
if status is not None:
tasks = list(self.iflat_tasks(status=status, nids=nids))
else:
errors = list(self.iflat_tasks(status=self.S_ERROR, nids=nids))
qcriticals = list(self.iflat_tasks(status=self.S_QCRITICAL, nids=nids))
abicriticals = list(self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids))
tasks = errors + qcriticals + abicriticals
# For each task selected:
# 1) Check the error files of the task. If not empty, print the content to stdout and we are done.
# 2) If error files are empty, look at the master log file for possible errors
# 3) If this check also fails, scan all the process log files.
# TODO: This check is not needed if we introduce a new __abinit_error__ file
# that is created by the first MPI process that invokes MPI abort!
#
ntasks = 0
for task in tasks:
print(make_banner(str(task), width=ncols, mark="="))
ntasks += 1
# Start with error files.
for efname in ["qerr_file", "stderr_file",]:
err_file = getattr(task, efname)
if err_file.exists:
s = err_file.read()
if not s: continue
print(make_banner(str(err_file), width=ncols, mark="="))
cprint(s, color="red")
#count += 1
# Check main log file.
try:
report = task.get_event_report()
if report and report.num_errors:
print(make_banner(os.path.basename(report.filename), width=ncols, mark="="))
s = "\n".join(str(e) for e in report.errors)
else:
s = None
except Exception as exc:
s = str(exc)
count = 0 # count > 0 means we found some useful info that could explain the failures.
if s is not None:
cprint(s, color="red")
count += 1
if not count:
# Inspect all log files produced by the other nodes.
log_files = task.tmpdir.list_filepaths(wildcard="*LOG_*")
if not log_files:
cprint("No *LOG_* file in tmpdir. This usually happens if you are running with many CPUs", color="magenta")
for log_file in log_files:
try:
report = EventsParser().parse(log_file)
if report.errors:
print(report)
count += 1
break
except Exception as exc:
cprint(str(exc), color="red")
count += 1
break
if not count:
cprint("Houston, we could not find any error message that can explain the problem", color="magenta")
print("Number of tasks analyzed: %d" % ntasks)
def cancel(self, nids=None):
"""
Cancel all the tasks that are in the queue.
nids is an optional list of node identifiers used to filter the tasks.
Returns:
Number of jobs cancelled, negative value if error
"""
if self.has_chrooted:
# TODO: Use paramiko to kill the job?
warnings.warn("Cannot cancel the flow via sshfs!")
return -1
# If we are running with the scheduler, we must send a SIGKILL signal.
if os.path.exists(self.pid_file):
cprint("Found scheduler attached to this flow.", "yellow")
cprint("Sending SIGKILL to the scheduler before cancelling the tasks!", "yellow")
with open(self.pid_file, "rt") as fh:
pid = int(fh.readline())
retcode = os.system("kill -9 %d" % pid)
self.history.info("Sent SIGKILL to the scheduler, retcode: %s" % retcode)
try:
os.remove(self.pid_file)
except IOError:
pass
num_cancelled = 0
for task in self.iflat_tasks(nids=nids):
num_cancelled += task.cancel()
return num_cancelled
def get_njobs_in_queue(self, username=None):
"""
Returns the number of jobs in the queue, or None if the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
return self.manager.qadapter.get_njobs_in_queue(username=username)
def rmtree(self, ignore_errors=False, onerror=None):
"""Remove workdir (same API as shutil.rmtree)."""
if not os.path.exists(self.workdir): return
shutil.rmtree(self.workdir, ignore_errors=ignore_errors, onerror=onerror)
def rm_and_build(self):
"""Remove the workdir and rebuild the flow."""
self.rmtree()
self.build()
def build(self, *args, **kwargs):
"""Make directories and files of the `Flow`."""
# Allocate here if not done yet!
if not self.allocated: self.allocate()
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Check the nodeid file in workdir
nodeid_path = os.path.join(self.workdir, ".nodeid")
if os.path.exists(nodeid_path):
with open(nodeid_path, "rt") as fh:
node_id = int(fh.read())
if self.node_id != node_id:
msg = ("\nFound node_id %s in file:\n\n %s\n\nwhile the node_id of the present flow is %d.\n"
"This means that you are trying to build a new flow in a directory already used by another flow.\n"
"Possible solutions:\n"
" 1) Change the workdir of the new flow.\n"
" 2) remove the old directory either with `rm -r` or by calling the method flow.rmtree()\n"
% (node_id, nodeid_path, self.node_id))
raise RuntimeError(msg)
else:
with open(nodeid_path, "wt") as fh:
fh.write(str(self.node_id))
if self.pyfile and os.path.isfile(self.pyfile):
shutil.copy(self.pyfile, self.workdir)
for work in self:
work.build(*args, **kwargs)
def build_and_pickle_dump(self, abivalidate=False):
"""
Build dirs and file of the `Flow` and save the object in pickle format.
Returns 0 if success
Args:
abivalidate: If True, all the input files are validate by calling
the abinit parser. If the validation fails, ValueError is raised.
"""
self.build()
if not abivalidate: return self.pickle_dump()
# Validation with Abinit.
isok, errors = self.abivalidate_inputs()
if isok: return self.pickle_dump()
errlines = []
for i, e in enumerate(errors):
errlines.append("[%d] %s" % (i, e))
raise ValueError("\n".join(errlines))
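# Usage sketch (hypothetical `flow` object):
#
#     retcode = flow.build_and_pickle_dump(abivalidate=True)   # raises ValueError if an input is invalid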
@check_spectator
def pickle_dump(self):
"""
Save the status of the object in pickle format.
Returns 0 if success
"""
if self.has_chrooted:
warnings.warn("Cannot pickle_dump since we have chrooted from %s" % self.has_chrooted)
return -1
#if self.in_spectator_mode:
# warnings.warn("Cannot pickle_dump since flow is in_spectator_mode")
# return -2
protocol = self.pickle_protocol
# Atomic transaction with FileLock.
with FileLock(self.pickle_file):
with AtomicFile(self.pickle_file, mode="wb") as fh:
pmg_pickle_dump(self, fh, protocol=protocol)
return 0
def pickle_dumps(self, protocol=None):
"""
Return a string with the pickle representation.
`protocol` selects the pickle protocol. self.pickle_protocol is
used if `protocol` is None
"""
strio = StringIO()
pmg_pickle_dump(self, strio,
protocol=self.pickle_protocol if protocol is None
else protocol)
return strio.getvalue()
def register_task(self, input, deps=None, manager=None, task_class=None, append=False):
"""
Utility function that generates a `Work` made of a single task
Args:
input: :class:`AbinitInput`
deps: List of :class:`Dependency` objects specifying the dependency of this node.
An empty list of deps implies that this node has no dependencies.
manager: The :class:`TaskManager` responsible for the submission of the task.
If manager is None, we use the :class:`TaskManager` specified during the creation of the work.
task_class: Task subclass to instantiate. Default: :class:`AbinitTask`
append: If true, the task is added to the last work (a new Work is created if flow is empty)
Returns:
The generated :class:`Work` for the task, work[0] is the actual task.
"""
# append True is much easier to use. In principle should be the default behaviour
# but this would break the previous API so ...
if not append:
work = Work(manager=manager)
else:
if not self.works:
work = Work(manager=manager)
append = False
else:
work = self.works[-1]
task = work.register(input, deps=deps, task_class=task_class)
if not append: self.register_work(work)
return work
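# Sketch of the two registration modes (`scf_input` and `nscf_input` are
# hypothetical AbinitInput objects):
#
#     work = flow.register_task(scf_input)                                 # new Work, work[0] is the task
#     flow.register_task(nscf_input, deps={work[0]: "DEN"}, append=True)   # reuse the last Work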
def register_work(self, work, deps=None, manager=None, workdir=None):
"""
Register a new :class:`Work` and add it to the internal list, taking into account possible dependencies.
Args:
work: :class:`Work` object.
deps: List of :class:`Dependency` objects specifying the dependency of this node.
An empty list of deps implies that this node has no dependencies.
manager: The :class:`TaskManager` responsible for the submission of the task.
If manager is None, we use the `TaskManager` specified during the creation of the work.
workdir: The name of the directory used for the :class:`Work`.
Returns:
The registered :class:`Work`.
"""
if getattr(self, "workdir", None) is not None:
# The flow has a directory, build the name of the directory of the work.
work_workdir = None
if workdir is None:
work_workdir = os.path.join(self.workdir, "w" + str(len(self)))
else:
work_workdir = os.path.join(self.workdir, os.path.basename(workdir))
work.set_workdir(work_workdir)
if manager is not None:
work.set_manager(manager)
self.works.append(work)
if deps:
deps = [Dependency(node, exts) for node, exts in deps.items()]
work.add_deps(deps)
return work
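# Sketch: register a Work that depends on the WFK file produced by another node
# (`my_work` and `bands_work` are hypothetical; deps maps node -> extension(s)):
#
#     flow.register_work(my_work, deps={bands_work[1]: "WFK"})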
def register_work_from_cbk(self, cbk_name, cbk_data, deps, work_class, manager=None):
"""
Registers a callback function that will generate the :class:`Task` of the :class:`Work`.
Args:
cbk_name: Name of the callback function (must be a bound method of self)
cbk_data: Additional data passed to the callback function.
deps: List of :class:`Dependency` objects specifying the dependency of the work.
work_class: :class:`Work` class to instantiate.
manager: The :class:`TaskManager` responsible for the submission of the task.
If manager is None, we use the `TaskManager` specified during the creation of the :class:`Flow`.
Returns:
The :class:`Work` that will be finalized by the callback.
"""
# TODO: pass a Work factory instead of a class
# Directory of the Work.
work_workdir = os.path.join(self.workdir, "w" + str(len(self)))
# Create an empty work and register the callback
work = work_class(workdir=work_workdir, manager=manager)
self._works.append(work)
deps = [Dependency(node, exts) for node, exts in deps.items()]
if not deps:
raise ValueError("A callback must have deps!")
work.add_deps(deps)
# Wrap the callable in a Callback object and save
# useful info such as the index of the work and the callback data.
cbk = FlowCallback(cbk_name, self, deps=deps, cbk_data=cbk_data)
self._callbacks.append(cbk)
return work
@property
def allocated(self):
"""Numer of allocations. Set by `allocate`."""
try:
return self._allocated
except AttributeError:
return 0
def allocate(self, workdir=None, use_smartio=False):
"""
Allocate the `Flow` i.e. assign the `workdir` and (optionally)
the :class:`TaskManager` to the different tasks in the Flow.
Args:
workdir: Working directory of the flow. Must be specified here
if we haven't initialized the workdir in the __init__.
Return:
self
"""
if workdir is not None:
# We set the workdir of the flow here
self.set_workdir(workdir)
for i, work in enumerate(self):
work.set_workdir(os.path.join(self.workdir, "w" + str(i)))
if not hasattr(self, "workdir"):
raise RuntimeError("You must call flow.allocate(workdir) if the workdir is not passed to __init__")
for work in self:
# Each work has a reference to its flow.
work.allocate(manager=self.manager)
work.set_flow(self)
# Each task has a reference to its work.
for task in work:
task.set_work(work)
self.check_dependencies()
if not hasattr(self, "_allocated"): self._allocated = 0
self._allocated += 1
if use_smartio:
self.use_smartio()
return self
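# Sketch (assumes the workdir was not set in __init__; the path is hypothetical):
#
#     flow.allocate(workdir="/path/to/flow_workdir", use_smartio=True)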
def use_smartio(self):
"""
This function should be called when the entire `Flow` has been built.
It tries to reduce the pressure on the hard disk by using Abinit smart-io
capabilities for those files that are not needed by other nodes.
Smart-io means that big files (e.g. WFK) are written only if the calculation
is unconverged so that we can restart from it. No output is produced if
convergence is achieved.
Return:
self
"""
if not self.allocated:
#raise RuntimeError("You must call flow.allocate() before invoking flow.use_smartio()")
self.allocate()
for task in self.iflat_tasks():
children = task.get_children()
if not children:
# Change the input so that output files are produced
# only if the calculation is not converged.
task.history.info("Will disable IO for task")
task.set_vars(prtwf=-1, prtden=0) # TODO: prt1wf=-1,
else:
must_produce_abiexts = []
for child in children:
# Get the list of dependencies. Find that task
for d in child.deps:
must_produce_abiexts.extend(d.exts)
must_produce_abiexts = set(must_produce_abiexts)
#print("must_produce_abiexts", must_produce_abiexts)
# Variables supporting smart-io.
smart_prtvars = {
"prtwf": "WFK",
}
# Set the variable to -1 to disable the output
for varname, abiext in smart_prtvars.items():
if abiext not in must_produce_abiexts:
print("%s: setting %s to -1" % (task, varname))
task.set_vars({varname: -1})
return self
#def new_from_input_decorators(self, new_workdir, decorators)
# """
# Return a new :class:`Flow` in which all the Abinit inputs have been
# decorated by decorators.
# """
# # The tricky part here is how to assign a new id to the new nodes while maintaining the
# # correct dependencies! The safest approach would be to pass through __init__
# # instead of using copy.deepcopy()
# return flow
def show_dependencies(self, stream=sys.stdout):
"""Writes to the given stream the ASCII representation of the dependency tree."""
def child_iter(node):
return [d.node for d in node.deps]
def text_str(node):
return colored(str(node), color=node.status.color_opts["color"])
for task in self.iflat_tasks():
print(draw_tree(task, child_iter, text_str), file=stream)
def on_dep_ok(self, signal, sender):
# TODO
# Replace this callback with dynamic dispatch
# on_all_S_OK for work
# on_S_OK for task
logger.info("on_dep_ok with sender %s, signal %s" % (str(sender), signal))
for i, cbk in enumerate(self._callbacks):
if not cbk.handle_sender(sender):
logger.info("%s does not handle sender %s" % (cbk, sender))
continue
if not cbk.can_execute():
logger.info("Cannot execute %s" % cbk)
continue
# Execute the callback and disable it
self.history.info("flow in on_dep_ok: about to execute callback %s" % str(cbk))
cbk()
cbk.disable()
# Update the database.
self.pickle_dump()
@check_spectator
def finalize(self):
"""
This method is called when the flow is completed.
Return 0 if success
"""
if self.finalized:
self.history.warning("Calling finalize on an already finalized flow.")
return 1
self.history.info("Calling flow.finalize.")
self.finalized = True
if self.has_db:
self.history.info("Saving results in database.")
try:
self.db_insert()
self.finalized = True
except Exception:
logger.critical("MongoDb insertion failed.")
return 2
# Here we remove the big output files if we have the garbage collector
# and the policy is set to "flow."
if self.gc is not None and self.gc.policy == "flow":
self.history.info("gc.policy set to flow. Will clean task output files.")
for task in self.iflat_tasks():
task.clean_output_files()
return 0
def set_garbage_collector(self, exts=None, policy="task"):
"""
Enable the garbage collector that will remove the big output files that are not needed.
Args:
exts: string or list with the Abinit file extensions to be removed. A default is
provided if exts is None
policy: Either `flow` or `task`. If policy is set to 'task', we remove the output
files as soon as the task reaches S_OK. If 'flow', the files are removed
only when the flow is finalized. This option should be used when we are dealing
with a dynamic flow with callbacks generating other tasks since a :class:`Task`
might not be aware of its children when it reached S_OK.
"""
assert policy in ("task", "flow")
exts = list_strings(exts) if exts is not None else ("WFK", "SUS", "SCR", "BSR", "BSC")
gc = GarbageCollector(exts=set(exts), policy=policy)
self.set_gc(gc)
for work in self:
#work.set_gc(gc) # TODO Add support for Works and flow policy
for task in work:
task.set_gc(gc)
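# Sketch: remove WFK and SCR files as soon as each task reaches S_OK;
# use policy="flow" for dynamic flows built via callbacks.
#
#     flow.set_garbage_collector(exts=["WFK", "SCR"], policy="task")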
def connect_signals(self):
"""
Connect the signals within the `Flow`.
The `Flow` is responsible for catching the important signals raised from its works.
"""
# Connect the signals inside each Work.
for work in self:
work.connect_signals()
# Observe the nodes that must reach S_OK in order to call the callbacks.
for cbk in self._callbacks:
#cbk.enable()
for dep in cbk.deps:
logger.info("connecting %s \nwith sender %s, signal %s" % (str(cbk), dep.node, dep.node.S_OK))
dispatcher.connect(self.on_dep_ok, signal=dep.node.S_OK, sender=dep.node, weak=False)
# Associate to each signal the callback _on_signal
# (bound method of the node that will be called by `Flow`
# Each node will set its attribute _done_signal to True to tell
# the flow that this callback should be disabled.
# Register the callbacks for the Work.
#for work in self:
# slot = self._sig_slots[work]
# for signal in S_ALL:
# done_signal = getattr(work, "_done_ " + signal, False)
# if not done_sig:
# cbk_name = "_on_" + str(signal)
# cbk = getattr(work, cbk_name, None)
# if cbk is None: continue
# slot[work][signal].append(cbk)
# print("connecting %s\nwith sender %s, signal %s" % (str(cbk), dep.node, dep.node.S_OK))
# dispatcher.connect(self.on_dep_ok, signal=signal, sender=dep.node, weak=False)
# Register the callbacks for the Tasks.
#self.show_receivers()
def disconnect_signals(self):
"""Disable the signals within the `Flow`."""
# Disconnect the signals inside each Work.
for work in self:
work.disconnect_signals()
# Disable callbacks.
for cbk in self._callbacks:
cbk.disable()
def show_receivers(self, sender=None, signal=None):
sender = sender if sender is not None else dispatcher.Any
signal = signal if signal is not None else dispatcher.Any
print("*** live receivers ***")
for rec in dispatcher.liveReceivers(dispatcher.getReceivers(sender, signal)):
print("receiver -->", rec)
print("*** end live receivers ***")
def set_spectator_mode(self, mode=True):
"""
When the flow is in spectator_mode, we have to disable signals, pickle dump and possible callbacks.
A spectator can still operate on the flow but the new status of the flow won't be saved in
the pickle file. Usually the flow is in spectator mode when we are already running it via
the scheduler or other means and we should not interfere with its evolution.
This is the reason why signals and callbacks must be disabled.
Unfortunately, preventing client-code from calling methods with side-effects when
the flow is in spectator mode is not easy (e.g. flow.cancel will cancel the tasks submitted to the
queue and the flow used by the scheduler won't see this change!).
"""
# Set the flags of all the nodes in the flow.
mode = bool(mode)
self.in_spectator_mode = mode
for node in self.iflat_nodes():
node.in_spectator_mode = mode
# connect/disconnect signals depending on mode.
if not mode:
self.connect_signals()
else:
self.disconnect_signals()
#def get_results(self, **kwargs)
def rapidfire(self, check_status=True, max_nlaunch=-1, max_loops=1, sleep_time=5, **kwargs):
"""
Use :class:`PyLauncher` to submit tasks in rapidfire mode.
kwargs contains the options passed to the launcher.
Args:
check_status: True to check the status of the flow before submitting the tasks.
max_nlaunch: Maximum number of launches. default: no limit.
max_loops: Maximum number of loops
sleep_time: seconds to sleep between rapidfire loop iterations
Return:
Number of tasks submitted.
"""
self.check_pid_file()
self.set_spectator_mode(False)
if check_status: self.check_status()
from .launcher import PyLauncher
return PyLauncher(self, **kwargs).rapidfire(max_nlaunch=max_nlaunch, max_loops=max_loops, sleep_time=sleep_time)
def single_shot(self, check_status=True, **kwargs):
"""
Use :class:`PyLauncher` to submit one task.
kwargs contains the options passed to the launcher.
Return:
number of tasks submitted.
"""
self.check_pid_file()
self.set_spectator_mode(False)
if check_status: self.check_status()
from .launcher import PyLauncher
return PyLauncher(self, **kwargs).single_shot()
def make_scheduler(self, **kwargs):
"""
Build and return a :class:`PyFlowScheduler` to run the flow.
Args:
kwargs: if empty we use the user configuration file.
if `filepath` in kwargs we init the scheduler from filepath.
else pass **kwargs to :class:`PyFlowScheduler` __init__ method.
"""
from .launcher import PyFlowScheduler
if not kwargs:
# User config if kwargs is empty
sched = PyFlowScheduler.from_user_config()
else:
# Use from_file if filepath if present, else call __init__
filepath = kwargs.pop("filepath", None)
if filepath is not None:
assert not kwargs
sched = PyFlowScheduler.from_file(filepath)
else:
sched = PyFlowScheduler(**kwargs)
sched.add_flow(self)
return sched
def batch(self, timelimit=None):
"""
Run the flow in batch mode, return exit status of the job script.
Requires a manager.yml file and a batch_adapter adapter.
Args:
timelimit: Time limit (int with seconds or string with time given with the slurm convention:
"days-hours:minutes:seconds"). If timelimit is None, the default value specified in the
`batch_adapter` entry of `manager.yml` is used.
"""
from .launcher import BatchLauncher
# Create a batch dir from the flow.workdir.
prev_dir = os.path.join(*self.workdir.split(os.path.sep)[:-1])
prev_dir = os.path.join(os.path.sep, prev_dir)
workdir = os.path.join(prev_dir, os.path.basename(self.workdir) + "_batch")
return BatchLauncher(workdir=workdir, flows=self).submit(timelimit=timelimit)
def make_light_tarfile(self, name=None):
"""Lightweight tarball file. Mainly used for debugging. Return the name of the tarball file."""
name = os.path.basename(self.workdir) + "-light.tar.gz" if name is None else name
return self.make_tarfile(name=name, exclude_dirs=["outdata", "indata", "tmpdata"])
def make_tarfile(self, name=None, max_filesize=None, exclude_exts=None, exclude_dirs=None, verbose=0, **kwargs):
"""
Create a tarball file.
Args:
name: Name of the tarball file. Set to os.path.basename(`flow.workdir`) + ".tar.gz" if name is None.
max_filesize (int or string with unit): a file is included in the tar file if its size <= max_filesize
Can be specified in bytes e.g. `max_filesize=1024` or with a string with unit e.g. `max_filesize="1 Mb"`.
No check is done if max_filesize is None.
exclude_exts: List of file extensions to be excluded from the tar file.
exclude_dirs: List of directory basenames to be excluded.
verbose (int): Verbosity level.
kwargs: keyword arguments passed to the :class:`TarFile` constructor.
Returns:
The name of the tarfile.
"""
def any2bytes(s):
"""Convert string or number to memory in bytes."""
if is_string(s):
return int(Memory.from_string(s).to("b"))
else:
return int(s)
if max_filesize is not None:
max_filesize = any2bytes(max_filesize)
if exclude_exts:
# Add/remove ".nc" so that we can simply pass "GSR" instead of "GSR.nc"
# Moreover this trick allows one to treat WFK.nc and WFK file on the same footing.
exts = []
for e in list_strings(exclude_exts):
exts.append(e)
if e.endswith(".nc"):
exts.append(e.replace(".nc", ""))
else:
exts.append(e + ".nc")
exclude_exts = exts
def filter(tarinfo):
"""
Function that takes a TarInfo object argument and returns the changed TarInfo object.
If it instead returns None the TarInfo object will be excluded from the archive.
"""
# Skip links.
if tarinfo.issym() or tarinfo.islnk():
if verbose: print("Excluding link: %s" % tarinfo.name)
return None
# Check size in bytes
if max_filesize is not None and tarinfo.size > max_filesize:
if verbose: print("Excluding %s due to max_filesize" % tarinfo.name)
return None
# Filter filenames.
if exclude_exts and any(tarinfo.name.endswith(ext) for ext in exclude_exts):
if verbose: print("Excluding %s due to extension" % tarinfo.name)
return None
# Exclude directories (use dir basenames).
if exclude_dirs and any(dir_name in exclude_dirs for dir_name in tarinfo.name.split(os.path.sep)):
if verbose: print("Excluding %s due to exclude_dirs" % tarinfo.name)
return None
return tarinfo
back = os.getcwd()
os.chdir(os.path.join(self.workdir, ".."))
import tarfile
name = os.path.basename(self.workdir) + ".tar.gz" if name is None else name
with tarfile.open(name=name, mode='w:gz', **kwargs) as tar:
tar.add(os.path.basename(self.workdir), arcname=None, recursive=True, exclude=None, filter=filter)
# Add the script used to generate the flow.
if self.pyfile is not None and os.path.exists(self.pyfile):
tar.add(self.pyfile)
os.chdir(back)
return name
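# Sketch (size and extensions below are arbitrary examples):
#
#     tarball = flow.make_tarfile(max_filesize="10 Mb", exclude_exts=["WFK"], verbose=1)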
#def abirobot(self, ext, check_status=True, nids=None):
# """
# Builds and return the :class:`Robot` subclass from the file extension `ext`.
# `nids` is an optional list of node identifiers used to filter the tasks in the flow.
# """
# from abipy.abilab import abirobot
# if check_status: self.check_status()
# return abirobot(flow=self, ext=ext, nids=nids):
def get_graphviz(self, engine="automatic", graph_attr=None, node_attr=None, edge_attr=None):
"""
Generate flow graph in the DOT language.
Args:
engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage']
graph_attr: Mapping of (attribute, value) pairs for the graph.
node_attr: Mapping of (attribute, value) pairs set for all nodes.
edge_attr: Mapping of (attribute, value) pairs set for all edges.
Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
"""
self.allocate()
from graphviz import Digraph
fg = Digraph("flow", #filename="flow_%s.gv" % os.path.basename(self.relworkdir),
engine="fdp" if engine == "automatic" else engine)
# Set graph attributes.
# https://www.graphviz.org/doc/info/
#fg.attr(label="%s@%s" % (self.__class__.__name__, self.relworkdir))
fg.attr(label=repr(self))
#fg.attr(fontcolor="white", bgcolor='purple:pink')
fg.attr(rankdir="LR", pagedir="BL")
#fg.attr(constraint="false", pack="true", packMode="clust")
fg.node_attr.update(color='lightblue2', style='filled')
#fg.node_attr.update(ranksep='equally')
# Add input attributes.
if graph_attr is not None:
fg.graph_attr.update(**graph_attr)
if node_attr is not None:
fg.node_attr.update(**node_attr)
if edge_attr is not None:
fg.edge_attr.update(**edge_attr)
def node_kwargs(node):
return dict(
#shape="circle",
color=node.color_hex,
fontsize="8.0",
label=(str(node) if not hasattr(node, "pos_str") else
node.pos_str + "\n" + node.__class__.__name__),
)
edge_kwargs = dict(arrowType="vee", style="solid")
cluster_kwargs = dict(rankdir="LR", pagedir="BL", style="rounded", bgcolor="azure2")
for work in self:
# Build cluster with tasks.
cluster_name = "cluster%s" % work.name
with fg.subgraph(name=cluster_name) as wg:
wg.attr(**cluster_kwargs)
wg.attr(label="%s (%s)" % (work.__class__.__name__, work.name))
#wg.attr(label=repr(work))
#wg.attr(label="%s (%s)\n%s (%s)" % (
# work.__class__.__name__, work.name, work.relworkdir, work.node_id))
for task in work:
wg.node(task.name, **node_kwargs(task))
# Connect children to task.
for child in task.get_children():
# Find file extensions required by this task
i = [dep.node for dep in child.deps].index(task)
edge_label = "+".join(child.deps[i].exts)
fg.edge(task.name, child.name, label=edge_label, color=task.color_hex,
**edge_kwargs)
# Treat the case in which we have a work producing output for other tasks.
for work in self:
children = work.get_children()
if not children: continue
cluster_name = "cluster%s" % work.name
seen = set()
for child in children:
# This is not needed, too confusing
#fg.edge(cluster_name, child.name, color=work.color_hex, **edge_kwargs)
# Find file extensions required by work
i = [dep.node for dep in child.deps].index(work)
for ext in child.deps[i].exts:
out = "%s (%s)" % (ext, work.name)
fg.node(out)
fg.edge(out, child.name, **edge_kwargs)
key = (cluster_name, out)
if key not in seen:
seen.add(key)
fg.edge(cluster_name, out, color=work.color_hex, **edge_kwargs)
# Treat the case in which we have a task that depends on external files.
seen = set()
for task in self.iflat_tasks():
#print(task.get_parents())
for node in (p for p in task.get_parents() if p.is_file):
#print("parent file node", node)
#infile = "%s (%s)" % (ext, work.name)
infile = node.filepath
if infile not in seen:
seen.add(infile)
fg.node(infile, **node_kwargs(node))
fg.edge(infile, task.name, color=node.color_hex, **edge_kwargs)
return fg
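# Sketch (requires the graphviz python package; `view` is the standard
# graphviz.Digraph method that renders and opens the graph):
#
#     graph = flow.get_graphviz(engine="dot")
#     graph.view(directory="/tmp", cleanup=True)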
@add_fig_kwargs
def plot_networkx(self, mode="network", with_edge_labels=False, ax=None, arrows=False,
node_size="num_cores", node_label="name_class", layout_type="spring", **kwargs):
"""
Use networkx to draw the flow with the connections among the nodes and
the status of the tasks.
Args:
mode: `network` to show connections, `status` to group tasks by status.
with_edge_labels: True to draw edge labels.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
arrows: if True draw arrowheads.
node_size: By default, the size of the node is proportional to the number of cores used.
node_label: By default, the task class is used to label node.
layout_type: Get positions for all nodes using `layout_type`. e.g. pos = nx.spring_layout(g)
.. warning::
Requires networkx package.
"""
self.allocate()
# Build the graph
import networkx as nx
g = nx.Graph() if not arrows else nx.DiGraph()
edge_labels = {}
for task in self.iflat_tasks():
g.add_node(task, name=task.name)
for child in task.get_children():
g.add_edge(task, child)
# TODO: Add getters! What about locked nodes!
i = [dep.node for dep in child.deps].index(task)
edge_labels[(task, child)] = " ".join(child.deps[i].exts)
filedeps = [d for d in task.deps if d.node.is_file]
for d in filedeps:
#print(d.node, d.exts)
g.add_node(d.node, name="%s (%s)" % (d.node.basename, d.node.node_id))
g.add_edge(d.node, task)
edge_labels[(d.node, task)] = "+".join(d.exts)
# This part is needed if we have a work that produces output used by other nodes.
for work in self:
children = work.get_children()
if not children:
continue
g.add_node(work, name=work.name)
for task in work:
g.add_edge(task, work)
edge_labels[(task, work)] = "all_ok "
for child in children:
#print(child)
g.add_edge(work, child)
i = [dep.node for dep in child.deps].index(work)
edge_labels[(work, child)] = "+".join(child.deps[i].exts)
# Get positions for all nodes using layout_type.
# e.g. pos = nx.spring_layout(g)
pos = getattr(nx, layout_type + "_layout")(g)
# Select function used to compute the size of the node
def make_node_size(node):
if node.is_task:
return 300 * node.manager.num_cores
else:
return 600
# Function used to build the label
def make_node_label(node):
if node_label == "name_class":
if node.is_file:
return "%s\n(%s)" % (node.basename, node.node_id)
else:
return (node.pos_str + "\n" + node.__class__.__name__
if hasattr(node, "pos_str") else str(node))
else:
raise NotImplementedError("node_label: %s" % str(node_label))
labels = {node: make_node_label(node) for node in g.nodes()}
ax, fig, plt = get_ax_fig_plt(ax=ax)
# Select plot type.
if mode == "network":
nx.draw_networkx(g, pos, labels=labels,
node_color=[node.color_rgb for node in g.nodes()],
node_size=[make_node_size(node) for node in g.nodes()],
width=1, style="dotted", with_labels=True, arrows=arrows, ax=ax)
# Draw edge labels
if with_edge_labels:
nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels, ax=ax)
elif mode == "status":
# Group tasks by status (only tasks are shown here).
for status in self.ALL_STATUS:
tasks = list(self.iflat_tasks(status=status))
# Draw nodes (color is given by status)
node_color = status.color_opts["color"]
if node_color is None: node_color = "black"
#print("num nodes %s with node_color %s" % (len(tasks), node_color))
nx.draw_networkx_nodes(g, pos,
nodelist=tasks,
node_color=node_color,
node_size=[make_node_size(task) for task in tasks],
alpha=0.5, ax=ax
#label=str(status),
)
# Draw edges.
nx.draw_networkx_edges(g, pos, width=2.0, alpha=0.5, arrows=arrows, ax=ax) # edge_color='r')
# Draw labels
nx.draw_networkx_labels(g, pos, labels, font_size=12, ax=ax)
# Draw edge labels
if with_edge_labels:
nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels, ax=ax)
#label_pos=0.5, font_size=10, font_color='k', font_family='sans-serif', font_weight='normal',
# alpha=1.0, bbox=None, ax=None, rotate=True, **kwds)
else:
raise ValueError("Unknown value for mode: %s" % str(mode))
ax.axis("off")
return fig
class G0W0WithQptdmFlow(Flow):
def __init__(self, workdir, scf_input, nscf_input, scr_input, sigma_inputs, manager=None):
"""
Build a :class:`Flow` for one-shot G0W0 calculations.
The computation of the q-points for the screening is parallelized with qptdm
i.e. we run independent calculations for each q-point and then we merge the final results.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
scr_input: Input for the SCR run.
sigma_inputs: Input(s) for the SIGMA run(s).
manager: :class:`TaskManager` object used to submit the jobs
Initialized from manager.yml if manager is None.
"""
super(G0W0WithQptdmFlow, self).__init__(workdir, manager=manager)
# Register the first work (GS + NSCF calculation)
bands_work = self.register_work(BandStructureWork(scf_input, nscf_input))
# Register the callback that will be executed to build the work for the SCR with qptdm.
scr_work = self.register_work_from_cbk(cbk_name="cbk_qptdm_workflow", cbk_data={"input": scr_input},
deps={bands_work.nscf_task: "WFK"}, work_class=QptdmWork)
# The last work contains a list of SIGMA tasks
# that will use the data produced in the previous two works.
if not isinstance(sigma_inputs, (list, tuple)):
sigma_inputs = [sigma_inputs]
sigma_work = Work()
for sigma_input in sigma_inputs:
sigma_work.register_sigma_task(sigma_input, deps={bands_work.nscf_task: "WFK", scr_work: "SCR"})
self.register_work(sigma_work)
self.allocate()
def cbk_qptdm_workflow(self, cbk):
"""
This callback is executed by the flow when bands_work.nscf_task reaches S_OK.
It computes the list of q-points for the W(q,G,G'), creates nqpt tasks
in the second work (QptdmWork), and connects the signals.
"""
scr_input = cbk.data["input"]
# Use the WFK file produced by the second
# Task in the first Work (NSCF step).
nscf_task = self[0][1]
wfk_file = nscf_task.outdir.has_abiext("WFK")
work = self[1]
work.set_manager(self.manager)
work.create_tasks(wfk_file, scr_input)
work.add_deps(cbk.deps)
work.set_flow(self)
# Each task has a reference to its work.
for task in work:
task.set_work(work)
# Add the garbage collector.
if self.gc is not None: task.set_gc(self.gc)
work.connect_signals()
work.build()
return work
class FlowCallbackError(Exception):
"""Exceptions raised by FlowCallback."""
class FlowCallback(object):
"""
This object implements the callbacks executed by the :class:`Flow` when
particular conditions are fulfilled. See on_dep_ok method of :class:`Flow`.
.. note::
I decided to implement callbacks via this object instead of a standard
approach based on bound methods because:
1) pickle (v<=3) does not support the pickling/unpickling of bound methods
2) There's some extra logic and extra data needed for the proper functioning
of a callback at the flow level and this object provides an easy-to-use interface.
"""
Error = FlowCallbackError
def __init__(self, func_name, flow, deps, cbk_data):
"""
Args:
func_name: String with the name of the callback to execute.
func_name must be a bound method of flow with signature:
func_name(self, cbk)
where self is the Flow instance and cbk is the callback
flow: Reference to the :class:`Flow`
deps: List of dependencies associated to the callback
The callback is executed when all dependencies reach S_OK.
cbk_data: Dictionary with additional data that will be passed to the callback via self.
"""
self.func_name = func_name
self.flow = flow
self.deps = deps
self.data = cbk_data or {}
self._disabled = False
def __str__(self):
return "%s: %s bound to %s" % (self.__class__.__name__, self.func_name, self.flow)
def __call__(self):
"""Execute the callback."""
if self.can_execute():
# Get the bound method of the flow from func_name.
# We use this trick because pickle (format <=3) does not support bound methods.
try:
func = getattr(self.flow, self.func_name)
except AttributeError as exc:
raise self.Error(str(exc))
return func(self)
else:
raise self.Error("You tried to __call_ a callback that cannot be executed!")
def can_execute(self):
"""True if we can execute the callback."""
return not self._disabled and all(dep.status == dep.node.S_OK for dep in self.deps)
def disable(self):
"""
Disable the callback. This usually happens after the callback has been executed.
"""
self._disabled = True
def enable(self):
"""Enable the callback"""
self._disabled = False
def handle_sender(self, sender):
"""
True if the callback is associated to the sender
i.e. if the node who sent the signal appears in the
dependencies of the callback.
"""
return sender in [d.node for d in self.deps]
# Factory functions.
def bandstructure_flow(workdir, scf_input, nscf_input, dos_inputs=None, manager=None, flow_class=Flow, allocate=True):
"""
Build a :class:`Flow` for band structure calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
dos_inputs: Input(s) for the NSCF run (dos run).
manager: :class:`TaskManager` object used to submit the jobs
Initialized from manager.yml if manager is None.
flow_class: Flow subclass
allocate: True if the flow should be allocated before returning.
Returns:
:class:`Flow` object
"""
flow = flow_class(workdir, manager=manager)
work = BandStructureWork(scf_input, nscf_input, dos_inputs=dos_inputs)
flow.register_work(work)
# Handy aliases
flow.scf_task, flow.nscf_task, flow.dos_tasks = work.scf_task, work.nscf_task, work.dos_tasks
if allocate: flow.allocate()
return flow
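# Sketch of the factory (scf_input/nscf_input are hypothetical AbinitInput objects):
#
#     flow = bandstructure_flow("flow_bands", scf_input, nscf_input)
#     flow.build_and_pickle_dump()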
def g0w0_flow(workdir, scf_input, nscf_input, scr_input, sigma_inputs, manager=None, flow_class=Flow, allocate=True):
"""
Build a :class:`Flow` for one-shot $G_0W_0$ calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
scr_input: Input for the SCR run.
sigma_inputs: List of inputs for the SIGMA run.
flow_class: Flow class
manager: :class:`TaskManager` object used to submit the jobs.
Initialized from manager.yml if manager is None.
allocate: True if the flow should be allocated before returning.
Returns:
:class:`Flow` object
"""
flow = flow_class(workdir, manager=manager)
work = G0W0Work(scf_input, nscf_input, scr_input, sigma_inputs)
flow.register_work(work)
if allocate: flow.allocate()
return flow
class PhononFlow(Flow):
"""
1) One workflow for the GS run.
2) nqpt works for phonon calculations. Each work contains
nirred tasks where nirred is the number of irreducible phonon perturbations
for that particular q-point.
"""
@classmethod
def from_scf_input(cls, workdir, scf_input, ph_ngqpt, with_becs=True, manager=None, allocate=True):
"""
Create a `PhononFlow` for phonon calculations from an `AbinitInput` defining a ground-state run.
Args:
workdir: Working directory of the flow.
scf_input: :class:`AbinitInput` object with the parameters for the GS-SCF run.
ph_ngqpt: q-mesh for phonons. Must be a sub-mesh of the k-mesh used for
electrons, e.g. if ngkpt = (8, 8, 8), then ph_ngqpt = (4, 4, 4) is a valid choice
whereas ph_ngqpt = (3, 3, 3) is not!
with_becs: True if Born effective charges are wanted.
manager: :class:`TaskManager` object. Read from `manager.yml` if None.
allocate: True if the flow should be allocated before returning.
Return:
:class:`PhononFlow` object.
"""
flow = cls(workdir, manager=manager)
# Register the SCF task
flow.register_scf_task(scf_input)
scf_task = flow[0][0]
# Make sure k-mesh and q-mesh are compatible.
scf_ngkpt, ph_ngqpt = np.array(scf_input["ngkpt"]), np.array(ph_ngqpt)
if any(scf_ngkpt % ph_ngqpt != 0):
raise ValueError("ph_ngqpt %s should be a sub-mesh of scf_ngkpt %s" % (ph_ngqpt, scf_ngkpt))
# Get the q-points in the IBZ from Abinit
qpoints = scf_input.abiget_ibz(ngkpt=ph_ngqpt, shiftk=(0, 0, 0), kptopt=1).points
# Create a PhononWork for each q-point. Add DDK and E-field if q == Gamma and with_becs.
for qpt in qpoints:
if np.allclose(qpt, 0) and with_becs:
ph_work = BecWork.from_scf_task(scf_task)
else:
ph_work = PhononWork.from_scf_task(scf_task, qpoints=qpt)
flow.register_work(ph_work)
if allocate: flow.allocate()
return flow
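# Sketch (`gs_inp` is a hypothetical AbinitInput with ngkpt = (4, 4, 4)):
#
#     flow = PhononFlow.from_scf_input("flow_phonons", gs_inp, ph_ngqpt=(2, 2, 2), with_becs=True)
#     sched = flow.make_scheduler()   # then run the flow with the scheduler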
def open_final_ddb(self):
"""
Open the DDB file located in the output directory of the flow.
Return:
:class:`DdbFile` object, None if file could not be found or file is not readable.
"""
ddb_path = self.outdir.has_abiext("DDB")
if not ddb_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
return None
from abipy.dfpt.ddb import DdbFile
try:
return DdbFile(ddb_path)
except Exception as exc:
logger.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
return None
def finalize(self):
"""This method is called when the flow is completed."""
# Merge all the out_DDB files found in work.outdir.
ddb_files = list(filter(None, [work.outdir.has_abiext("DDB") for work in self]))
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
mrgddb = wrappers.Mrgddb(manager=self.manager, verbose=0)
mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc)
print("Final DDB file available at %s" % out_ddb)
# Call the method of the super class.
retcode = super(PhononFlow, self).finalize()
return retcode
class NonLinearCoeffFlow(Flow):
"""
1) One workflow for the GS run.
2) nqpt works for electric field calculations. Each work contains
nirred tasks where nirred is the number of irreducible perturbations
for that particular q-point.
"""
@classmethod
def from_scf_input(cls, workdir, scf_input, manager=None, allocate=True):
"""
Create a `NonLinearCoeffFlow` for second order susceptibility calculations from
an `AbinitInput` defining a ground-state run.
Args:
workdir: Working directory of the flow.
scf_input: :class:`AbinitInput` object with the parameters for the GS-SCF run.
manager: :class:`TaskManager` object. Read from `manager.yml` if None.
allocate: True if the flow should be allocated before returning.
Return:
:class:`NonLinearCoeffFlow` object.
"""
flow = cls(workdir, manager=manager)
flow.register_scf_task(scf_input)
scf_task = flow[0][0]
nl_work = DteWork.from_scf_task(scf_task)
flow.register_work(nl_work)
if allocate: flow.allocate()
return flow
def open_final_ddb(self):
"""
Open the DDB file located in the output directory of the flow.
Return:
:class:`DdbFile` object, None if file could not be found or file is not readable.
"""
ddb_path = self.outdir.has_abiext("DDB")
if not ddb_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
return None
from abipy.dfpt.ddb import DdbFile
try:
return DdbFile(ddb_path)
except Exception as exc:
logger.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
return None
def finalize(self):
"""This method is called when the flow is completed."""
# Merge all the out_DDB files found in work.outdir.
ddb_files = list(filter(None, [work.outdir.has_abiext("DDB") for work in self]))
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
mrgddb = wrappers.Mrgddb(manager=self.manager, verbose=0)
mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc)
print("Final DDB file available at %s" % out_ddb)
# Call the method of the super class.
retcode = super(NonLinearCoeffFlow, self).finalize()
print("retcode", retcode)
#if retcode != 0: return retcode
return retcode
def phonon_flow(workdir, scf_input, ph_inputs, with_nscf=False, with_ddk=False, with_dde=False,
manager=None, flow_class=PhononFlow, allocate=True):
"""
Build a :class:`PhononFlow` for phonon calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
ph_inputs: List of Inputs for the phonon runs.
with_nscf: add an nscf task in front of all phonon tasks to make sure the q-point is covered
with_ddk: add the ddk step
with_dde: add the DDE step. If with_dde is set, with_ddk is switched on automatically
manager: :class:`TaskManager` used to submit the jobs
Initialized from manager.yml if manager is None.
flow_class: Flow class
Returns:
:class:`Flow` object
"""
logger.critical("phonon_flow is deprecated and could give wrong results")
if with_dde:
with_ddk = True
natom = len(scf_input.structure)
# Create the container that will manage the different works.
flow = flow_class(workdir, manager=manager)
# Register the first work (GS calculation)
# register_task creates a work for the task, registers it to the flow and returns the work
# the 0th element of the work is the task
scf_task = flow.register_task(scf_input, task_class=ScfTask)[0]
# Build a temporary work with a shell manager just to run
# ABINIT to get the list of irreducible perturbations for this q-point.
shell_manager = flow.manager.to_shell_manager(mpi_procs=1)
if with_ddk:
logger.info('add ddk')
# TODO
# MG Warning: be careful here because one should use tolde or tolwfr (tolvrs shall not be used!)
ddk_input = ph_inputs[0].deepcopy()
ddk_input.set_vars(qpt=[0, 0, 0], rfddk=1, rfelfd=2, rfdir=[1, 1, 1])
ddk_task = flow.register_task(ddk_input, deps={scf_task: 'WFK'}, task_class=DdkTask)[0]
if with_dde:
logger.info('add dde')
dde_input = ph_inputs[0].deepcopy()
dde_input.set_vars(qpt=[0, 0, 0], rfddk=1, rfelfd=2)
dde_input_idir = dde_input.deepcopy()
dde_input_idir.set_vars(rfdir=[1, 1, 1])
dde_task = flow.register_task(dde_input, deps={scf_task: 'WFK', ddk_task: 'DDK'}, task_class=DdeTask)[0]
if not isinstance(ph_inputs, (list, tuple)):
ph_inputs = [ph_inputs]
for i, ph_input in enumerate(ph_inputs):
fake_input = ph_input.deepcopy()
# Run abinit on the front-end to get the list of irreducible perturbations.
tmp_dir = os.path.join(workdir, "__ph_run" + str(i) + "__")
w = PhononWork(workdir=tmp_dir, manager=shell_manager)
fake_task = w.register(fake_input)
# Use the magic value paral_rf = -1 to get the list of irreducible perturbations for this q-point.
abivars = dict(
paral_rf=-1,
rfatpol=[1, natom], # Set of atoms to displace.
rfdir=[1, 1, 1], # Along this set of reduced coordinate axis.
)
fake_task.set_vars(abivars)
w.allocate()
w.start(wait=True)
# Parse the file to get the perturbations.
try:
irred_perts = yaml_read_irred_perts(fake_task.log_file.path)
except:
print("Error in %s" % fake_task.log_file.path)
raise
logger.info(irred_perts)
w.rmtree()
# Now we can build the final list of works:
# One work per q-point, each work computes all
# the irreducible perturbations for a single q-point.
work_qpt = PhononWork()
if with_nscf:
# MG: Warning this code assumes 0 is Gamma!
nscf_input = copy.deepcopy(scf_input)
nscf_input.set_vars(kptopt=3, iscf=-3, qpt=irred_perts[0]['qpt'], nqpt=1)
nscf_task = work_qpt.register_nscf_task(nscf_input, deps={scf_task: "DEN"})
deps = {nscf_task: "WFQ", scf_task: "WFK"}
else:
deps = {scf_task: "WFK"}
if with_ddk:
deps[ddk_task] = 'DDK'
logger.info(irred_perts[0]['qpt'])
for irred_pert in irred_perts:
#print(irred_pert)
new_input = ph_input.deepcopy()
#rfatpol 1 1 # Only the first atom is displaced
#rfdir 1 0 0 # Along the first reduced coordinate axis
qpt = irred_pert["qpt"]
idir = irred_pert["idir"]
ipert = irred_pert["ipert"]
# TODO this will work for phonons, but not for the other types of perturbations.
rfdir = 3 * [0]
rfdir[idir - 1] = 1
rfatpol = [ipert, ipert]
new_input.set_vars(
#rfpert=1,
qpt=qpt,
rfdir=rfdir,
rfatpol=rfatpol,
)
if with_ddk:
new_input.set_vars(rfelfd=3)
work_qpt.register_phonon_task(new_input, deps=deps)
flow.register_work(work_qpt)
if allocate: flow.allocate()
return flow
def phonon_conv_flow(workdir, scf_input, qpoints, params, manager=None, allocate=True):
"""
Create a :class:`Flow` to perform convergence studies for phonon calculations.
Args:
workdir: Working directory of the flow.
scf_input: :class:`AbinitInput` object defining a GS-SCF calculation.
qpoints: List of list of lists with the reduced coordinates of the q-point(s).
params:
To perform a convergence study wrt ecut: params=["ecut", [2, 4, 6]]
manager: :class:`TaskManager` object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
allocate: True if the flow should be allocated before returning.
Return:
:class:`Flow` object.
"""
qpoints = np.reshape(qpoints, (-1, 3))
flow = Flow(workdir=workdir, manager=manager)
for qpt in qpoints:
for gs_inp in scf_input.product(*params):
# Register the SCF task
work = flow.register_scf_task(gs_inp)
# Add the PhononWork connected to this scf_task.
flow.register_work(PhononWork.from_scf_task(work[0], qpoints=qpt))
if allocate: flow.allocate()
return flow
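# Sketch: phonon convergence at Gamma wrt ecut (values are illustrative):
#
#     flow = phonon_conv_flow("flow_phconv", scf_input, qpoints=[0, 0, 0],
#                             params=["ecut", [4, 8, 12]])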
|
mit
|
vortex-ape/scikit-learn
|
sklearn/datasets/tests/test_20news.py
|
11
|
3510
|
"""Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.datasets.tests.test_common import check_return_X_y
from functools import partial
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
try:
datasets.fetch_20newsgroups(subset='all',
download_if_missing=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# test subset = train
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 130107))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
# test subset = test
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 130107))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
# test return_X_y option
fetch_func = partial(datasets.fetch_20newsgroups_vectorized, subset='test')
check_return_X_y(bunch, fetch_func)
# test subset = all
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 130107))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
|
bsd-3-clause
|
Titan-C/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
10
|
32655
|
"""Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics.cluster import homogeneity_score
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_elkan_results():
rnd = np.random.RandomState(0)
X_normal = rnd.normal(size=(50, 10))
X_blobs, _ = make_blobs(random_state=0)
km_full = KMeans(algorithm='full', n_clusters=5, random_state=0, n_init=1)
km_elkan = KMeans(algorithm='elkan', n_clusters=5,
random_state=0, n_init=1)
for X in [X_normal, X_blobs]:
km_full.fit(X)
km_elkan.fit(X)
assert_array_almost_equal(km_elkan.cluster_centers_,
km_full.cluster_centers_)
assert_array_equal(km_elkan.labels_, km_full.labels_)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
    # check that the number of cluster centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
    # check that an error is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_explicit_init_shape():
# test for sensible errors when giving explicit init
# with wrong number of features or clusters
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 3))
for Class in [KMeans, MiniBatchKMeans]:
# mismatch of number of features
km = Class(n_init=1, init=X[:, :2], n_clusters=len(X))
msg = "does not match the number of features of the data"
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1,
init=lambda X_, k, random_state: X_[:, :2],
n_clusters=len(X))
assert_raises_regex(ValueError, msg, km.fit, X)
# mismatch of number of clusters
msg = "does not match the number of clusters"
km = Class(n_init=1, init=X[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1,
init=lambda X_, k, random_state: X_[:2, :],
n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
def test_k_means_fortran_aligned_data():
    # Check that KMeans works well even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42, n_clusters=2)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
# should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
msg = "does not match the number of clusters"
assert_raises_regex(ValueError, msg, MiniBatchKMeans(init=test_init,
random_state=42).fit,
X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_batch_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers, which in turn would make the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1,
algorithm='elkan')
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1,
algorithm='elkan')
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_int_input():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
for dtype in [np.int32, np.int64]:
X_int = np.array(X_list, dtype=dtype)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
for km in fitted_models:
assert_equal(km.cluster_centers_.dtype, np.float64)
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_predict_equal_labels():
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='full')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='elkan')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
def test_full_vs_elkan():
km1 = KMeans(algorithm='full', random_state=13)
km2 = KMeans(algorithm='elkan', random_state=13)
km1.fit(X)
km2.fit(X)
    assert_almost_equal(
        homogeneity_score(km1.predict(X), km2.predict(X)), 1.0)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
def test_max_iter_error():
km = KMeans(max_iter=-1)
assert_raise_message(ValueError, 'Number of iterations should be',
km.fit, X)
def test_float_precision():
km = KMeans(n_init=1, random_state=30)
mb_km = MiniBatchKMeans(n_init=1, random_state=30)
inertia = {}
X_new = {}
centers = {}
for estimator in [km, mb_km]:
for is_sparse in [False, True]:
for dtype in [np.float64, np.float32]:
if is_sparse:
X_test = sp.csr_matrix(X_csr, dtype=dtype)
else:
X_test = X.astype(dtype)
estimator.fit(X_test)
# dtype of cluster centers has to be the dtype of the input
# data
assert_equal(estimator.cluster_centers_.dtype, dtype)
inertia[dtype] = estimator.inertia_
X_new[dtype] = estimator.transform(X_test)
centers[dtype] = estimator.cluster_centers_
# ensure the extracted row is a 2d array
assert_equal(estimator.predict(X_test[:1]),
estimator.labels_[0])
if hasattr(estimator, 'partial_fit'):
estimator.partial_fit(X_test[0:3])
# dtype of cluster centers has to stay the same after
# partial_fit
assert_equal(estimator.cluster_centers_.dtype, dtype)
# compare arrays with low precision since the difference between
# 32 and 64 bit sometimes makes a difference up to the 4th decimal
# place
assert_array_almost_equal(inertia[np.float32], inertia[np.float64],
decimal=4)
assert_array_almost_equal(X_new[np.float32], X_new[np.float64],
decimal=4)
assert_array_almost_equal(centers[np.float32], centers[np.float64],
decimal=4)
def test_k_means_init_centers():
# This test is used to check KMeans won't mutate the user provided input
# array silently even if input data and init centers have the same type
X_small = np.array([[1.1, 1.1], [-7.5, -7.5], [-1.1, -1.1], [7.5, 7.5]])
init_centers = np.array([[0.0, 0.0], [5.0, 5.0], [-5.0, -5.0]])
for dtype in [np.int32, np.int64, np.float32, np.float64]:
X_test = dtype(X_small)
init_centers_test = dtype(init_centers)
assert_array_equal(init_centers, init_centers_test)
km = KMeans(init=init_centers_test, n_clusters=3, n_init=1)
km.fit(X_test)
assert_equal(False, np.may_share_memory(km.cluster_centers_, init_centers))
def test_sparse_k_means_init_centers():
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
# Get a local optimum
centers = KMeans(n_clusters=3).fit(X).cluster_centers_
# Fit starting from a local optimum shouldn't change the solution
np.testing.assert_allclose(
centers,
KMeans(n_clusters=3,
init=centers,
n_init=1).fit(X).cluster_centers_
)
# The same should be true when X is sparse
X_sparse = sp.csr_matrix(X)
np.testing.assert_allclose(
centers,
KMeans(n_clusters=3,
init=centers,
n_init=1).fit(X_sparse).cluster_centers_
)
def test_sparse_validate_centers():
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
# Get a local optimum
centers = KMeans(n_clusters=4).fit(X).cluster_centers_
# Test that a ValueError is raised for validate_center_shape
classifier = KMeans(n_clusters=3, init=centers, n_init=1)
msg = "The shape of the initial centers \(\(4L?, 4L?\)\) " \
"does not match the number of clusters 3"
assert_raises_regex(ValueError, msg, classifier.fit, X)
|
bsd-3-clause
|
JifuZhao/Poisson-Kriging
|
old/kriging.py
|
1
|
12806
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
__author__ = "Jifu Zhao"
__email__ = "[email protected]"
__date__ = "04/07/2017"
__modify__ = "05/04/2017"
"""
# -------------------------------------------------------------------------
# two problems
# 1. for the semivariogram, when h = 0, is it correct to manually set it to 0?
# 2. for the prediction variance, is it correct to manually add mu?
# -------------------------------------------------------------------------
import warnings
import numpy as np
import pandas as pd
from itertools import product
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import pairwise_distances
warnings.filterwarnings("ignore")
class Kriging(object):
""" Poisson Kriging in 2D applications """
def __init__(self):
""" initialization of Poisson Kriging """
self.x = None # location in x direction, 1d array
self.y = None # location in y direction, 1d array
self.z = None # measurements, 1d array
self.loc = None # location array, 2d array, format (x, y)
self.distance = None # pairwise distance, 2d array
self.mu = None # estimated mean value
self.a = None # tuned parameter for semivariogram model
self.c = None # tuned parameter for semivariogram model
self.grids = None # location to be estimated, 2d array
self.xmin = None # minimum x location
self.xmax = None # maximum x location
self.ymin = None # minimum y location
self.ymax = None # maximum y location
self.pred = None # predicted value corresponding to grids
self.pred_var = None # predicted variance
self.A = None # matrix A to solve A*x = b
self.inv_A = None # inverse of matrix A
self.alpha = None
self.model = None
def fit(self, x, y, z, xmin=None, xmax=None, ymin=None,
ymax=None, xsplit=100, ysplit=100):
""" fit the model, calculate the pairwise distance """
self.x = x # location in x direction, 1d array
self.y = y # location in y direction, 1d array
self.z = z # measurements, 1d array
self.loc = np.concatenate((self.x[:, np.newaxis],
self.y[:, np.newaxis]), axis=1)
# calculate the estimate of mean value and variance
self.mu = np.mean(z)
self.c = np.var(z)
# calculate the pairwise l2 distance
self.distance = pairwise_distances(self.loc, metric='l2', n_jobs=-1)
# split the experimental region into grids
        self.xmin = min(x) if (xmin is None) else xmin
        self.xmax = max(x) if (xmax is None) else xmax
        self.ymin = min(y) if (ymin is None) else ymin
        self.ymax = max(y) if (ymax is None) else ymax
self.grids = self._grid(self.xmin, self.xmax, self.ymin, self.ymax,
xsplit, ysplit)
def semivariogram(self, a_range, x_range=None, bandwidth=None,
model=None, figsize=(10, 5), verbose=False):
"""
plot the semivariogram for exploratory analysis (for z not y)
model can be 'spherical', 'exponential', 'gaussian' or None
"""
# calculate the semivariogram
h, gamma = self._semivariogram(x_range, bandwidth, model)
# calculate the fitted curve
a, x, pred, mse = self._fit_model(h, gamma, a_range, model)
print('*' * 40)
print(' Minimum distance is: {0:10.3f}'.format(np.min(self.distance)))
print(' Maximum distance is: {0:10.3f}'.format(np.max(self.distance)))
print(' Best parameter of a: {0:10.3f}'.format(a))
print('*' * 40)
# plot the semivariogram curve
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize)
ax1.set_title("Semivariogram Curve", fontsize=15)
ax1.plot(h, gamma, '.-', label="Actual Value")
ax1.plot(x, pred, 'r-', label="Fitted Value")
ax1.set_xlabel("Distance")
ax1.set_ylabel("Semivariance")
ax1.legend(fontsize=12)
ax2.set_title("Distribution of Distances", fontsize=15)
ax2.hist(self.distance.reshape(-1), bins=len(h), rwidth=0.6)
ax2.set_xlabel("Distance")
ax2.set_ylabel("Frequency")
        if verbose:
return fig, mse
else:
return fig
def predict(self, loc=None, x_range=None, bandwidth=None, a_range=None,
a=None, model=None, fit=True):
""" function to make predictions """
        if self.model is None:
            self.model = model
        if fit:
# calculate the semivariogram
h, gamma = self._semivariogram(x_range, bandwidth, model)
# calculate the fitted curve
self.a, x, pred, mse = self._fit_model(h, gamma, a_range, model)
else:
self.a = a
# make predictions
        if loc is None:
self.pred = np.zeros(len(self.grids))
self.pred_var = np.zeros(len(self.grids))
for i in range(len(self.grids)):
x = self.grids[i][0]
y = self.grids[i][1]
self.pred[i], self.pred_var[i] = self._solve(x, y, model)
else:
x = loc[0]
y = loc[1]
return self._solve(x, y, model)
def _solve(self, x, y, model):
""" function to solve A*x = b, return prediction and variance """
# calculate matrix A
n = len(self.z)
        if (self.A is None) or (self.model != model):
self.model = model
self.A = np.ones((n + 1, n + 1))
semi_z = self._fit(self.a, self.distance, self.c, model)
semi_y = semi_z - self.mu
semi_y[self.distance == 0] = 0
C = self.c - semi_y - self.mu
self.A[:n, :n] = C
for i in range(n):
self.A[i, i] += self.mu
self.A[n, n] = 0
try:
self.inv_A = np.linalg.inv(self.A)
            except np.linalg.LinAlgError:
                print("Cannot invert A, using the pseudo-inverse instead")
                try:
                    self.inv_A = np.linalg.pinv(self.A)
                except np.linalg.LinAlgError:
                    print("Cannot compute the pseudo-inverse of A either, aborting")
return
# make predictions on location (x, y)
b = np.ones(n + 1)
h = np.sqrt((self.x - x) ** 2 + (self.y - y) ** 2)
semi_z = self._fit(self.a, h, self.c, model)
semi_y = semi_z - self.mu
# semi_y[h == 0] = 0 # ------------------------------ problem ????
C = self.c - semi_y - self.mu
b[:n] = C
coeff = np.dot(self.inv_A, b)
Lambda = coeff[:n] # lambda coefficient
self.alpha = coeff[-1] # Lagrange multiplier
# calculate prediction and corresponding variance
pred = np.sum(Lambda * self.z)
var = self.c - self.mu - np.sum(Lambda * C) - self.alpha
var = var + self.mu # manually add mu ? # ------------ problem ????
return pred, var
def _semivariogram(self, x_range=None, bandwidth=None, model=None):
""" function to fit the semivariogram model (for z not y) """
# calculate the correct distance region
        if x_range is None:
            dist_min = np.min(self.distance)
            dist_max = np.max(self.distance)
            if bandwidth is None:
                bandwidth = (dist_max - dist_min) / 100
            x = np.arange(dist_min, dist_max, bandwidth)
        else:
            if bandwidth is None:
                bandwidth = (x_range[1] - x_range[0]) / 100
            x = np.arange(x_range[0], x_range[1], bandwidth)
h = [] # distance
gamma = [] # semivariogram value at distance h
for d in x:
low = d - bandwidth / 2.0
upp = d + bandwidth / 2.0
idx1, idx2 = np.where((self.distance >= low) &
(self.distance <= upp))
if len(idx1) == 0:
continue
tmp = np.mean([0.5 * (self.z[i] - self.z[j]) ** 2
for (i, j) in zip(idx1, idx2)])
h.append(d)
gamma.append(tmp)
return h, gamma
def _fit_model(self, h, gamma, a_range, model=None):
"""
function to fit the semivariogram model by minimize the MSE
model: "spherical", "exponential", "gaussian"
"""
mse = np.zeros(len(a_range))
for i in range(len(a_range)):
a = a_range[i]
fitted = self._fit(a, h, self.c, model)
mse[i] = np.mean((gamma - fitted) ** 2)
# find the minimum mse and corresponding a
a = a_range[np.argmin(mse)]
# make predictions
x = np.linspace(min(h), max(h), len(h) * 5)
pred = self._fit(a, x, self.c, model)
return a, x, pred, mse
def _fit(self, a, h, c, model=None):
""" function to fit the model """
if model == 'spherical':
fitted = self._spherical(a, h, c)
elif model == 'exponential':
fitted = self._exponential(a, h, c)
elif model == 'gaussian':
            fitted = self._gaussian(a, h, c)
        else:
            raise ValueError("model must be 'spherical', 'exponential' or 'gaussian'")
        return fitted
def _spherical(self, a, h, c):
""" Spherical model for semivariogram """
h = np.array(h)
ans = c * (1.5 * h / a - 0.5 * (h / a) ** 3)
ans[h > a] = c
return ans
def _exponential(self, a, h, c):
""" Exponential model for semivariogram """
h = np.array(h)
ans = c * (1 - np.exp(-h / a))
return ans
def _gaussian(self, a, h, c):
""" Gaussian model for semivariogram """
h = np.array(h)
ans = c * (1 - np.exp(- (h / a) ** 2))
return ans
def _grid(self, xmin, xmax, ymin, ymax, xsplit, ysplit):
""" function to divide the interested area into grids """
xstep = (xmax - xmin) / xsplit
ystep = (ymax - ymin) / ysplit
x_loc = np.arange(xmin + xstep / 2.0, xmax, xstep)
y_loc = np.arange(ymin + ystep / 2.0, ymax, ystep)
# form the grids
grids = product(x_loc, y_loc)
return np.array(list(grids))
def plot2D(self, fitted=False, figsize=(8, 6), s=50):
""" plot the fitted surface in 2D """
        if fitted and (self.pred is None):
print("Error, grid not calculated")
return
        if not fitted:
fig, ax = plt.subplots(figsize=figsize)
img = ax.scatter(self.x, self.y, c=self.z, s=s)
ax.axis('image')
ax.set_xlim((self.xmin, self.xmax))
ax.set_ylim((self.ymin, self.ymax))
ax.set_xlabel('X position', fontsize=12)
ax.set_ylabel('Y position', fontsize=12)
plt.colorbar(img, fraction=0.046, pad=0.04)
return fig
else:
fig, ax = plt.subplots(figsize=figsize)
img = ax.scatter(self.grids[:, 0], self.grids[:, 1], c=self.pred,
linewidths=0)
if s != 0:
ax.scatter(self.x, self.y, facecolor='none', s=s,
edgecolors='black', linewidths=0.8)
ax.axis('image')
ax.set_xlim((self.xmin, self.xmax))
ax.set_ylim((self.ymin, self.ymax))
ax.set_xlabel('X position', fontsize=12)
ax.set_ylabel('Y position', fontsize=12)
plt.colorbar(img, fraction=0.046, pad=0.04)
return fig
def plot_variance(self, figsize=(8, 6), s=50):
""" function to plot the variance """
        if self.pred is None:
print("Error, grid not calculated")
return
fig, ax = plt.subplots(figsize=figsize)
img = ax.scatter(self.grids[:, 0], self.grids[:, 1], c=self.pred_var,
linewidths=0)
if s != 0:
ax.scatter(self.x, self.y, facecolor='none', s=s,
edgecolors='black', linewidths=0.8)
ax.axis('image')
ax.set_xlim((self.xmin, self.xmax))
ax.set_ylim((self.ymin, self.ymax))
ax.set_xlabel('X position', fontsize=12)
ax.set_ylabel('Y position', fontsize=12)
plt.colorbar(img, fraction=0.046, pad=0.04)
return fig
def get_result(self):
""" get the fitted result """
df = pd.DataFrame({'x': self.grids[:, 0], 'y': self.grids[:, 1],
'estimate': self.pred, 'variance': self.pred_var})
return self.distance, self.mu, self.a, self.c,\
df[['x', 'y', 'estimate', 'variance']]
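# Example (illustrative only): one possible way to drive the class above
# end-to-end on synthetic data. The array sizes, the grid resolution, the
# exponential model and all parameter values below are arbitrary assumptions,
# not part of the original workflow.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x_demo = rng.uniform(0, 10, size=50)                 # measurement x locations
    y_demo = rng.uniform(0, 10, size=50)                 # measurement y locations
    z_demo = rng.poisson(lam=5, size=50).astype(float)   # count-like measurements
    krig = Kriging()
    krig.fit(x_demo, y_demo, z_demo, xsplit=20, ysplit=20)
    # fit an exponential semivariogram over a grid of candidate range parameters,
    # then predict on the internal grid of locations
    krig.predict(a_range=np.linspace(0.5, 5.0, 10), model='exponential')
    distance, mu, a, c, result_df = krig.get_result()
    print(result_df.head())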
|
mit
|
timgasser/bcycle-austin
|
notebooks/bcycle_lib/utils.py
|
1
|
7524
|
# Common library routines for the BCycle analysis
import pandas as pd
import numpy as np
INPUT_DIR = '../input'
def load_bikes(file=INPUT_DIR + '/bikes.csv'):
'''
Load the bikes CSV file, converting column types
    INPUT: Filename to read (defaults to `../input/bikes.csv`)
RETURNS: Pandas dataframe containing bikes information
'''
try:
bikes_df = pd.read_csv(file,
dtype={'station_id' : np.int8,
'bikes' : np.int8,
'docks' : np.int8}
)
bikes_df['datetime'] = pd.to_datetime(bikes_df['datetime'], format='%Y-%m-%d %H:%M:%S')
return bikes_df
except OSError as e:
print('Error opening {0}. Do you need to unzip {0}.zip?'.format(file))
return None
def load_stations(file=INPUT_DIR + '/stations.csv'):
'''
Load the stations CSV file, converting column types
    INPUT: Filename to read (defaults to `../input/stations.csv`)
RETURNS: Pandas dataframe containing stations information
'''
try:
stations_df = pd.read_csv(file,
dtype={'station_id' : np.int8,
'lat' : np.float32,
'lon' : np.float32}
)
stations_df['datetime'] = pd.to_datetime(stations_df['datetime'], format='%Y-%m-%d %H:%M:%S')
return stations_df
except OSError as e:
print('Error opening {0}. Do you need to unzip {0}.zip?'.format(file))
return None
def load_weather(file=INPUT_DIR + '/weather.csv'):
'''Loads the weather CSV and converts types'''
try:
df = pd.read_csv(file)
except OSError as e:
print('Error opening {0}. Do you need to unzip {0}.zip?'.format(file))
return None
# Remove whitespace and keep min/max values
df.columns = [col.strip() for col in df.columns]
df = df[['CDT','Max TemperatureF','Min TemperatureF',
'Max Humidity', 'Min Humidity',
'Max Sea Level PressureIn', 'Min Sea Level PressureIn',
'Max Wind SpeedMPH', 'Mean Wind SpeedMPH', 'Max Gust SpeedMPH',
'PrecipitationIn', 'CloudCover', 'Events']]
# Clean up column names, drop means as they're a linear combination of max/min
df.columns = ['date', 'max_temp', 'min_temp', 'max_humidity', 'min_humidity',
'max_pressure', 'min_pressure', 'max_wind', 'min_wind', 'max_gust',
'precipitation', 'cloud_cover', 'events']
# Convert column types appropriately
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
df.index = df['date']
df = df.drop('date', axis=1)
df[['max_temp', 'min_temp']] = df[['max_temp', 'min_temp']].astype(np.uint8)
df[['max_humidity', 'min_humidity']] = df[['max_humidity', 'min_humidity']].astype(np.uint8)
df[['max_wind', 'min_wind', 'max_gust']] = df[['max_wind', 'min_wind', 'max_gust']].astype(np.uint8)
# Cloud cover is a fraction of 8 -
# http://help.wunderground.com/knowledgebase/articles/129043-how-can-i-translate-the-cloud-cover-data-on-your
df['cloud_pct'] = (df['cloud_cover'].astype(np.float32) / 8.0) * 100
df = df.drop('cloud_cover', axis=1)
# Precipitation sometimes has 'T' for trace amounts of rain. Replace this with small value
# and convert to a float
# http://help.wunderground.com/knowledgebase/articles/656875-what-does-t-stand-for-on-the-rain-precipitation
df['precipitation'] = df['precipitation'].replace('T', 0.01)
df['precipitation'] = df['precipitation'].astype(np.float32)
    # Events are tricky: they're separated by hyphens, and can contain multiple values, not always in the same order!
events = set()
df['events'] = df['events'].replace(np.nan, 'None')
for row in df['events']:
if row is not np.nan:
line = row.split('-')
[events.add(word.lower()) for word in line]
for event in events:
df[event] = df['events'].apply(str.lower).str.contains(event).astype(np.uint8)
df = df.drop(['events', 'none'], axis=1)
return df
def haversine_dist(lat1, lon1, lat2, lon2, R=3961):
'''
Calculates the distance between two points in miles using the haversine formula
INPUT: lat1/lon1 and lat2/lon2 are position values
R is an optional radius of the planet
RETURNS: Distance between the points in miles
'''
dlon = np.radians(lon2 - lon1)
dlat = np.radians(lat2 - lat1)
lat1 = np.radians(lat1)
lat2 = np.radians(lat2)
a = (np.sin(dlat/2.0))**2 + np.cos(lat1) * np.cos(lat2) * (np.sin(dlon/2.0))**2
c = 2 * np.arctan2( np.sqrt(a), np.sqrt(1-a) )
d = R * c
return d
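# Illustrative check (the coordinates are arbitrary nearby points, not specific
# landmarks): with the default R=3961 the result is in miles; passing the Earth's
# mean radius in kilometres (about 6371) returns kilometres instead.
def example_haversine_dist():
    miles = haversine_dist(30.2747, -97.7404, 30.2655, -97.7470)
    km = haversine_dist(30.2747, -97.7404, 30.2655, -97.7470, R=6371)
    return miles, km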
def load_bike_trips():
# Sort the bikes_df dataframe by station_id first, and then datetime so we
# can use a diff() and get the changes by time for each station
bikes_df = load_bikes()
bikes_df = bikes_df.sort_values(['station_id', 'datetime']).copy()
stations = bikes_df['station_id'].unique()
# Our dataframe is grouped by station_id first now, so grab each station in
# turn and do a diff() on bikes and docks for each station individually
diff_list = list()
for station in stations:
station_diff_df = bikes_df[bikes_df['station_id'] == station].copy()
station_diff_df['bikes_diff'] = station_diff_df['bikes'].diff()
station_diff_df['docks_diff'] = station_diff_df['docks'].diff()
diff_list.append(station_diff_df)
# Concatenate the station dataframes back together into a single one.
# Make sure we didn't lose any rows in the process (!)
bikes_diff_df = pd.concat(diff_list)
# The first row of each station-wise diff is filled with NaNs, store a 0 in these fields
# then we can convert the data type from floats to int8s
bikes_diff_df.fillna(0, inplace=True)
bikes_diff_df[['bikes_diff', 'docks_diff']] = bikes_diff_df[['bikes_diff', 'docks_diff']].astype(np.int8)
bikes_diff_df.index = bikes_diff_df['datetime']
bikes_diff_df.drop('datetime', axis=1, inplace=True)
assert(bikes_df.shape[0] == bikes_diff_df.shape[0])
bike_trips_df = bikes_diff_df
bike_trips_df['checkouts'] = bike_trips_df['bikes_diff']
bike_trips_df.loc[bike_trips_df['checkouts'] > 0, 'checkouts'] = 0
bike_trips_df['checkouts'] = bike_trips_df['checkouts'].abs()
# Conversely, checkins are positive `bikes_diff` values
bike_trips_df['checkins'] = bike_trips_df['bikes_diff']
bike_trips_df.loc[bike_trips_df['checkins'] < 0, 'checkins'] = 0
bike_trips_df['checkins'] = bike_trips_df['checkins'].abs()
    # Might want to use the sum of checkouts and checkins to find the "busiest" stations
bike_trips_df['totals'] = bike_trips_df['checkouts'] + bike_trips_df['checkins']
return bike_trips_df
def load_daily_rentals(all_stations=False):
bike_trips_df = load_bike_trips()
daily_bikes_df = bike_trips_df.copy()
if not all_stations:
daily_bikes_df = daily_bikes_df[daily_bikes_df['station_id'] < 49]
daily_bikes_df = daily_bikes_df.reset_index()
daily_bikes_df = daily_bikes_df[['datetime', 'checkouts']]
daily_bikes_df.columns = ['date', 'rentals']
daily_bikes_df = daily_bikes_df.groupby('date').sum()
daily_bikes_df = daily_bikes_df.resample('1D').sum()
return daily_bikes_df
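# Usage sketch: one way the loaders above might be combined in a notebook. It
# assumes the zipped CSVs under `../input` have already been extracted; the
# inner join simply aligns daily rental totals with same-day weather.
def example_daily_rentals_with_weather():
    weather_df = load_weather()
    daily_rentals_df = load_daily_rentals()
    if weather_df is None or daily_rentals_df is None:
        return None
    return daily_rentals_df.join(weather_df, how='inner')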
|
mit
|
SwissTPH/TBRU_serialTB
|
scripts/variant_processing/1_parse_patient_FINAL.py
|
1
|
2810
|
#!/usr/bin/env python
#Parse FINAL, annotated output of patient vSNP data
#These vSNP calls emerge from the first part of the pipeline
#they are filtered by:
# - base-calling quality,
# - mapping quality,
# - minimal overall coverage,
# - at least 2 forward and 2 reverse reads,
# - strand filter,
# - minimum frequency: 0.5%
# - remove problematic loci,
# - exclude calls with read-end bias,
# - annotated to H37Rv, using H37Rv loci
# if a vSNP locus was absent in H37Rv it was given a % and reported
# as it was in CCDC5079.
#12 patients with several samples each
##############
#KERNEL SETUP#
##############
import os
import pickle
import pandas as pd
import datetime
###############
#PATHS TO DATA#
###############
CWD = os.getcwd() #get current working directory
INTERIM_PATH = os.path.abspath(os.path.join(CWD, os.pardir, os.pardir, 'data', 'interim'))
PATH_TO_RESULT = os.path.abspath(os.path.join(CWD, os.pardir, os.pardir, 'data', 'raw', 'FINAL_vSNP'))
FINAL_LIST = '{}{}FINAL.list'.format(INTERIM_PATH,os.sep)
SNPS = '{}{}SNP_COVERAGE.pkl'.format(INTERIM_PATH,os.sep)
################
#OPEN AND PARSE#
################
INDEX_KEY = 0
FINAL_DATA = {} #data will be stored as a dictionary: easy conversion to DataFrame
try:
SNP_COVERAGE = pickle.load(open(SNPS,'rb'))
except IOError:
print('Cannot open - make sure the following file exists:', SNPS)
for result in open(FINAL_LIST):
#PARSE BASIC RESULT "X.snp"
result = result.strip()
_SAMPLE_ID = result.split('.')[0]
_PATIENT_ID = _SAMPLE_ID[:2]
_TIME_ID = _SAMPLE_ID[2:]
    for line in open('{}{}{}'.format(PATH_TO_RESULT, os.sep, result)):
line = line.strip()
split = line.split()
_locus = int(split[0])
_ref_base = line.split()[1]
_alt_base = line.split()[2]
_freq = float(split[3][:-1])/100
#Get the coverage for a SNP from a specific sample (nested dictionary)
try:
_coverage = SNP_COVERAGE.get(_locus,None).get(_SAMPLE_ID, None)
        except (AttributeError, NameError):  # locus missing, or SNP_COVERAGE failed to load
_coverage = None
FINAL_DATA[INDEX_KEY] = {'PATIENT': _PATIENT_ID, 'LOCUS': _locus,
'REF_BASE': _ref_base, 'ALT_BASE': _alt_base,
'FREQUENCY': round(_freq,4), 'STAGE': 'FINAL',
'COVERAGE': _coverage, 'TIME': _TIME_ID,
'SAMPLE': _SAMPLE_ID}
INDEX_KEY+=1
FINAL_DF = pd.DataFrame(FINAL_DATA).T
NOW = datetime.datetime.now()
#TARGET = '../../data/interim/{}_FINAL'.format(NOW.strftime('%y%m%d'))
TARGET = '{}{}1_FINAL'.format(INTERIM_PATH,os.sep)
pickle.dump(FINAL_DATA, open('{}.pkl'.format(TARGET),'wb'))
FINAL_DF.to_csv('{}.csv'.format(TARGET))
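# Optional sanity check (illustrative, not required by the pipeline): re-load the
# CSV that was just written and confirm the row count matches the DataFrame.
_roundtrip_df = pd.read_csv('{}.csv'.format(TARGET), index_col=0)
assert len(_roundtrip_df) == len(FINAL_DF), 'row count mismatch after round-trip'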
|
gpl-3.0
|