repo_name (stringlengths 6–112) | path (stringlengths 4–204) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 714–810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
Peilong/ganglia-parser | gangliaParser.py | 2 | 3339 | #!/usr/bin/python
__author__ = 'Peilong'
import urllib2
import re
import sys
import json
from pprint import pprint
import matplotlib.pyplot as plt
import ConfigParser
import string
import decimal
import datetime
import os
def get_metric(remove_duplicate, metric):
global dir_str
for index, value in enumerate (remove_duplicate):
#print index, value
### Concatenate the final URL where the JSON data is located
jsonUrl = gangliaUrl + 'graph.php?r=hour&c=spark&h='+ value + '&v=0.0&m='+ metric +'&jr=&js=&json=1'
print "--> from host: ", value
#print "Web view: ", jsonUrl
# retrieve json data
response = urllib2.urlopen(jsonUrl)
jsondata = json.load(response)
### Generate the JSON files
fnbase = dir_str + remove_duplicate[index] + '-' + metric
filename = fnbase +'.json'
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, "w") as f:
json.dump(jsondata, f)
f.close()
# check if json file is empty or contains "null"
if jsondata is None:
continue
xaxis = []
yaxis = []
#print jsondata[0]
rawdata = jsondata[0]['datapoints']
#print "rawdata=", rawdata
# The last two datapoints are usually meaningless, so subtract 2
for i in xrange (0, len(rawdata)-2):
xaxis.append(rawdata[i][1])
yaxis.append(rawdata[i][0])
### Plot JSON data to png figures
plt.figure()
plt.clf()
plt.title(value)
plt.plot(xaxis, yaxis, 'k')
plt.ylabel('Value '+ metric)
plt.xlabel('Time (1 hour in total)')
plt.savefig(fnbase+'.png')
return
if len (sys.argv) < 2:
print 'Usage: python gangliaParser.py target.conf'
sys.exit(2)
else:
filename = sys.argv[-1]
### Header
print '-'*45
print ' *** Ganglia JSON Parser Tool V - 0.2 ***'
print ''
print ' Usage: python gangliaParser.py target.conf'
print '-'*45
### Configuration file parser
cf = ConfigParser.ConfigParser()
cf.read(filename)
dnsAddress = cf.get("target","dns")
# metrics is a comma-separated list
metrics = cf.get("option","metrics")
#node_number = int (cf.get("node","number"))
### Ganglia web UI IP address
gangliaUrl = 'http://'+dnsAddress + ':5080/ganglia/'
### Ganglia web UI index page
contents = urllib2.urlopen(gangliaUrl).read()
### Regular expression matching to grab the node internal IP
pattern = re.compile('ip-\w{1,3}-\w{1,3}-\w{1,3}-\w{1,3}\.ec2\.internal')
result = pattern.findall(contents)
seen = set()
remove_duplicate = []
for item in result:
if item not in seen:
seen.add(item)
remove_duplicate.append(item)
#print "set size =", len(seen)
#num_nodes = len(seen)
# generate a timestamp for the json directory and organize output files in it
utc_datetime = datetime.datetime.utcnow()
dir_str = "metrics-"+utc_datetime.strftime("%Y-%m-%d-%H:%M:%S/")
print "Creating directory "+dir_str
# get all the metrics one by one
for metric in metrics.split(','):
print "Getting metric \""+metric+"\""
get_metric(remove_duplicate, metric)
### Final printout message
print '+'*45
print '[SUCCESS] .JSON and .PNG files are saved!'
print '[SUCCESS] to ./'+dir_str
print '+'*45
| gpl-2.0 |
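A minimal sketch of the target.conf layout the script above expects, inferred from its ConfigParser calls; the DNS name and metric names are placeholders, not values taken from the source.

sample_conf = """\
[target]
dns = ec2-203-0-113-10.compute-1.amazonaws.com

[option]
metrics = cpu_user,mem_free,bytes_in
"""
with open('target.conf', 'w') as conf_file:
    conf_file.write(sample_conf)  # gangliaParser.py reads [target] dns and [option] metrics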
fzalkow/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that partial_fit validates n_components changed via set_params.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing the number of features will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
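A minimal usage sketch (not part of the test suite above) of the batch-streaming pattern that test_incremental_pca_partial_fit exercises; the array shape and batch size here are arbitrary.

import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.RandomState(0)
X = rng.randn(100, 5)                      # arbitrary toy data
ipca = IncrementalPCA(n_components=2)
for start in range(0, X.shape[0], 20):     # feed the data 20 rows at a time
    ipca.partial_fit(X[start:start + 20])
X_reduced = ipca.transform(X)              # (100, 2) projection using the streamed fit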
RachitKansal/scikit-learn | sklearn/neighbors/classification.py | 132 | 14388 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, ValueError is raised, when outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
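A small sketch of the outlier_label behaviour documented above: a query point with no neighbours inside radius gets the fallback label instead of triggering a ValueError. The data values are illustrative only.

from sklearn.neighbors import RadiusNeighborsClassifier

X = [[0], [1], [2], [3]]
y = [0, 0, 1, 1]
clf = RadiusNeighborsClassifier(radius=0.5, outlier_label=-1)
clf.fit(X, y)
print(clf.predict([[10.0]]))   # no training point within 0.5 -> predicts [-1]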
hbhzwj/SADIT | util/mod.py | 1 | 1576 | """Put import statements that may fail here.
"""
from __future__ import print_function, division
try:
import numpy as np
except ImportError:
np = None
print('--> [warning], no numpy, some functionality may be affected')
try:
import guiqwt.pyplot as plt
print('--> Use [guiqwt] as plot backend')
except ImportError:
try:
import matplotlib.pyplot as plt
# import guiqwt.pyplot as plt
print('--> Use [matplotlib] as plot backend')
except Exception:
plt = None
print('--> [warning], no [guiqwt] or [matplotlib], cannot visualize the result')
try:
from collections import Counter
except ImportError:
Counter = None
print('--> [warning], no collections.Counter, some functionality may be affected')
try:
import _mysql as mysql
from MySQLdb.constants import FIELD_TYPE
except ImportError:
mysql = None
# FIELD_TYPE = object
# FIELD_TYPE = object
from Namespace import Namespace
FIELD_TYPE = Namespace({
'INT24': None,
'LONG': None,
'LONGLONG': None
})
print('--> [warning] cannot import sql related functions, reading from a sql server is not supported')
try:
import tables
except ImportError:
tables = False
#########################################
## Adaption for Python3
#########################################
try:
import Queue as queue  # Python 2 name; the except branch below imports 'queue' on Python 3
except ImportError:
import queue
try:
from itertools import izip
except ImportError:
izip = zip
| gpl-3.0 |
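A hypothetical caller-side sketch of how the optional imports above are meant to be used: downstream code checks the sentinel values (None/False) before touching the backend. The import path and function name are assumptions for illustration, not taken from the repository.

from util.mod import np, plt  # assumed import path for SADIT's util/mod.py

def plot_series(data):
    # Degrade gracefully when an optional backend is missing.
    if np is None or plt is None:
        print('plotting skipped: numpy or a plot backend is unavailable')
        return
    plt.plot(np.asarray(data))
    plt.show()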
krikru/tensorflow-opencl | tensorflow/examples/learn/text_classification_character_rnn.py | 61 | 3350 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an example of using recurrent neural networks over characters for DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
def char_rnn_model(features, target):
"""Character level recurrent neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.one_hot(features, 256, 1, 0)
byte_list = tf.unstack(byte_list, axis=1)
cell = tf.contrib.rnn.GRUCell(HIDDEN_SIZE)
_, encoding = tf.contrib.rnn.static_rnn(cell, byte_list, dtype=tf.float32)
logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_rnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
gpostelnicu/fin_data | fin_data/io/wiki_store.py | 1 | 2253 | import glob
import os
from datetime import datetime
import pandas as pd
from functools32 import lru_cache
from pandas import HDFStore
from fin_data.util.file import latest_filename
class WikiStore(object):
"""
WikiStore is a HDFStore storage for a Quandl WIKI dataset.
The Quandl WIKI dataset can be retrieved from: https://www.quandl.com/data/WIKI-Wiki-EOD-Stock-Prices.
"""
def __init__(self, base_dir, date_index=True):
self.base_dir = base_dir
assert os.path.exists(self.base_dir)
self.date_index = date_index
self._init()
def keys(self):
return self.tickers
@lru_cache(maxsize=100)
def __getitem__(self, item):
df = self.store[item]
if self.date_index:
df.set_index('date', inplace=True)
return df
@staticmethod
def store_snapshot(base_dir, snapshot_file):
w_df = pd.read_csv(snapshot_file, parse_dates=[1])
w_df.columns = [c.replace('-', '_') for c in w_df.columns]
w_df.set_index('ticker', inplace=True)
w_df.sort_index(inplace=True)
snapshot_name = datetime.today().strftime('%Y%m%d')  # date-stamped name for the output snapshot
with HDFStore(os.path.join(base_dir, '{}.h5'.format(snapshot_name)), 'w',
complevel=6, complib='blosc') as store:
tickers = set(w_df.index)
for ticker in tickers:
df = w_df.loc[ticker, :]
df.reset_index(inplace=True)
df = df.drop('ticker', 1)
store[ticker] = df
def _init(self):
self.store = HDFStore(latest_filename('{}/*.h5'.format(self.base_dir)))
self.tickers = [t[1:] for t in self.store.keys()]
def close(self):
self.store.close()
def tickers_column(self, tickers, col='adj_close', fun_filter=None):
if not tickers:
return None
def fetch_column(ticker):
ticker_dat = self[ticker]
df = ticker_dat[[col]]
df.columns = [ticker]
if fun_filter:
df = fun_filter(df)
return df
buf = [fetch_column(ticker) for ticker in tickers]
if len(tickers) == 1:
return buf[0]
return buf[0].join(buf[1:])
| mit |
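A usage sketch for the class above, assuming base_dir already contains a snapshot produced by WikiStore.store_snapshot; the directory and ticker symbols are placeholders.

from fin_data.io.wiki_store import WikiStore

store = WikiStore('data/wiki')                    # opens the latest *.h5 snapshot in the directory
print(store.keys()[:5])                           # tickers available in the snapshot
prices = store.tickers_column(['AAPL', 'MSFT'],   # one adj_close column per ticker, joined on date
                              col='adj_close')
store.close()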
eg-zhang/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
akshaybabloo/Car-ND | Project_4/temp/main.py | 1 | 8248 | import cv2
import matplotlib.pyplot as plt
import pickle
import os
from glob import glob
import numpy as np
from PIL import Image
from moviepy.editor import ImageSequenceClip
from moviepy.video.io.VideoFileClip import VideoFileClip
from scipy.misc import imread
from tqdm import tqdm
from helper import *
with open("camera_cal/calibration.p", mode='rb') as f:
camera_calib = pickle.load(f)
mtx = camera_calib["mtx"]
dist = camera_calib["dist"]
prev_left_coeffs = None
prev_right_coeffs = None
def image_pipeline(file, filepath=False):
global prev_left_coeffs
global prev_right_coeffs
plt.clf()
if filepath == True:
# Read in image
raw = cv2.imread(file)
else:
raw = file
# Parameters
imshape = raw.shape
src = np.float32(
[[120, 720],
[550, 470],
[700, 470],
[1160, 720]])
dst = np.float32(
[[200, 720],
[200, 0],
[1080, 0],
[1080, 720]])
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
height = raw.shape[0]
offset = 50
offset_height = height - offset
half_frame = raw.shape[1] // 2
steps = 6
pixels_per_step = offset_height / steps
window_radius = 200
medianfilt_kernel_size = 51
blank_canvas = np.zeros((720, 1280))
colour_canvas = cv2.cvtColor(blank_canvas.astype(np.uint8), cv2.COLOR_GRAY2RGB)
# Apply distortion correction to raw image
image = cv2.undistort(raw, mtx, dist, None, mtx)
## Option I
combined = apply_thresholds(image)
## Option II
have_fit = False
curvature_checked = False
xgrad_thresh_temp = (40, 100)
s_thresh_temp = (150, 255)
while not have_fit:
combined_binary = apply_threshold_v2(image, xgrad_thresh=xgrad_thresh_temp, s_thresh=s_thresh_temp)
# plt.imshow(combined_binary, cmap="gray")
# Plotting thresholded images
"""
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.set_title('Option 1')
ax1.imshow(combined, cmap="gray")
ax2.set_title('Option 2: Combined S channel and gradient thresholds')
ax2.imshow(combined_binary, cmap='gray')
"""
# Warp onto birds-eye-view
# Previous region-of-interest mask's function is absorbed by the warp
warped = cv2.warpPerspective(combined_binary, M, (imshape[1], imshape[0]), flags=cv2.INTER_LINEAR)
# plt.imshow(warped, cmap="gray")
# Histogram and get pixels in window
leftx, lefty, rightx, righty = histogram_pixels(warped, horizontal_offset=40)
plt.imshow(warped, cmap="gray")
if len(leftx) > 1 and len(rightx) > 1:
have_fit = True
xgrad_thresh_temp = (xgrad_thresh_temp[0] - 2, xgrad_thresh_temp[1] + 2)
s_thresh_temp = (s_thresh_temp[0] - 2, s_thresh_temp[1] + 2)
left_fit, left_coeffs = fit_second_order_poly(lefty, leftx, return_coeffs=True)
# print("Left coeffs:", left_coeffs)
# print("righty[0]: ,", righty[0], ", rightx[0]: ", rightx[0])
right_fit, right_coeffs = fit_second_order_poly(righty, rightx, return_coeffs=True)
# print("Right coeffs: ", right_coeffs)
# Plot data
"""
plt.plot(left_fit, lefty, color='green', linewidth=3)
plt.plot(right_fit, righty, color='green', linewidth=3)
plt.imshow(warped, cmap="gray")
"""
# Determine curvature of the lane
# Define y-value where we want radius of curvature
# I'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = 500
left_curverad = np.absolute(((1 + (2 * left_coeffs[0] * y_eval + left_coeffs[1]) ** 2) ** 1.5) \
/ (2 * left_coeffs[0]))
right_curverad = np.absolute(((1 + (2 * right_coeffs[0] * y_eval + right_coeffs[1]) ** 2) ** 1.5) \
/ (2 * right_coeffs[0]))
# print("Left lane curve radius: ", left_curverad)
# print("Right lane curve radius: ", right_curverad)
curvature = (left_curverad + right_curverad) / 2
min_curverad = min(left_curverad, right_curverad)
# TODO: if plausible parallel, continue. Else don't make `curvature_checked` = True
if not plausible_curvature(left_curverad, right_curverad) or \
not plausible_continuation_of_traces(left_coeffs, right_coeffs, prev_left_coeffs, prev_right_coeffs):
if prev_left_coeffs is not None and prev_right_coeffs is not None:
left_coeffs = prev_left_coeffs
right_coeffs = prev_right_coeffs
prev_left_coeffs = left_coeffs
prev_right_coeffs = right_coeffs
# Determine vehicle position wrt centre
centre = center(719, left_coeffs, right_coeffs)
## 7. Warp the detected lane boundaries back onto the original image.
# print("Left coeffs: ", left_coeffs)
# print("Right fit: ", right_coeffs)
polyfit_left = draw_poly(blank_canvas, lane_poly, left_coeffs, 30)
polyfit_drawn = draw_poly(polyfit_left, lane_poly, right_coeffs, 30)
# plt.imshow(polyfit_drawn, cmap="gray")
# plt.imshow(warped)
# Convert to colour and highlight lane line area
trace = colour_canvas
trace[polyfit_drawn > 1] = [0, 0, 255]
# print("polyfit shape: ", polyfit_drawn.shape)
area = highlight_lane_line_area(blank_canvas, left_coeffs, right_coeffs)
trace[area == 1] = [0, 255, 0]
# plt.imshow(trace)
lane_lines = cv2.warpPerspective(trace, Minv, (imshape[1], imshape[0]), flags=cv2.INTER_LINEAR)
# plt.imshow(trace)
combined_img = cv2.add(lane_lines, image)
add_figures_to_image(combined_img, curvature=curvature,
vehicle_position=centre,
min_curvature=min_curverad,
left_coeffs=left_coeffs,
right_coeffs=right_coeffs)
plt.imshow(combined_img)
return combined_img
# def run():
# """
# Runs the pipeline.
# """
#
# if not os.path.isdir('video' + os.sep + 'seq'):
# os.mkdir('video' + os.sep + 'seq')
# to_image_sequence()
#
# video_in = VideoFileClip(VIDEO_LOCATION)
# video_size = tuple(video_in.size) # Get the video frames size.
#
# # Load calibrated images.
# cam_calibration = helper.get_camera_calibration()
# cam_calibrator = helper.CalibrateCamera(video_size, cam_calibration)
#
# # Load images with img_*.jpeg
# content = glob('video/seq/img_*.jpeg')
# images = []
# for con in tqdm(range(len(content)), desc='Reading files'):
# # images.append(imread('../video/seq/img_%s.jpeg' % i))
# images.append(imread('video/seq/img_%s.jpeg' % con))
#
# # Apply line detection to the read images and write them to a folder.
# rows = len(images)
# processed_images = []
# for row in tqdm(range(rows), desc='Applying DetectLines'):
# img = images[row]
#
# ld = processor.DetectLanes(SRC, DST, number_frame=FRAME_MEMORY, camera_calibration=cam_calibrator,
# transform_offset=OFFSET)
# img = ld.generate_frame(img)
#
# processed_images.append(img)
#
# # Write as image
# im = Image.fromarray(img)
# im.save('video/seq_new/img_{}.jpeg'.format(row))
#
# # # Create a backup.
# # with open('data.p', 'wb') as p:
# # pickle.dump({'images': processed_images}, p, protocol=pickle.HIGHEST_PROTOCOL)
#
# # Read the contents of processed image and make a video of it.
# new_content = glob('video/seq_new/img_*.jpeg')
# images_new = []
#
# # Read the images from the processed folder
# for i in tqdm(range(len(new_content)), desc='Reading processed images'):
# images_new.append(imread('video/seq_new/img_%s.jpeg' % i))
#
# # Write sequence of images to file as a video.
# new_clip = ImageSequenceClip(images_new, fps=video_in.fps)
# new_clip.write_videofile('processed_video.mp4')
from moviepy.editor import VideoFileClip
output = 'project_output_colour.mp4'
clip1 = VideoFileClip("video/project_video.mp4")
output_clip = clip1.fl_image(image_pipeline) #NOTE: this function expects color images!!
output_clip.write_videofile(output, audio=False)
| mit |
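A hypothetical single-frame check for the pipeline above, useful before committing to the full video render; the image path is an assumption, not taken from the source.

annotated = image_pipeline('test_images/frame0.jpg', filepath=True)  # reads the file with cv2.imread
plt.imshow(annotated)
plt.show()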
yonglehou/scikit-learn | sklearn/neighbors/classification.py | 106 | 13987 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array, shape = (n_samples, n_features)
A 2-D array representing the test points.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, ValueError is raised, when outlier is detected.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
richardtran415/pymatgen | pymatgen/electronic_structure/tests/test_plotter.py | 5 | 26593 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import json
import os
import unittest
import warnings
from io import open
import scipy
from monty.os.path import which
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
from pymatgen.electronic_structure.boltztrap import BoltztrapAnalyzer
from pymatgen.electronic_structure.cohp import CompleteCohp
from pymatgen.electronic_structure.core import Spin
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.electronic_structure.plotter import (
BoltztrapPlotter,
BSDOSPlotter,
BSPlotter,
BSPlotterProjected,
CohpPlotter,
DosPlotter,
fold_point,
plot_brillouin_zone,
plot_ellipsoid,
)
from pymatgen.io.vasp import Vasprun
from pymatgen.util.testing import PymatgenTest
class DosPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "complete_dos.json"), "r", encoding="utf-8") as f:
self.dos = CompleteDos.from_dict(json.load(f))
self.plotter = DosPlotter(sigma=0.2, stack=True)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_add_dos_dict(self):
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 0)
self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 4)
def test_get_dos_dict(self):
self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
for el in ["Li", "Fe", "P", "O"]:
self.assertIn(el, d)
# Minimal baseline testing for get_plot. not a true test. Just checks that
# it can actually execute.
def test_get_plot(self):
# Disabling latex is needed for this test to work.
from matplotlib import rc
rc("text", usetex=False)
self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)
plt = self.plotter.get_plot()
self.plotter.save_plot("dosplot.png")
self.assertTrue(os.path.isfile("dosplot.png"))
os.remove("dosplot.png")
plt.close("all")
class BSPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "CaO_2605_bandstructure.json"), "r", encoding="utf-8") as f:
d = json.loads(f.read())
self.bs = BandStructureSymmLine.from_dict(d)
self.plotter = BSPlotter(self.bs)
self.assertEqual(len(self.plotter._bs), 1, "wrong number of band objects")
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "N2_12103_bandstructure.json"), "r", encoding="utf-8") as f:
d = json.loads(f.read())
self.sbs_sc = BandStructureSymmLine.from_dict(d)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "C_48_bandstructure.json"), "r", encoding="utf-8") as f:
d = json.loads(f.read())
self.sbs_met = BandStructureSymmLine.from_dict(d)
self.plotter_multi = BSPlotter([self.sbs_sc, self.sbs_met])
self.assertEqual(len(self.plotter_multi._bs), 2, "wrong number of band objects")
self.assertEqual(self.plotter_multi._nb_bands, [96, 96], "wrong number of bands")
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_add_bs(self):
self.plotter_multi.add_bs(self.sbs_sc)
self.assertEqual(len(self.plotter_multi._bs), 3, "wrong number of band objects")
self.assertEqual(self.plotter_multi._nb_bands, [96, 96, 96], "wrong number of bands")
def test_get_branch_steps(self):
steps_idx = BSPlotter._get_branch_steps(self.sbs_sc.branches)
self.assertEqual(steps_idx, [0, 121, 132, 143], "wrong list of steps idx")
def test_rescale_distances(self):
rescaled_distances = self.plotter_multi._rescale_distances(self.sbs_sc, self.sbs_met)
self.assertEqual(
len(rescaled_distances),
len(self.sbs_met.distance),
"wrong lenght of distances list",
)
self.assertEqual(rescaled_distances[-1], 6.5191398067252875, "wrong last distance value")
self.assertEqual(
rescaled_distances[148],
self.sbs_sc.distance[19],
"wrong distance at high symm k-point",
)
def test_interpolate_bands(self):
data = self.plotter.bs_plot_data()
d = data["distances"]
en = data["energy"]["1"]
int_distances, int_energies = self.plotter._interpolate_bands(d, en)
self.assertEqual(len(int_distances), 10, "wrong length of distances list")
self.assertEqual(len(int_distances[0]), 100, "wrong length of distances in a branch")
self.assertEqual(len(int_energies), 10, "wrong length of energies list")
self.assertEqual(int_energies[0].shape, (16, 100), "wrong shape of interpolated energies")
def test_bs_plot_data(self):
self.assertEqual(
len(self.plotter.bs_plot_data()["distances"]),
10,
"wrong number of sequences of branches",
)
self.assertEqual(
len(self.plotter.bs_plot_data()["distances"][0]),
16,
"wrong number of distances in the first sequence of branches",
)
self.assertEqual(
sum([len(e) for e in self.plotter.bs_plot_data()["distances"]]),
160,
"wrong number of distances",
)
length = len(self.plotter.bs_plot_data(split_branches=False)["distances"][0])
self.assertEqual(length, 144, "wrong number of distances in the first sequence of branches")
length = len(self.plotter.bs_plot_data(split_branches=False)["distances"])
self.assertEqual(length, 2, "wrong number of sequences of branches")
self.assertEqual(self.plotter.bs_plot_data()["ticks"]["label"][5], "K", "wrong tick label")
self.assertEqual(
len(self.plotter.bs_plot_data()["ticks"]["label"]),
19,
"wrong number of tick labels",
)
def test_get_ticks(self):
self.assertEqual(self.plotter.get_ticks()["label"][5], "K", "wrong tick label")
self.assertEqual(
self.plotter.get_ticks()["distance"][5],
2.406607625322699,
"wrong tick distance",
)
# Minimal baseline testing for get_plot. not a true test. Just checks that
# it can actually execute.
def test_get_plot(self):
# zero_to_efermi = True, ylim = None, smooth = False,
# vbm_cbm_marker = False, smooth_tol = None
# Disabling latex is needed for this test to work.
from matplotlib import rc
rc("text", usetex=False)
plt = self.plotter.get_plot()
self.assertEqual(plt.ylim(), (-4.0, 7.6348), "wrong ylim")
plt = self.plotter.get_plot(smooth=True)
plt = self.plotter.get_plot(vbm_cbm_marker=True)
self.plotter.save_plot("bsplot.png")
self.assertTrue(os.path.isfile("bsplot.png"))
os.remove("bsplot.png")
plt.close("all")
# test plotter with 2 bandstructures
plt = self.plotter_multi.get_plot()
self.assertEqual(len(plt.gca().get_lines()), 874, "wrong number of lines")
self.assertEqual(plt.ylim(), (-10.0, 10.0), "wrong ylim")
plt = self.plotter_multi.get_plot(zero_to_efermi=False)
self.assertEqual(plt.ylim(), (-15.2379, 12.67141266), "wrong ylim")
plt = self.plotter_multi.get_plot(smooth=True)
self.plotter_multi.save_plot("bsplot.png")
self.assertTrue(os.path.isfile("bsplot.png"))
os.remove("bsplot.png")
plt.close("all")
class BSPlotterProjectedTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "Cu2O_361_bandstructure.json"), "r", encoding="utf-8") as f:
d = json.load(f)
self.bs = BandStructureSymmLine.from_dict(d)
self.plotter = BSPlotterProjected(self.bs)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
# Minimal baseline testing for get_plot. not a true test. Just checks that
# it can actually execute.
def test_methods(self):
self.plotter.get_elt_projected_plots().close()
self.plotter.get_elt_projected_plots_color().close()
self.plotter.get_projected_plots_dots({"Cu": ["d", "s"], "O": ["p"]}).close()
self.plotter.get_projected_plots_dots_patom_pmorb(
{"Cu": ["dxy", "s", "px"], "O": ["px", "py", "pz"]},
{"Cu": [3, 5], "O": [1]},
).close()
class BSDOSPlotterTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
# Minimal baseline testing for get_plot. Not a true test. Just checks that
# it can actually execute.
def test_methods(self):
v = Vasprun(os.path.join(PymatgenTest.TEST_FILES_DIR, "vasprun_Si_bands.xml"))
p = BSDOSPlotter()
plt = p.get_plot(
v.get_band_structure(kpoints_filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "KPOINTS_Si_bands"))
)
plt.close()
plt = p.get_plot(
v.get_band_structure(kpoints_filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "KPOINTS_Si_bands")),
v.complete_dos,
)
plt.close("all")
class PlotBZTest(unittest.TestCase):
def setUp(self):
self.rec_latt = Structure.from_file(
os.path.join(PymatgenTest.TEST_FILES_DIR, "Si.cssr")
).lattice.reciprocal_lattice
self.kpath = [[[0.0, 0.0, 0.0], [0.5, 0.0, 0.5], [0.5, 0.25, 0.75], [0.375, 0.375, 0.75]]]
self.labels = {
"\\Gamma": [0.0, 0.0, 0.0],
"K": [0.375, 0.375, 0.75],
"L": [0.5, 0.5, 0.5],
"U": [0.625, 0.25, 0.625],
"W": [0.5, 0.25, 0.75],
"X": [0.5, 0.0, 0.5],
}
self.hessian = [
[17.64757034, 3.90159625, -4.77845607],
[3.90159625, 14.88874142, 6.75776076],
[-4.77845607, 6.75776076, 12.12987493],
]
self.center = [0.41, 0.0, 0.41]
self.points = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_bz_plot(self):
fig, ax = plot_ellipsoid(self.hessian, self.center, lattice=self.rec_latt)
fig = plot_brillouin_zone(
self.rec_latt,
lines=self.kpath,
labels=self.labels,
kpoints=self.points,
ax=ax,
show=False,
)
def test_fold_point(self):
self.assertTrue(
scipy.allclose(
fold_point([0.0, -0.5, 0.5], lattice=self.rec_latt),
self.rec_latt.get_cartesian_coords([0.0, 0.5, 0.5]),
)
)
self.assertTrue(
scipy.allclose(
fold_point([0.1, -0.6, 0.2], lattice=self.rec_latt),
self.rec_latt.get_cartesian_coords([0.1, 0.4, 0.2]),
)
)
x_trans = which("x_trans")
@unittest.skipIf(not x_trans, "No x_trans.")
class BoltztrapPlotterTest(unittest.TestCase):
def setUp(self):
bz = BoltztrapAnalyzer.from_files(os.path.join(PymatgenTest.TEST_FILES_DIR, "boltztrap/transp/"))
self.plotter = BoltztrapPlotter(bz)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_plot_carriers(self):
plt = self.plotter.plot_carriers()
self.assertEqual(len(plt.gca().get_lines()), 7, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
6.525490122298364e22,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_complexity_factor_mu(self):
plt = self.plotter.plot_complexity_factor_mu()
self.assertEqual(len(plt.gca().get_lines()), 2, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
0.004708835456903449,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_conductivity_dop(self):
plt = self.plotter.plot_conductivity_dop()
self.assertEqual(len(plt.gca().get_lines()), 8, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
1000000000000000.0,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
0.3801957596666667,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_conductivity_mu(self):
plt = self.plotter.plot_conductivity_mu()
self.assertEqual(len(plt.gca().get_lines()), 9, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
1965.1306,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_conductivity_temp(self):
plt = self.plotter.plot_conductivity_temp()
self.assertEqual(len(plt.gca().get_lines()), 6, "wrong number of lines")
self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, "wrong 0 data in line 0")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
0.3801957596666667,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_dos(self):
plt = self.plotter.plot_dos()
self.assertEqual(len(plt.gca().get_lines()), 3, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.4197044934588674,
"wrong 0 data in line 0",
)
self.assertEqual(plt.gca().get_lines()[0].get_data()[1][0], 0.0, "wrong 1 data in line 0")
plt.close()
def test_plot_eff_mass_dop(self):
plt = self.plotter.plot_eff_mass_dop()
self.assertEqual(len(plt.gca().get_lines()), 8, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
1000000000000000.0,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
1.4231240011719886,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_eff_mass_temp(self):
plt = self.plotter.plot_eff_mass_temp()
self.assertEqual(len(plt.gca().get_lines()), 6, "wrong number of lines")
self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, "wrong 0 data in line 0")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
1.4231240011719886,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_hall_carriers(self):
plt = self.plotter.plot_hall_carriers()
self.assertEqual(len(plt.gca().get_lines()), 7, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
9.538187273102463e17,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_power_factor_dop(self):
plt = self.plotter.plot_power_factor_dop()
self.assertEqual(len(plt.gca().get_lines()), 8, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
1000000000000000.0,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
0.40606868935796925,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_power_factor_mu(self):
plt = self.plotter.plot_power_factor_mu()
self.assertEqual(len(plt.gca().get_lines()), 9, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
365.5514594136157,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_power_factor_temp(self):
plt = self.plotter.plot_power_factor_temp()
self.assertEqual(len(plt.gca().get_lines()), 6, "wrong number of lines")
self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, "wrong 0 data in line 0")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
0.40606868935796925,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_seebeck_dop(self):
plt = self.plotter.plot_seebeck_dop()
self.assertEqual(len(plt.gca().get_lines()), 8, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
1000000000000000.0,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
1050.8197666666667,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_seebeck_eff_mass_mu(self):
plt = self.plotter.plot_seebeck_eff_mass_mu()
self.assertEqual(len(plt.gca().get_lines()), 2, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
6412.881888198197,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_seebeck_mu(self):
plt = self.plotter.plot_seebeck_mu()
self.assertEqual(len(plt.gca().get_lines()), 9, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
-433.11096000000003,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_seebeck_temp(self):
plt = self.plotter.plot_seebeck_temp()
self.assertEqual(len(plt.gca().get_lines()), 6, "wrong number of lines")
self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, "wrong 0 data in line 0")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
1050.8197666666667,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_zt_dop(self):
plt = self.plotter.plot_zt_dop()
self.assertEqual(len(plt.gca().get_lines()), 8, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
1000000000000000.0,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
4.060682863129955e-05,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_zt_mu(self):
plt = self.plotter.plot_zt_mu()
self.assertEqual(len(plt.gca().get_lines()), 9, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
0.2153839699235254,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_zt_temp(self):
plt = self.plotter.plot_zt_temp()
self.assertEqual(len(plt.gca().get_lines()), 6, "wrong number of lines")
self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, "wrong 0 data in line 0")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
4.060682863129955e-05,
"wrong 1 data in line 0",
)
plt.close()
class CohpPlotterTest(PymatgenTest):
def setUp(self):
path = os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "complete_cohp_lobster.json")
with open(os.path.join(path), "r") as f:
self.cohp = CompleteCohp.from_dict(json.load(f))
path = os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "complete_coop_lobster.json")
with open(os.path.join(path), "r") as f:
self.coop = CompleteCohp.from_dict(json.load(f))
self.cohp_plot = CohpPlotter(zero_at_efermi=False)
self.coop_plot = CohpPlotter(are_coops=True)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_attributes(self):
self.assertFalse(self.cohp_plot.are_coops)
self.assertTrue(self.coop_plot.are_coops)
self.assertFalse(self.cohp_plot.zero_at_efermi)
self.assertTrue(self.coop_plot.zero_at_efermi)
self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)
cohp_energies = self.cohp_plot._cohps["1"]["energies"]
self.assertEqual(len(cohp_energies), 301)
self.assertAlmostEqual(cohp_energies[0], -0.27768)
self.assertAlmostEqual(cohp_energies[-1], 14.77248)
self.coop_plot.add_cohp_dict(self.coop.all_cohps)
coop_energies = self.coop_plot._cohps["10"]["energies"]
self.assertEqual(len(coop_energies), 241)
self.assertAlmostEqual(coop_energies[0], -6.02510)
self.assertAlmostEqual(coop_energies[-1], 6.02510)
def test_add_cohp_dict(self):
# Sorts the populations by z-coordinates of the sites
def sortkeys(sites):
return sites[0].z, sites[1].z
sorted_keys = ["3", "4", "7", "8", "9", "10", "11", "6", "5", "2", "1"]
d_coop = self.coop_plot.get_cohp_dict()
self.assertEqual(len(d_coop), 0)
bonds = self.coop.bonds
self.coop_plot.add_cohp_dict(self.coop.all_cohps, key_sort_func=lambda x: sortkeys(bonds[x]["sites"]))
d_coop = self.coop_plot.get_cohp_dict()
self.assertEqual(len(d_coop), 11)
self.assertEqual(list(self.coop_plot._cohps.keys()), sorted_keys)
def test_get_cohp_dict(self):
self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)
d_cohp = self.cohp_plot.get_cohp_dict()
for bond in ["1", "2"]:
self.assertIn(bond, d_cohp)
def test_get_plot(self):
self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)
plt_cohp = self.cohp_plot.get_plot()
ax_cohp = plt_cohp.gca()
self.assertEqual(ax_cohp.get_xlabel(), "-COHP")
self.assertEqual(ax_cohp.get_ylabel(), "$E$ (eV)")
legend_labels = ax_cohp.get_legend_handles_labels()[1]
self.assertEqual(len(self.cohp_plot._cohps), len(legend_labels))
self.assertEqual(ax_cohp.lines[0].get_linestyle(), "-")
self.assertEqual(ax_cohp.lines[1].get_linestyle(), "--")
for label in legend_labels:
self.assertIn(label, self.cohp_plot._cohps)
linesindex = legend_labels.index("1")
linestyles = {Spin.up: "-", Spin.down: "--"}
cohp_fe_fe = self.cohp.all_cohps["1"]
for s, spin in enumerate([Spin.up, Spin.down]):
lines = ax_cohp.lines[2 * linesindex + s]
self.assertArrayAlmostEqual(lines.get_xdata(), -cohp_fe_fe.cohp[spin])
self.assertArrayAlmostEqual(lines.get_ydata(), self.cohp.energies)
self.assertEqual(lines.get_linestyle(), linestyles[spin])
plt_cohp.close()
plt_cohp = self.cohp_plot.get_plot(invert_axes=False, plot_negative=False)
ax_cohp = plt_cohp.gca()
self.assertEqual(ax_cohp.get_xlabel(), "$E$ (eV)")
self.assertEqual(ax_cohp.get_ylabel(), "COHP")
for s, spin in enumerate([Spin.up, Spin.down]):
lines = ax_cohp.lines[2 * linesindex + s]
self.assertArrayAlmostEqual(lines.get_xdata(), self.cohp.energies)
self.assertArrayAlmostEqual(lines.get_ydata(), cohp_fe_fe.cohp[spin])
plt_cohp.close()
plt_cohp = self.cohp_plot.get_plot(integrated=True)
ax_cohp = plt_cohp.gca()
self.assertEqual(ax_cohp.get_xlabel(), "-ICOHP (eV)")
for s, spin in enumerate([Spin.up, Spin.down]):
lines = ax_cohp.lines[2 * linesindex + s]
self.assertArrayAlmostEqual(lines.get_xdata(), -cohp_fe_fe.icohp[spin])
coop_dict = {"Bi5-Bi6": self.coop.all_cohps["10"]}
self.coop_plot.add_cohp_dict(coop_dict)
plt_coop = self.coop_plot.get_plot()
ax_coop = plt_coop.gca()
self.assertEqual(ax_coop.get_xlabel(), "COOP")
self.assertEqual(ax_coop.get_ylabel(), "$E - E_f$ (eV)")
lines_coop = ax_coop.get_lines()[0]
self.assertArrayAlmostEqual(lines_coop.get_ydata(), self.coop.energies - self.coop.efermi)
coop_bi_bi = self.coop.all_cohps["10"].cohp[Spin.up]
self.assertArrayAlmostEqual(lines_coop.get_xdata(), coop_bi_bi)
# Cleanup.
plt_cohp.close()
plt_coop.close("all")
def test_save_plot(self):
self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)
plt_cohp = self.cohp_plot.get_plot()
self.cohp_plot.save_plot("cohpplot.png")
self.assertTrue(os.path.isfile("cohpplot.png"))
os.remove("cohpplot.png")
plt_cohp.close("all")
if __name__ == "__main__":
unittest.main()
| mit |
samuel1208/scikit-learn | sklearn/svm/tests/test_sparse.py | 95 | 12156 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
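# Helper: fit dense_svm on the densified training data and sparse_svm on the
# sparse version, then check that support vectors, dual (and, for a linear
# kernel, primal) coefficients, predictions, decision values and probabilities
# all agree, and that predicting sparse input with the dense-trained model
# raises an informative ValueError.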
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
# Test decision_function
# Sanity check: test that decision_function implemented in Python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
pbrod/numpy | numpy/core/code_generators/ufunc_docstrings.py | 7 | 106598 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
import textwrap
docdict = {}
def get(name):
return docdict.get(name)
# common parameter text to all ufuncs
subst = {
'PARAMS': textwrap.dedent("""
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
""").strip(),
'BROADCASTABLE_2': ("If ``x1.shape != x2.shape``, they must be "
"broadcastable to a common\n shape (which becomes "
"the shape of the output)."),
'OUT_SCALAR_1': "This is a scalar if `x` is a scalar.",
'OUT_SCALAR_2': "This is a scalar if both `x1` and `x2` are scalars.",
}
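# For illustration, the ``where``/``out`` behaviour described in the PARAMS
# text above means that entries whose condition is False keep whatever the
# ``out`` array already held, e.g.:
#     >>> import numpy as np
#     >>> out = np.full(3, -1.0)
#     >>> np.add([1.0, 2.0, 3.0], 10.0, out=out, where=[True, False, True])
#     array([11., -1., 13.])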
def add_newdoc(place, name, doc):
doc = textwrap.dedent(doc).strip()
skip = (
# gufuncs do not use the OUT_SCALAR replacement strings
'matmul',
# clip has 3 inputs, which is not handled by this
'clip',
)
if name[0] != '_' and name not in skip:
if '\nx :' in doc:
assert '$OUT_SCALAR_1' in doc, "in {}".format(name)
elif '\nx2 :' in doc or '\nx1, x2 :' in doc:
assert '$OUT_SCALAR_2' in doc, "in {}".format(name)
else:
assert False, "Could not detect number of inputs in {}".format(name)
for k, v in subst.items():
doc = doc.replace('$' + k, v)
docdict['.'.join((place, name))] = doc
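# For illustration, once the ``add_newdoc`` calls below have run, ``get``
# returns the fully substituted text keyed by "<place>.<name>", with the
# ``$PARAMS``/``$OUT_SCALAR_*`` placeholders expanded from ``subst``, e.g.:
#     >>> doc = get('numpy.core.umath.add')
#     >>> '$PARAMS' in doc
#     False
#     >>> doc.splitlines()[0]
#     'Add arguments element-wise.'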
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
``np.abs`` is a shorthand for this function.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
$OUT_SCALAR_1
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10], cmap='gray')
>>> plt.show()
The `abs` function can be used as a shorthand for ``np.absolute`` on
ndarrays.
>>> x = np.array([-1.2, 1.2])
>>> abs(x)
array([1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added.
$BROADCASTABLE_2
$PARAMS
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
The ``+`` operator can be used as a shorthand for ``np.add`` on ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> x1 + x2
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
$PARAMS
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi].
$OUT_SCALAR_1
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that ``cos(z) = x``. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts ``[-inf, -1]`` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
$OUT_SCALAR_1
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in ``[-pi, pi]`` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
$PARAMS
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``.
$OUT_SCALAR_1
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Array of the same shape as `x`.
$OUT_SCALAR_1
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
$PARAMS
Returns
-------
out : ndarray or scalar
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
$OUT_SCALAR_1
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [``1j, infj``] and [``-1j, -infj``] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates.
$BROADCASTABLE_2
$PARAMS
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
$OUT_SCALAR_2
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Array of the same shape as `x`.
$OUT_SCALAR_1
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that ``tanh(z) = x``. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True])
The ``&`` operator can be used as a shorthand for ``np.bitwise_and`` on
ndarrays.
>>> x1 = np.array([2, 5, 255])
>>> x2 = np.array([3, 14, 16])
>>> x1 & x2
array([ 2, 4, 16])
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32),
... np.array([4, 4, 4, 2147483647], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True])
The ``|`` operator can be used as a shorthand for ``np.bitwise_or`` on
ndarrays.
>>> x1 = np.array([2, 5, 255])
>>> x2 = np.array([4, 4, 4])
>>> x1 | x2
array([ 6, 5, 255])
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False])
The ``^`` operator can be used as a shorthand for ``np.bitwise_xor`` on
ndarrays.
>>> x1 = np.array([True, True])
>>> x2 = np.array([False, True])
>>> x1 ^ x2
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
``i >= x``. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
$OUT_SCALAR_1
See Also
--------
floor, trunc, rint, fix
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
$OUT_SCALAR_1
See Also
--------
ceil, floor, rint, fix
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
$PARAMS
Returns
-------
y : ndarray
The complex conjugate of `x`, with the same dtype as `x`.
$OUT_SCALAR_1
Notes
-----
`conj` is an alias for `conjugate`:
>>> np.conj is np.conjugate
True
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
$PARAMS
Returns
-------
y : ndarray
The corresponding cosine values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array of same shape as `x`.
$OUT_SCALAR_1
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
$PARAMS
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
$OUT_SCALAR_1
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
$PARAMS
Returns
-------
y : ndarray
The corresponding angle in degrees.
$OUT_SCALAR_1
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'heaviside',
"""
Compute the Heaviside step function.
The Heaviside step function is defined as::
heaviside(x1, x2) = 0   if x1 < 0
heaviside(x1, x2) = x2  if x1 == 0
heaviside(x1, x2) = 1   if x1 > 0
where `x2` is often taken to be 0.5, but 0 and 1 are also sometimes used.
Parameters
----------
x1 : array_like
Input values.
x2 : array_like
The value of the function when x1 is 0.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The output array, element-wise Heaviside step function of `x1`.
$OUT_SCALAR_2
Notes
-----
.. versionadded:: 1.13.0
References
----------
.. Wikipedia, "Heaviside step function",
https://en.wikipedia.org/wiki/Heaviside_step_function
Examples
--------
>>> np.heaviside([-1.5, 0, 2.0], 0.5)
array([ 0. , 0.5, 1. ])
>>> np.heaviside([-1.5, 0, 2.0], 1)
array([ 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The quotient ``x1/x2``, element-wise.
$OUT_SCALAR_2
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
Behavior on division by zero can be changed using ``seterr``.
In Python 2, when both ``x1`` and ``x2`` are of an integer type,
``divide`` will behave like ``floor_divide``. In Python 3, it behaves
like ``true_divide``.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic (again,
Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
The ``/`` operator can be used as a shorthand for ``np.divide`` on
ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = 2 * np.ones(3)
>>> x1 / x2
array([[0. , 0.5, 1. ],
[1.5, 2. , 2.5],
[3. , 3.5, 4. ]])
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False])
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True])
The ``==`` operator can be used as a shorthand for ``np.equal`` on
ndarrays.
>>> a = np.array([2, 4, 6])
>>> b = np.array([2, 4, 2])
>>> a == b
array([ True, True, False])
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential of `x`.
$OUT_SCALAR_1
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
https://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='gray')
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='hsv')
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise 2 to the power `x`.
$OUT_SCALAR_1
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise exponential minus one: ``out = exp(x) - 1``.
$OUT_SCALAR_1
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
$PARAMS
Returns
-------
y : ndarray or scalar
The absolute values of `x`, the returned values are always floats.
$OUT_SCALAR_1
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The floor of each element in `x`.
$OUT_SCALAR_1
See Also
--------
ceil, trunc, rint, fix
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", where
``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`. The "floor-towards-zero"
function is called ``fix`` in NumPy.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
It is equivalent to the Python ``//`` operator and pairs with the
Python ``%`` (`remainder`) function, so that ``a = a % b + b * (a // b)``
up to roundoff.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
$OUT_SCALAR_2
See Also
--------
remainder : Remainder complementary to floor_divide.
divmod : Simultaneous floor division and remainder.
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
The ``//`` operator can be used as a shorthand for ``np.floor_divide``
on ndarrays.
>>> x1 = np.array([1., 2., 3., 4.])
>>> x1 // 2.5
array([0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod; the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
$OUT_SCALAR_2
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False])
The ``>`` operator can be used as a shorthand for ``np.greater`` on
ndarrays.
>>> a = np.array([4, 2])
>>> b = np.array([2, 2])
>>> a > b
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : bool or ndarray of bool
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False])
The ``>=`` operator can be used as a shorthand for ``np.greater_equal``
on ndarrays.
>>> a = np.array([4, 2, 1])
>>> b = np.array([2, 2, 2])
>>> a >= b
array([ True, True, False])
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
$BROADCASTABLE_2
$PARAMS
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
$OUT_SCALAR_2
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_1
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
The result depends on the bit-width:
>>> x = np.invert(np.array(13, dtype=np.uint16))
>>> x
65522
>>> np.binary_repr(x, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True])
The ``~`` operator can be used as a shorthand for ``np.invert`` on
ndarrays.
>>> x1 = np.array([True, False])
>>> ~x1
array([False, True])
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray, bool
True where ``x`` is not positive infinity, negative infinity,
or NaN; false otherwise.
$OUT_SCALAR_1
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also, positive infinity is not equivalent to negative infinity, but
infinity is equivalent to positive infinity. Errors result if the
second argument is also supplied when `x` is a scalar input, or if the
first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
$PARAMS
Returns
-------
y : bool (scalar) or boolean ndarray
True where ``x`` is positive or negative infinity, false otherwise.
$OUT_SCALAR_1
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray or bool
True where ``x`` is NaN, false otherwise.
$OUT_SCALAR_1
See Also
--------
isinf, isneginf, isposinf, isfinite, isnat
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False])
""")
add_newdoc('numpy.core.umath', 'isnat',
"""
Test element-wise for NaT (not a time) and return result as a boolean array.
.. versionadded:: 1.13.0
Parameters
----------
x : array_like
Input array with datetime or timedelta data type.
$PARAMS
Returns
-------
y : ndarray or bool
True where ``x`` is NaT, false otherwise.
$OUT_SCALAR_1
See Also
--------
isnan, isinf, isneginf, isposinf, isfinite
Examples
--------
>>> np.isnat(np.datetime64("NaT"))
True
>>> np.isnat(np.datetime64("2016-01-01"))
False
>>> np.isnat(np.array(["NaT", "2016-01-01"], dtype="datetime64[ns]"))
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
$OUT_SCALAR_2
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
Note that the dtype of the second argument may change the dtype of the
result and can lead to unexpected results in some cases (see
:ref:`Casting Rules <ufuncs.casting>`):
>>> a = np.left_shift(np.uint8(255), 1) # Expect 254
>>> print(a, type(a)) # Unexpected result due to upcasting
510 <class 'numpy.int64'>
>>> b = np.left_shift(np.uint8(255), np.uint8(1))
>>> print(b, type(b))
254 <class 'numpy.uint8'>
The ``<<`` operator can be used as a shorthand for ``np.left_shift`` on
ndarrays.
>>> x1 = 5
>>> x2 = np.array([1, 2, 3])
>>> x1 << x2
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False])
The ``<`` operator can be used as a shorthand for ``np.less`` on ndarrays.
>>> a = np.array([1, 2])
>>> b = np.array([2, 2])
>>> a < b
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True])
The ``<=`` operator can be used as a shorthand for ``np.less_equal`` on
ndarrays.
>>> a = np.array([4, 2, 1])
>>> b = np.array([2, 2, 2])
>>> a <= b
array([False, True, True])
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
$PARAMS
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
$OUT_SCALAR_1
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
$OUT_SCALAR_1
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., nan])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
$OUT_SCALAR_1
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
$BROADCASTABLE_2
$PARAMS
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
$OUT_SCALAR_2
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
$BROADCASTABLE_2
$PARAMS
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
$OUT_SCALAR_2
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
$OUT_SCALAR_1
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or bool
Boolean result of the logical AND operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False])
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False])
The ``&`` operator can be used as a shorthand for ``np.logical_and`` on
boolean ndarrays.
>>> a = np.array([True, False])
>>> b = np.array([False, False])
>>> a & b
array([False, False])
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
$PARAMS
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
$OUT_SCALAR_1
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True])
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or bool
Boolean result of the logical OR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False])
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True])
The ``|`` operator can be used as a shorthand for ``np.logical_or`` on
boolean ndarrays.
>>> a = np.array([True, False])
>>> b = np.array([False, False])
>>> a | b
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True])
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]])
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are nans, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([nan, nan, nan])
>>> np.maximum(np.Inf, 1)
inf
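A sketch of the ``np.where`` equivalence mentioned in the Notes, for
NaN-free inputs:
>>> x1, x2 = np.array([2, 3, 4]), np.array([1, 5, 2])
>>> np.where(x1 >= x2, x1, x2)
array([2, 5, 4])
>>> np.maximum(x1, x2)
array([2, 5, 4])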
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([nan, nan, nan])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., nan])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., nan])
""")
add_newdoc('numpy.core.umath', 'clip',
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Equivalent to but faster than ``np.minimum(np.maximum(a, a_min), a_max)``.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : array_like
Minimum value.
a_max : array_like
Maximum value.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
$PARAMS
See Also
--------
numpy.clip :
Wrapper that makes the ``a_min`` and ``a_max`` arguments optional,
dispatching to one of `~numpy.core.umath.clip`,
`~numpy.core.umath.minimum`, and `~numpy.core.umath.maximum`.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
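Examples
--------
A minimal sketch of the clipping behaviour described above, shown through
the `numpy.clip` wrapper listed in See Also (default integer dtype assumed):
>>> np.clip(np.arange(5), 1, 3)
array([1, 1, 2, 3, 3])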
""")
add_newdoc('numpy.core.umath', 'matmul',
"""
Matrix product of two arrays.
Parameters
----------
x1, x2 : array_like
Input arrays, scalars not allowed.
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that matches the signature `(n,k),(k,m)->(n,m)`. If not
provided or None, a freshly-allocated array is returned.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
.. versionadded:: 1.16
Now handles ufunc kwargs
Returns
-------
y : ndarray
The matrix product of the inputs.
This is a scalar only when both x1, x2 are 1-d vectors.
Raises
------
ValueError
If the last dimension of `x1` is not the same size as
the second-to-last dimension of `x2`.
If a scalar value is passed in.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
dot : alternative matrix product with different broadcasting rules.
Notes
-----
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional
matrices.
- If either argument is N-D, N > 2, it is treated as a stack of
matrices residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by
prepending a 1 to its dimensions. After matrix multiplication
the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by
appending a 1 to its dimensions. After matrix multiplication
the appended 1 is removed.
``matmul`` differs from ``dot`` in two important ways:
- Multiplication by scalars is not allowed, use ``*`` instead.
- Stacks of matrices are broadcast together as if the matrices
were elements, respecting the signature ``(n,k),(k,m)->(n,m)``:
>>> a = np.ones([9, 5, 7, 4])
>>> c = np.ones([9, 5, 4, 3])
>>> np.dot(a, c).shape
(9, 5, 7, 9, 5, 3)
>>> np.matmul(a, c).shape
(9, 5, 7, 3)
>>> # n is 7, k is 4, m is 3
The matmul function implements the semantics of the ``@`` operator introduced
in Python 3.5 following :pep:`465`.
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([[4, 1],
... [2, 2]])
>>> np.matmul(a, b)
array([[4, 1],
[2, 2]])
For 2-D mixed with 1-D, the result is the usual matrix-vector product.
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([1, 2])
>>> np.matmul(a, b)
array([1, 2])
>>> np.matmul(b, a)
array([1, 2])
Broadcasting is conventional for stacks of arrays
>>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
>>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
>>> np.matmul(a,b).shape
(2, 2, 2)
>>> np.matmul(a, b)[0, 1, 1]
98
>>> sum(a[0, 1, :] * b[0 , :, 1])
98
Vector, vector returns the scalar inner product, but neither argument
is complex-conjugated:
>>> np.matmul([2j, 3j], [2j, 3j])
(-13+0j)
Scalar multiplication raises an error.
>>> np.matmul([1,2], 3)
Traceback (most recent call last):
...
ValueError: matmul: Input operand 1 does not have enough dimensions ...
The ``@`` operator can be used as a shorthand for ``np.matmul`` on
ndarrays.
>>> x1 = np.array([2j, 3j])
>>> x2 = np.array([2j, 3j])
>>> x1 @ x2
(-13+0j)
.. versionadded:: 1.10.0
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y1 : ndarray
Fractional part of `x`.
$OUT_SCALAR_1
y2 : ndarray
Integral part of `x`.
$OUT_SCALAR_1
Notes
-----
For integer input the return values are floats.
See Also
--------
divmod : ``divmod(x, 1)`` is equivalent to ``modf`` with the return values
switched, except it always has a positive remainder.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0.0)
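Integer input is promoted to float, as noted above (scalar reprs shown as
plain floats):
>>> np.modf(3)
(0.0, 3.0)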
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
The ``*`` operator can be used as a shorthand for ``np.multiply`` on
ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> x1 * x2
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
$PARAMS
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
$OUT_SCALAR_1
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
The unary ``-`` operator can be used as a shorthand for ``np.negative`` on
ndarrays.
>>> x1 = np.array(([1., -1.]))
>>> -x1
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'positive',
"""
Numerical positive, element-wise.
.. versionadded:: 1.13.0
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = +x`.
$OUT_SCALAR_1
Notes
-----
Equivalent to `x.copy()`, but only defined for types that support
arithmetic.
Examples
--------
>>> x1 = np.array(([1., -1.]))
>>> np.positive(x1)
array([ 1., -1.])
The unary ``+`` operator can be used as a shorthand for ``np.positive`` on
ndarrays.
>>> x1 = np.array(([1., -1.]))
>>> +x1
array([ 1., -1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True])
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]])
The ``!=`` operator can be used as a shorthand for ``np.not_equal`` on
ndarrays.
>>> a = np.array([1., 2.])
>>> b = np.array([1., 3.])
>>> a != b
array([False, True])
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be `numpy.ones_like`, but a dedicated function has
since been written for consistency with the other *_like functions. It is
now only used internally, in a limited fashion.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape. Note that an
integer type raised to a negative integer power will raise a ValueError.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
$OUT_SCALAR_2
See Also
--------
float_power : power function that promotes integers to float
Examples
--------
Cube each element in an array.
>>> x1 = np.arange(6)
>>> x1
array([0, 1, 2, 3, 4, 5])
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
The ``**`` operator can be used as a shorthand for ``np.power`` on
ndarrays.
>>> x2 = np.array([1, 2, 3, 3, 2, 1])
>>> x1 = np.arange(6)
>>> x1 ** x2
array([ 0, 1, 8, 27, 16, 5])
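An integer base raised to a negative integer power raises an error, as
noted above; the exact message may differ between versions:
>>> np.power(x1, -1)
Traceback (most recent call last):
...
ValueError: Integers to negative integer powers are not allowed.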
""")
add_newdoc('numpy.core.umath', 'float_power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in `x2`.
`x1` and `x2` must be broadcastable to the same shape. This differs from
the power function in that integers, float16, and float32 are promoted to
floats with a minimum precision of float64 so that the result is always
inexact. The intent is that the function will return a usable result for
negative powers and seldom overflow for positive powers.
.. versionadded:: 1.12.0
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
$OUT_SCALAR_2
See Also
--------
power : power function that preserves type
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> list(x1)
[0, 1, 2, 3, 4, 5]
>>> np.float_power(x1, 3)
array([ 0., 1., 8., 27., 64., 125.])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.float_power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.float_power(x1, x2)
array([[ 0., 1., 8., 27., 16., 5.],
[ 0., 1., 8., 27., 16., 5.]])
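Negative powers are handled by promoting to float, unlike ``power`` with
integer inputs (a small sketch):
>>> np.float_power(2, -2)
0.25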
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
$PARAMS
Returns
-------
y : ndarray
The corresponding radian values.
$OUT_SCALAR_1
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
$PARAMS
Returns
-------
y : ndarray
The corresponding angle in radians.
$OUT_SCALAR_1
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
Return array.
$OUT_SCALAR_1
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
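With integer input the result is truncated toward zero, as the note above
warns (default integer dtype assumed):
>>> np.reciprocal(np.array([1, 2, 4]))
array([1, 0, 0])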
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes the remainder complementary to the `floor_divide` function. It is
equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
as the divisor `x2`. The MATLAB function equivalent to ``np.remainder``
is ``mod``.
.. warning::
This should not be confused with:
* Python 3.7's `math.remainder` and C's ``remainder``, which
compute the IEEE remainder, the complement to ``round(x1 / x2)``.
* The MATLAB ``rem`` function and/or the C ``%`` operator, which are the
complement to ``int(x1 / x2)``.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The element-wise remainder of the quotient ``floor_divide(x1, x2)``.
$OUT_SCALAR_2
See Also
--------
floor_divide : Equivalent of Python ``//`` operator.
divmod : Simultaneous floor division and remainder.
fmod : Equivalent of the MATLAB ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
``mod`` is an alias of ``remainder``.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
The ``%`` operator can be used as a shorthand for ``np.remainder`` on
ndarrays.
>>> x1 = np.arange(7)
>>> x1 % 5
array([0, 1, 2, 3, 4, 0, 1])
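The sign of the result follows the divisor, in contrast to ``fmod``
(illustrative):
>>> np.remainder([-3, 3], 5)
array([2, 3])
>>> np.fmod([-3, 3], 5)
array([-3,  3])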
""")
add_newdoc('numpy.core.umath', 'divmod',
"""
Return element-wise quotient and remainder simultaneously.
.. versionadded:: 1.13.0
``np.divmod(x, y)`` is equivalent to ``(x // y, x % y)``, but faster
because it avoids redundant work. It is used to implement the Python
built-in function ``divmod`` on NumPy arrays.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out1 : ndarray
Element-wise quotient resulting from floor division.
$OUT_SCALAR_2
out2 : ndarray
Element-wise remainder from floor division.
$OUT_SCALAR_2
See Also
--------
floor_divide : Equivalent to Python's ``//`` operator.
remainder : Equivalent to Python's ``%`` operator.
modf : Equivalent to ``divmod(x, 1)`` for positive ``x`` with the return
values switched.
Examples
--------
>>> np.divmod(np.arange(5), 3)
(array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1]))
The `divmod` function can be used as a shorthand for ``np.divmod`` on
ndarrays.
>>> x = np.arange(5)
>>> divmod(x, 3)
(array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1]))
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by `x2` positions. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
$OUT_SCALAR_2
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
The ``>>`` operator can be used as a shorthand for ``np.right_shift`` on
ndarrays.
>>> x1 = 10
>>> x2 = np.array([1,2,3])
>>> x1 >> x2
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
$OUT_SCALAR_1
See Also
--------
fix, ceil, floor, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
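Halfway cases round to the nearest even value, as described in the Notes
(illustrative):
>>> np.rint([0.5, 1.5, 2.5, 3.5])
array([0., 2., 2., 4.])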
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. nan
is returned for nan inputs.
For complex inputs, the `sign` function returns
``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``.
complex(nan, 0) is returned for complex nan inputs.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
The sign of `x`.
$OUT_SCALAR_1
Notes
-----
There is more than one definition of sign in common use for complex
numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}`
which is different from a common alternative, :math:`x/|x|`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
>>> np.sign(5-2j)
(1+0j)
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
$PARAMS
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
$OUT_SCALAR_1
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False])
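The sign bit is also set for negative zero, even though ``-0.0 < 0`` is
False (a useful edge case to keep in mind):
>>> np.signbit(np.array([-0.0, 0.0]))
array([ True, False])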
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If `x2` is a scalar, its sign will be copied to all elements of `x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The values of `x1` with the sign of `x2`.
$OUT_SCALAR_2
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The next representable values of `x1` in the direction of `x2`.
$OUT_SCALAR_2
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True])
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x : array_like
Values to find the spacing of.
$PARAMS
Returns
-------
out : ndarray or scalar
The spacing of values of `x`.
$OUT_SCALAR_1
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
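As noted above, the spacing of infinities and NaN is NaN (illustrative):
>>> np.spacing([np.inf, np.nan])
array([nan, nan])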
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
$PARAMS
Returns
-------
y : array_like
The sine of each element of x.
$OUT_SCALAR_1
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the non-negative square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
$PARAMS
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
$OUT_SCALAR_1
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., nan, inf])
""")
add_newdoc('numpy.core.umath', 'cbrt',
"""
Return the cube-root of an array, element-wise.
.. versionadded:: 1.10.0
Parameters
----------
x : array_like
The values whose cube-roots are required.
$PARAMS
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the cube-root of
each element in `x`.
If `out` was provided, `y` is a reference to it.
$OUT_SCALAR_1
Examples
--------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise `x*x`, of the same shape and dtype as `x`.
$OUT_SCALAR_1
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
The ``-`` operator can be used as a shorthand for ``np.subtract`` on
ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> x1 - x2
array([[0., 0., 0.],
[3., 3., 3.],
[6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding tangent values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
https://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
$OUT_SCALAR_2
Notes
-----
In Python, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
The ``/`` operator can be used as a shorthand for ``np.true_divide`` on
ndarrays.
>>> x = np.arange(5)
>>> x / 4
array([0. , 0.25, 0.5 , 0.75, 1. ])
""")
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
The mantissa lies in the open interval (-1, 1), while the twos
exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1 : ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2 : ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
$PARAMS
Returns
-------
mantissa : ndarray
Floating values between -1 and 1.
$OUT_SCALAR_1
exponent : ndarray
Integer exponents of 2.
$OUT_SCALAR_1
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported; they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
$OUT_SCALAR_2
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported; they will raise a TypeError.
`ldexp` is useful as the inverse of `frexp`; if used by itself, it is
clearer to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float16)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
add_newdoc('numpy.core.umath', 'gcd',
"""
Returns the greatest common divisor of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : array_like, int
Arrays of values.
$BROADCASTABLE_2
Returns
-------
y : ndarray or scalar
The greatest common divisor of the absolute value of the inputs
$OUT_SCALAR_2
See Also
--------
lcm : The lowest common multiple
Examples
--------
>>> np.gcd(12, 20)
4
>>> np.gcd.reduce([15, 25, 35])
5
>>> np.gcd(np.arange(6), 20)
array([20, 1, 2, 1, 4, 5])
""")
add_newdoc('numpy.core.umath', 'lcm',
"""
Returns the lowest common multiple of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : array_like, int
Arrays of values.
$BROADCASTABLE_2
Returns
-------
y : ndarray or scalar
The lowest common multiple of the absolute value of the inputs
$OUT_SCALAR_2
See Also
--------
gcd : The greatest common divisor
Examples
--------
>>> np.lcm(12, 20)
60
>>> np.lcm.reduce([3, 12, 20])
60
>>> np.lcm.reduce([40, 12, 20])
120
>>> np.lcm(np.arange(6), 20)
array([ 0, 20, 20, 60, 20, 20])
""")
| bsd-3-clause |
dongjoon-hyun/spark | python/pyspark/pandas/tests/test_internal.py | 14 | 4900 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from pyspark.pandas.internal import (
InternalFrame,
SPARK_DEFAULT_INDEX_NAME,
SPARK_INDEX_NAME_FORMAT,
)
from pyspark.pandas.utils import spark_column_equals
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
class InternalFrameTest(PandasOnSparkTestCase, SQLTestUtils):
def test_from_pandas(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
internal = InternalFrame.from_pandas(pdf)
sdf = internal.spark_frame
self.assert_eq(internal.index_spark_column_names, [SPARK_DEFAULT_INDEX_NAME])
self.assert_eq(internal.index_names, [None])
self.assert_eq(internal.column_labels, [("a",), ("b",)])
self.assert_eq(internal.data_spark_column_names, ["a", "b"])
self.assertTrue(spark_column_equals(internal.spark_column_for(("a",)), sdf["a"]))
self.assertTrue(spark_column_equals(internal.spark_column_for(("b",)), sdf["b"]))
self.assert_eq(internal.to_pandas_frame, pdf)
# non-string column name
pdf1 = pd.DataFrame({0: [1, 2, 3], 1: [4, 5, 6]})
internal = InternalFrame.from_pandas(pdf1)
sdf = internal.spark_frame
self.assert_eq(internal.index_spark_column_names, [SPARK_DEFAULT_INDEX_NAME])
self.assert_eq(internal.index_names, [None])
self.assert_eq(internal.column_labels, [(0,), (1,)])
self.assert_eq(internal.data_spark_column_names, ["0", "1"])
self.assertTrue(spark_column_equals(internal.spark_column_for((0,)), sdf["0"]))
self.assertTrue(spark_column_equals(internal.spark_column_for((1,)), sdf["1"]))
self.assert_eq(internal.to_pandas_frame, pdf1)
# categorical column
pdf2 = pd.DataFrame({0: [1, 2, 3], 1: pd.Categorical([4, 5, 6])})
internal = InternalFrame.from_pandas(pdf2)
sdf = internal.spark_frame
self.assert_eq(internal.index_spark_column_names, [SPARK_DEFAULT_INDEX_NAME])
self.assert_eq(internal.index_names, [None])
self.assert_eq(internal.column_labels, [(0,), (1,)])
self.assert_eq(internal.data_spark_column_names, ["0", "1"])
self.assertTrue(spark_column_equals(internal.spark_column_for((0,)), sdf["0"]))
self.assertTrue(spark_column_equals(internal.spark_column_for((1,)), sdf["1"]))
self.assert_eq(internal.to_pandas_frame, pdf2)
# multi-index
pdf.set_index("a", append=True, inplace=True)
internal = InternalFrame.from_pandas(pdf)
sdf = internal.spark_frame
self.assert_eq(
internal.index_spark_column_names,
[SPARK_INDEX_NAME_FORMAT(0), SPARK_INDEX_NAME_FORMAT(1)],
)
self.assert_eq(internal.index_names, [None, ("a",)])
self.assert_eq(internal.column_labels, [("b",)])
self.assert_eq(internal.data_spark_column_names, ["b"])
self.assertTrue(spark_column_equals(internal.spark_column_for(("b",)), sdf["b"]))
self.assert_eq(internal.to_pandas_frame, pdf)
# multi-index columns
pdf.columns = pd.MultiIndex.from_tuples([("x", "b")])
internal = InternalFrame.from_pandas(pdf)
sdf = internal.spark_frame
self.assert_eq(
internal.index_spark_column_names,
[SPARK_INDEX_NAME_FORMAT(0), SPARK_INDEX_NAME_FORMAT(1)],
)
self.assert_eq(internal.index_names, [None, ("a",)])
self.assert_eq(internal.column_labels, [("x", "b")])
self.assert_eq(internal.data_spark_column_names, ["(x, b)"])
self.assertTrue(spark_column_equals(internal.spark_column_for(("x", "b")), sdf["(x, b)"]))
self.assert_eq(internal.to_pandas_frame, pdf)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_internal import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
huongttlan/statsmodels | tools/code_maintenance.py | 37 | 2307 | """
Code maintenance script modified from PyMC
"""
#!/usr/bin/env python
import sys
import os
# This is a function, not a test case, because it has to be run from inside
# the source tree to work well.
mod_strs = ['IPython', 'pylab', 'matplotlib', 'scipy','Pdb']
dep_files = {}
for mod_str in mod_strs:
dep_files[mod_str] = []
def remove_whitespace(fname):
# Remove trailing whitespace
fd = open(fname,mode='U') # open in universal newline mode
lines = []
for line in fd.readlines():
lines.append( line.rstrip() )
fd.close()
fd = open(fname,mode='w')
fd.seek(0)
for line in lines:
fd.write(line+'\n')
fd.close()
# print 'Removed whitespace from %s'%fname
def find_whitespace(fname):
fd = open(fname, mode='U')
for line in fd.readlines():
#print repr(line)
if ' \n' in line:
print fname
break
# print
print_only = True
# ====================
# = Strip whitespace =
# ====================
for dirname, dirs, files in os.walk('.'):
if dirname[1:].find('.')==-1:
# print dirname
for fname in files:
if fname[-2:] in ['c', 'f'] or fname[-3:]=='.py' or fname[-4:] in ['.pyx', '.txt', '.tex', '.sty', '.cls'] or fname.find('.')==-1:
# print fname
if print_only:
find_whitespace(dirname + '/' + fname)
else:
remove_whitespace(dirname + '/' + fname)
"""
# ==========================
# = Check for dependencies =
# ==========================
for dirname, dirs, files in os.walk('pymc'):
for fname in files:
if fname[-3:]=='.py' or fname[-4:]=='.pyx':
if dirname.find('sandbox')==-1 and fname != 'test_dependencies.py'\
and dirname.find('examples')==-1:
for mod_str in mod_strs:
if file(dirname+'/'+fname).read().find(mod_str)>=0:
dep_files[mod_str].append(dirname+'/'+fname)
print 'Instances of optional dependencies found are:'
for mod_str in mod_strs:
print '\t'+mod_str+':'
for fname in dep_files[mod_str]:
print '\t\t'+fname
if len(dep_files['Pdb'])>0:
raise ValueError, 'Looks like Pdb was not commented out in '+', '.join(dep_files[mod_str])
"""
| bsd-3-clause |
jmschrei/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
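            # Descriptive note: ``gamma_`` below is the effective number of
            # well-determined parameters, sum_i alpha*e_i / (lambda + alpha*e_i)
            # over the eigenvalues e_i of X^T X; lambda and alpha are then
            # re-estimated following the usual evidence-maximization rules,
            # with the Gamma hyperprior terms (lambda_1/2, alpha_1/2) acting
            # as weak regularizers.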
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
| bsd-3-clause |
fivejjs/GPy | GPy/examples/classification.py | 4 | 6767 | # Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
Gaussian Processes classification examples
"""
import GPy
default_seed = 10000
def oil(num_inducing=50, max_iters=100, kernel=None, optimize=True, plot=True):
"""
Run a Gaussian process classification on the three phase oil data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
"""
try:import pods
except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
data = pods.datasets.oil()
X = data['X']
Xtest = data['Xtest']
Y = data['Y'][:, 0:1]
Ytest = data['Ytest'][:, 0:1]
Y[Y.flatten()==-1] = 0
Ytest[Ytest.flatten()==-1] = 0
# Create GP model
m = GPy.models.SparseGPClassification(X, Y, kernel=kernel, num_inducing=num_inducing)
    # Constrain all parameters to be positive
#m.tie_params('.*len')
m['.*len'] = 10.
# Optimize
if optimize:
for _ in range(5):
m.optimize(max_iters=int(max_iters/5))
print(m)
#Test
probs = m.predict(Xtest)[0]
GPy.util.classification.conf_matrix(probs, Ytest)
return m
def toy_linear_1d_classification(seed=default_seed, optimize=True, plot=True):
"""
Simple 1D classification example using EP approximation
:param seed: seed value for data generation (default is 4).
:type seed: int
"""
try:import pods
except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
data = pods.datasets.toy_linear_1d_classification(seed=seed)
Y = data['Y'][:, 0:1]
Y[Y.flatten() == -1] = 0
# Model definition
m = GPy.models.GPClassification(data['X'], Y)
# Optimize
if optimize:
#m.update_likelihood_approximation()
# Parameters optimization:
m.optimize()
#m.update_likelihood_approximation()
#m.pseudo_EM()
# Plot
if plot:
from matplotlib import pyplot as plt
fig, axes = plt.subplots(2, 1)
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
print m
return m
def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot=True):
"""
Simple 1D classification example using Laplace approximation
:param seed: seed value for data generation (default is 4).
:type seed: int
"""
try:import pods
except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
data = pods.datasets.toy_linear_1d_classification(seed=seed)
Y = data['Y'][:, 0:1]
Y[Y.flatten() == -1] = 0
likelihood = GPy.likelihoods.Bernoulli()
laplace_inf = GPy.inference.latent_function_inference.Laplace()
kernel = GPy.kern.RBF(1)
# Model definition
m = GPy.core.GP(data['X'], Y, kernel=kernel, likelihood=likelihood, inference_method=laplace_inf)
# Optimize
if optimize:
try:
m.optimize('scg', messages=1)
except Exception as e:
return m
# Plot
if plot:
from matplotlib import pyplot as plt
fig, axes = plt.subplots(2, 1)
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
print m
return m
def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, optimize=True, plot=True):
"""
Sparse 1D classification example
:param seed: seed value for data generation (default is 4).
:type seed: int
"""
try:import pods
except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
data = pods.datasets.toy_linear_1d_classification(seed=seed)
Y = data['Y'][:, 0:1]
Y[Y.flatten() == -1] = 0
# Model definition
m = GPy.models.SparseGPClassification(data['X'], Y, num_inducing=num_inducing)
m['.*len'] = 4.
# Optimize
if optimize:
m.optimize()
# Plot
if plot:
from matplotlib import pyplot as plt
fig, axes = plt.subplots(2, 1)
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
print m
return m
def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True):
"""
    Simple 1D classification example using a Heaviside GP transformation
:param seed: seed value for data generation (default is 4).
:type seed: int
"""
try:import pods
except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
data = pods.datasets.toy_linear_1d_classification(seed=seed)
Y = data['Y'][:, 0:1]
Y[Y.flatten() == -1] = 0
# Model definition
kernel = GPy.kern.RBF(1)
likelihood = GPy.likelihoods.Bernoulli(gp_link=GPy.likelihoods.link_functions.Heaviside())
ep = GPy.inference.latent_function_inference.expectation_propagation.EP()
m = GPy.core.GP(X=data['X'], Y=Y, kernel=kernel, likelihood=likelihood, inference_method=ep, name='gp_classification_heaviside')
#m = GPy.models.GPClassification(data['X'], likelihood=likelihood)
# Optimize
if optimize:
# Parameters optimization:
for _ in range(5):
m.optimize(max_iters=int(max_iters/5))
print m
# Plot
if plot:
from matplotlib import pyplot as plt
fig, axes = plt.subplots(2, 1)
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
print m
return m
def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=None, optimize=True, plot=True):
"""
Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
:param model_type: type of model to fit ['Full', 'FITC', 'DTC'].
:param inducing: number of inducing variables (only used for 'FITC' or 'DTC').
:type inducing: int
:param seed: seed value for data generation.
:type seed: int
:param kernel: kernel to use in the model
:type kernel: a GPy kernel
"""
try:import pods
except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
data = pods.datasets.crescent_data(seed=seed)
Y = data['Y']
Y[Y.flatten()==-1] = 0
if model_type == 'Full':
m = GPy.models.GPClassification(data['X'], Y, kernel=kernel)
elif model_type == 'DTC':
m = GPy.models.SparseGPClassification(data['X'], Y, kernel=kernel, num_inducing=num_inducing)
m['.*len'] = 10.
elif model_type == 'FITC':
m = GPy.models.FITCClassification(data['X'], Y, kernel=kernel, num_inducing=num_inducing)
m['.*len'] = 3.
if optimize:
m.pseudo_EM()
if plot:
m.plot()
print m
return m
| bsd-3-clause |
HoliestCow/ece692_deeplearning | project5/cnn/rad_gan.py | 1 | 4059 | """ GAN Example
Use a generative adversarial network (GAN) to generate digit images from a
noise distribution.
References:
- Generative adversarial nets. I Goodfellow, J Pouget-Abadie, M Mirza,
B Xu, D Warde-Farley, S Ozair, Y. Bengio. Advances in neural information
processing systems, 2672-2680.
Links:
- [GAN Paper](https://arxiv.org/pdf/1406.2661.pdf).
"""
from __future__ import division, print_function, absolute_import
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tflearn
import h5py
# Data loading and preprocessing
# import tflearn.datasets.mnist as mnist
# X, Y, testX, testY = mnist.load_data()
# bg = np.load('./integrations/bg_spectra_only.npy')
dataset = h5py.File('./spectral_data.h5', 'r')
x = np.array(dataset['training_data'], dtype=float)
x_test = np.array(dataset['testing_data'], dtype=float)
# validation_dataset = np.array(dataset['validation_data'])
# image_dim = 784 # 28*28 pixels
image_dim = 1024
z_dim = 200 # Noise data points
total_samples = x.shape[0]
def samplewise_mean(x):
for i in range(x.shape[0]):
mean = np.mean(x[i, :])
std = np.std(x[i, :])
z = np.divide(np.subtract(x[i, :], mean), std)
x[i, :] = z
return x
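# samplewise_mean standardizes each spectrum in place: for a row [1., 2., 3.]
# the mean 2.0 and (population) std ~0.816 give z ~ [-1.22, 0., 1.22].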
x = samplewise_mean(x)
x_test = samplewise_mean(x_test)
# Generator
def generator(x, reuse=False):
with tf.variable_scope('Generator', reuse=reuse):
x = tflearn.fully_connected(x, 512, activation='relu')
x = tflearn.fully_connected(x, image_dim, activation='sigmoid')
return x
# Discriminator
def discriminator(x, reuse=False):
with tf.variable_scope('Discriminator', reuse=reuse):
x = tflearn.fully_connected(x, 512, activation='relu')
x = tflearn.fully_connected(x, 1, activation='sigmoid')
return x
# Build Networks
gen_input = tflearn.input_data(shape=[None, z_dim], name='input_noise')
disc_input = tflearn.input_data(shape=[None, image_dim], name='disc_input')
gen_sample = generator(gen_input)
disc_real = discriminator(disc_input)
disc_fake = discriminator(gen_sample, reuse=True)
# Define Loss
disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))
gen_loss = -tf.reduce_mean(tf.log(disc_fake))
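# These are the losses from Goodfellow et al. (2014): minimizing disc_loss
# maximizes log D(x) + log(1 - D(G(z))) for the discriminator, while gen_loss
# uses the non-saturating form -log D(G(z)) for the generator, which gives
# stronger gradients early in training than minimizing log(1 - D(G(z))).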
# Build Training Ops for both Generator and Discriminator.
# Each network optimization should only update its own variable, thus we need
# to retrieve each network variables (with get_layer_variables_by_scope) and set
# 'placeholder=None' because we do not need to feed any target.
gen_vars = tflearn.get_layer_variables_by_scope('Generator')
gen_model = tflearn.regression(gen_sample, placeholder=None, optimizer='adam',
loss=gen_loss, trainable_vars=gen_vars,
batch_size=64, name='target_gen', op_name='GEN')
disc_vars = tflearn.get_layer_variables_by_scope('Discriminator')
disc_model = tflearn.regression(disc_real, placeholder=None, optimizer='adam',
loss=disc_loss, trainable_vars=disc_vars,
batch_size=64, name='target_disc', op_name='DISC')
# Define GAN model, that output the generated images.
gan = tflearn.DNN(gen_model)
# Training
# Generate noise to feed to the generator
z = np.random.uniform(-1., 1., size=[total_samples, z_dim])
# z = np.random.poisson(lam=1, size=z_dim)
# Start training, feed both noise and real images.
gan.fit(X_inputs={gen_input: z, disc_input: x},
Y_targets=None,
n_epoch=20)
# Generate images from noise, using the generator network.
f, a = plt.subplots(2, 2, figsize=(10, 4))
for i in range(2):
for j in range(2):
# Noise input.
z = np.random.uniform(-1., 1., size=[1, z_dim])
# z = np.random.poisson(lam=1, size=z_dim)
# Generate image from noise. Extend to 3 channels for matplot figure.
# temp = [[ii, ii, ii] for ii in list(gan.predict([z])[0])]
# a[j][i].imshow(np.reshape(temp, (28, 28, 3)))
temp = list(gan.predict([z])[0])
a[j][i].plot(temp)
f.savefig('test_gan.png')
| mit |
Midafi/scikit-image | doc/examples/plot_ssim.py | 3 | 2274 | """
===========================
Structural similarity index
===========================
When comparing images, the mean squared error (MSE)--while simple to
implement--is not highly indicative of perceived similarity. Structural
similarity aims to address this shortcoming by taking texture into account
[1]_, [2]_.
The example shows two modifications of the input image, each with the same MSE,
but with very different mean structural similarity indices.
.. [1] Zhou Wang; Bovik, A.C.; ,"Mean squared error: Love it or leave it? A new
look at Signal Fidelity Measures," Signal Processing Magazine, IEEE,
vol. 26, no. 1, pp. 98-117, Jan. 2009.
.. [2] Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image quality
assessment: From error visibility to structural similarity," IEEE
Transactions on Image Processing, vol. 13, no. 4, pp. 600-612,
Apr. 2004.
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.measure import structural_similarity as ssim
matplotlib.rcParams['font.size'] = 9
img = img_as_float(data.camera())
rows, cols = img.shape
noise = np.ones_like(img) * 0.2 * (img.max() - img.min())
noise[np.random.random(size=noise.shape) > 0.5] *= -1
def mse(x, y):
return np.linalg.norm(x - y)
img_noise = img + noise
img_const = img + abs(noise)
fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(8, 4))
mse_none = mse(img, img)
ssim_none = ssim(img, img, dynamic_range=img.max() - img.min())
mse_noise = mse(img, img_noise)
ssim_noise = ssim(img, img_noise,
                  dynamic_range=img_noise.max() - img_noise.min())
mse_const = mse(img, img_const)
ssim_const = ssim(img, img_const,
                  dynamic_range=img_const.max() - img_const.min())
label = 'MSE: %2.f, SSIM: %.2f'
ax0.imshow(img, cmap=plt.cm.gray, vmin=0, vmax=1)
ax0.set_xlabel(label % (mse_none, ssim_none))
ax0.set_title('Original image')
ax1.imshow(img_noise, cmap=plt.cm.gray, vmin=0, vmax=1)
ax1.set_xlabel(label % (mse_noise, ssim_noise))
ax1.set_title('Image with noise')
ax2.imshow(img_const, cmap=plt.cm.gray, vmin=0, vmax=1)
ax2.set_xlabel(label % (mse_const, ssim_const))
ax2.set_title('Image plus constant')
plt.show()
| bsd-3-clause |
spbguru/repo1 | external/linux32/lib/python2.6/site-packages/matplotlib/__init__.py | 69 | 28184 | """
This is an object-orient plotting library.
A procedural interface is provided by the companion pylab module,
which may be imported directly, e.g::
from pylab import *
or using ipython::
ipython -pylab
For the most part, direct use of the object-oriented library is
encouraged when programming rather than working interactively. The
exceptions are the pylab commands :func:`~matplotlib.pyplot.figure`,
:func:`~matplotlib.pyplot.subplot`,
:func:`~matplotlib.backends.backend_qt4agg.show`, and
:func:`~pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
defines the :class:`~matplotlib.axes.Axes` class. Most pylab
commands are wrappers for :class:`~matplotlib.axes.Axes`
methods. The axes module is the highest level of OO access to
the library.
:mod:`matplotlib.figure`
defines the :class:`~matplotlib.figure.Figure` class.
:mod:`matplotlib.artist`
defines the :class:`~matplotlib.artist.Artist` base class for
all classes that draw things.
:mod:`matplotlib.lines`
defines the :class:`~matplotlib.lines.Line2D` class for
drawing lines and markers
:mod`matplotlib.patches`
defines classes for drawing polygons
:mod:`matplotlib.text`
defines the :class:`~matplotlib.text.Text`,
:class:`~matplotlib.text.TextWithDash`, and
:class:`~matplotlib.text.Annotate` classes
:mod:`matplotlib.image`
defines the :class:`~matplotlib.image.AxesImage` and
:class:`~matplotlib.image.FigureImage` classes
:mod:`matplotlib.collections`
classes for efficient drawing of groups of lines or polygons
:mod:`matplotlib.colors`
classes for interpreting color specifications and for making
colormaps
:mod:`matplotlib.cm`
colormaps and the :class:`~matplotlib.image.ScalarMappable`
mixin class for providing color mapping functionality to other
classes
:mod:`matplotlib.ticker`
classes for calculating tick mark locations and for formatting
tick labels
:mod:`matplotlib.backends`
a subpackage with modules for various gui libraries and output
formats
The base matplotlib namespace includes:
:data:`~matplotlib.rcParams`
a global dictionary of default configuration settings. It is
    initialized by code which may be overridden by a matplotlibrc
file.
:func:`~matplotlib.rc`
a function for setting groups of rcParams values
:func:`~matplotlib.use`
a function for setting the matplotlib backend. If used, this
function must be called immediately after importing matplotlib
for the first time. In particular, it must be called
**before** importing pylab (if pylab is imported).
matplotlib is written by John D. Hunter (jdh2358 at gmail.com) and a
host of others.
"""
from __future__ import generators
__version__ = '0.98.5.2'
__revision__ = '$Revision: 6660 $'
__date__ = '$Date: 2008-12-18 06:10:51 -0600 (Thu, 18 Dec 2008) $'
import os, re, shutil, subprocess, sys, warnings
import distutils.sysconfig
import distutils.version
NEWCONFIG = False
# Needed for toolkit setuptools support
if 0:
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
pass # must not have setuptools
if not hasattr(sys, 'argv'): # for modpython
sys.argv = ['modpython']
"""
Manage user customizations through a rc file.
The default file location is given in the following order
- environment variable MATPLOTLIBRC
- HOME/.matplotlib/matplotlibrc if HOME is defined
- PATH/matplotlibrc where PATH is the return value of
get_data_path()
"""
import sys, os, tempfile
from rcsetup import defaultParams, validate_backend, validate_toolbar
from rcsetup import validate_cairo_format
major, minor1, minor2, s, tmp = sys.version_info
_python24 = major>=2 and minor1>=4
# the havedate check was a legacy from old matplotlib which preceded
# datetime support
_havedate = True
#try:
# import pkg_resources # pkg_resources is part of setuptools
#except ImportError: _have_pkg_resources = False
#else: _have_pkg_resources = True
if not _python24:
raise ImportError('matplotlib requires Python 2.4 or later')
import numpy
nn = numpy.__version__.split('.')
if not (int(nn[0]) >= 1 and int(nn[1]) >= 1):
raise ImportError(
'numpy 1.1 or later is required; you have %s' % numpy.__version__)
def is_string_like(obj):
if hasattr(obj, 'shape'): return 0
try: obj + ''
except (TypeError, ValueError): return 0
return 1
def _is_writable_dir(p):
"""
    p is a string pointing to a putative writable dir -- return True if p
    is such a string, else False
"""
try: p + '' # test is string like
except TypeError: return False
try:
t = tempfile.TemporaryFile(dir=p)
t.write('1')
t.close()
except OSError: return False
else: return True
class Verbose:
"""
A class to handle reporting. Set the fileo attribute to any file
instance to handle the output. Default is sys.stdout
"""
levels = ('silent', 'helpful', 'debug', 'debug-annoying')
vald = dict( [(level, i) for i,level in enumerate(levels)])
# parse the verbosity from the command line; flags look like
# --verbose-silent or --verbose-helpful
_commandLineVerbose = None
for arg in sys.argv[1:]:
if not arg.startswith('--verbose-'): continue
_commandLineVerbose = arg[10:]
def __init__(self):
self.set_level('silent')
self.fileo = sys.stdout
def set_level(self, level):
'set the verbosity to one of the Verbose.levels strings'
if self._commandLineVerbose is not None:
level = self._commandLineVerbose
if level not in self.levels:
raise ValueError('Illegal verbose string "%s". Legal values are %s'%(level, self.levels))
self.level = level
def set_fileo(self, fname):
std = {
'sys.stdout': sys.stdout,
'sys.stderr': sys.stderr,
}
if fname in std:
self.fileo = std[fname]
else:
try:
fileo = file(fname, 'w')
except IOError:
raise ValueError('Verbose object could not open log file "%s" for writing.\nCheck your matplotlibrc verbose.fileo setting'%fname)
else:
self.fileo = fileo
def report(self, s, level='helpful'):
"""
print message s to self.fileo if self.level>=level. Return
value indicates whether a message was issued
"""
if self.ge(level):
print >>self.fileo, s
return True
return False
def wrap(self, fmt, func, level='helpful', always=True):
"""
return a callable function that wraps func and reports it
output through the verbose handler if current verbosity level
is higher than level
if always is True, the report will occur on every function
call; otherwise only on the first time the function is called
"""
assert callable(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if (always or not wrapper._spoke):
spoke = self.report(fmt%ret, level)
if not wrapper._spoke: wrapper._spoke = spoke
return ret
wrapper._spoke = False
wrapper.__doc__ = func.__doc__
return wrapper
def ge(self, level):
'return true if self.level is >= level'
return self.vald[self.level]>=self.vald[level]
verbose=Verbose()
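# Usage note: ``verbose.wrap`` is used further down in this module, e.g.
# ``get_home = verbose.wrap('$HOME=%s', _get_home, always=False)``, so the
# resolved value is reported through ``verbose.report`` only when the
# verbosity level is at least 'helpful'.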
def checkdep_dvipng():
try:
s = subprocess.Popen(['dvipng','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = s.stdout.readlines()[1]
v = line.split()[-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_ghostscript():
try:
if sys.platform == 'win32':
command_args = ['gswin32c', '--version']
else:
command_args = ['gs', '--version']
s = subprocess.Popen(command_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
v = s.stdout.read()[:-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_tex():
try:
s = subprocess.Popen(['tex','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = s.stdout.readlines()[0]
pattern = '3\.1\d+'
match = re.search(pattern, line)
v = match.group(0)
return v
except (IndexError, ValueError, AttributeError, OSError):
return None
def checkdep_pdftops():
try:
s = subprocess.Popen(['pdftops','-v'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in s.stderr:
if 'version' in line:
v = line.split()[-1]
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def compare_versions(a, b):
"return True if a is greater than or equal to b"
if a:
a = distutils.version.LooseVersion(a)
b = distutils.version.LooseVersion(b)
if a>=b: return True
else: return False
else: return False
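# e.g. compare_versions('7.07', '7.0') is True (LooseVersion comparison),
# while compare_versions(None, '7.0') is False because a missing version
# string is treated as "not satisfied".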
def checkdep_ps_distiller(s):
if not s:
return False
flag = True
gs_req = '7.07'
gs_sugg = '7.07'
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later '
'is recommended to use the ps.usedistiller option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller option can not be used '
'unless ghostscript-%s or later is installed on your system') % gs_req)
if s == 'xpdf':
pdftops_req = '3.0'
pdftops_req_alt = '0.9' # poppler version numbers, ugh
pdftops_v = checkdep_pdftops()
if compare_versions(pdftops_v, pdftops_req):
pass
elif compare_versions(pdftops_v, pdftops_req_alt) and not \
compare_versions(pdftops_v, '1.0'):
pass
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller can not be set to '
'xpdf unless xpdf-%s or later is installed on your system') % pdftops_req)
if flag:
return s
else:
return False
def checkdep_usetex(s):
if not s:
return False
tex_req = '3.1415'
gs_req = '7.07'
gs_sugg = '7.07'
dvipng_req = '1.5'
flag = True
tex_v = checkdep_tex()
if compare_versions(tex_v, tex_req): pass
else:
flag = False
warnings.warn(('matplotlibrc text.usetex option can not be used '
'unless TeX-%s or later is '
'installed on your system') % tex_req)
dvipng_v = checkdep_dvipng()
if compare_versions(dvipng_v, dvipng_req): pass
else:
flag = False
warnings.warn( 'matplotlibrc text.usetex can not be used with *Agg '
'backend unless dvipng-1.5 or later is '
'installed on your system')
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later is '
'recommended for use with the text.usetex '
'option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc text.usetex can not be used '
'unless ghostscript-%s or later is '
'installed on your system') % gs_req)
return flag
def _get_home():
"""Find user's home directory if possible.
Otherwise raise error.
:see: http://mail.python.org/pipermail/python-list/2005-February/263921.html
"""
path=''
try:
path=os.path.expanduser("~")
except:
pass
if not os.path.isdir(path):
for evar in ('HOME', 'USERPROFILE', 'TMP'):
try:
path = os.environ[evar]
if os.path.isdir(path):
break
except: pass
if path:
return path
else:
raise RuntimeError('please define environment variable $HOME')
get_home = verbose.wrap('$HOME=%s', _get_home, always=False)
def _get_configdir():
"""
Return the string representing the configuration dir.
default is HOME/.matplotlib. you can override this with the
MPLCONFIGDIR environment variable
"""
configdir = os.environ.get('MPLCONFIGDIR')
if configdir is not None:
if not _is_writable_dir(configdir):
raise RuntimeError('Could not write to MPLCONFIGDIR="%s"'%configdir)
return configdir
h = get_home()
p = os.path.join(get_home(), '.matplotlib')
if os.path.exists(p):
if not _is_writable_dir(p):
raise RuntimeError("'%s' is not a writable dir; you must set %s/.matplotlib to be a writable dir. You can also set environment variable MPLCONFIGDIR to any writable directory where you want matplotlib data stored "% (h, h))
else:
if not _is_writable_dir(h):
raise RuntimeError("Failed to create %s/.matplotlib; consider setting MPLCONFIGDIR to a writable directory for matplotlib configuration data"%h)
os.mkdir(p)
return p
get_configdir = verbose.wrap('CONFIGDIR=%s', _get_configdir, always=False)
def _get_data_path():
'get the path to matplotlib data'
if 'MATPLOTLIBDATA' in os.environ:
path = os.environ['MATPLOTLIBDATA']
if not os.path.isdir(path):
raise RuntimeError('Path in environment MATPLOTLIBDATA not a directory')
return path
path = os.sep.join([os.path.dirname(__file__), 'mpl-data'])
if os.path.isdir(path): return path
# setuptools' namespace_packages may highjack this init file
# so need to try something known to be in matplotlib, not basemap
import matplotlib.afm
path = os.sep.join([os.path.dirname(matplotlib.afm.__file__), 'mpl-data'])
if os.path.isdir(path): return path
# py2exe zips pure python, so still need special check
if getattr(sys,'frozen',None):
path = os.path.join(os.path.split(sys.path[0])[0], 'mpl-data')
if os.path.isdir(path): return path
else:
# Try again assuming we need to step up one more directory
path = os.path.join(os.path.split(os.path.split(sys.path[0])[0])[0],
'mpl-data')
if os.path.isdir(path): return path
else:
# Try again assuming sys.path[0] is a dir not a exe
path = os.path.join(sys.path[0], 'mpl-data')
if os.path.isdir(path): return path
raise RuntimeError('Could not find the matplotlib data files')
def _get_data_path_cached():
if defaultParams['datapath'][0] is None:
defaultParams['datapath'][0] = _get_data_path()
return defaultParams['datapath'][0]
get_data_path = verbose.wrap('matplotlib data path %s', _get_data_path_cached,
always=False)
def get_example_data(fname):
"""
return a filehandle to one of the example files in mpl-data/example
*fname*
the name of one of the files in mpl-data/example
"""
datadir = os.path.join(get_data_path(), 'example')
fullpath = os.path.join(datadir, fname)
if not os.path.exists(fullpath):
raise IOError('could not find matplotlib example file "%s" in data directory "%s"'%(
fname, datadir))
return file(fullpath, 'rb')
def get_py2exe_datafiles():
datapath = get_data_path()
head, tail = os.path.split(datapath)
d = {}
for root, dirs, files in os.walk(datapath):
# Need to explicitly remove cocoa_agg files or py2exe complains
        # NOTE I don't know why, but do as previous version
if 'Matplotlib.nib' in files:
files.remove('Matplotlib.nib')
files = [os.path.join(root, filename) for filename in files]
root = root.replace(tail, 'mpl-data')
root = root[root.index('mpl-data'):]
d[root] = files
return d.items()
def matplotlib_fname():
"""
Return the path to the rc file
Search order:
* current working dir
* environ var MATPLOTLIBRC
* HOME/.matplotlib/matplotlibrc
* MATPLOTLIBDATA/matplotlibrc
"""
oldname = os.path.join( os.getcwd(), '.matplotlibrc')
if os.path.exists(oldname):
print >> sys.stderr, """\
WARNING: Old rc filename ".matplotlibrc" found in working dir
 and renamed to new default rc file name "matplotlibrc"
(no leading "dot"). """
shutil.move('.matplotlibrc', 'matplotlibrc')
home = get_home()
oldname = os.path.join( home, '.matplotlibrc')
if os.path.exists(oldname):
configdir = get_configdir()
newname = os.path.join(configdir, 'matplotlibrc')
print >> sys.stderr, """\
WARNING: Old rc filename "%s" found and renamed to
new default rc file name "%s"."""%(oldname, newname)
shutil.move(oldname, newname)
fname = os.path.join( os.getcwd(), 'matplotlibrc')
if os.path.exists(fname): return fname
if 'MATPLOTLIBRC' in os.environ:
path = os.environ['MATPLOTLIBRC']
if os.path.exists(path):
fname = os.path.join(path, 'matplotlibrc')
if os.path.exists(fname):
return fname
fname = os.path.join(get_configdir(), 'matplotlibrc')
if os.path.exists(fname): return fname
path = get_data_path() # guaranteed to exist or raise
fname = os.path.join(path, 'matplotlibrc')
if not os.path.exists(fname):
warnings.warn('Could not find matplotlibrc; using defaults')
return fname
_deprecated_map = {
'text.fontstyle': 'font.style',
'text.fontangle': 'font.style',
'text.fontvariant': 'font.variant',
'text.fontweight': 'font.weight',
'text.fontsize': 'font.size',
'tick.size' : 'tick.major.size',
}
class RcParams(dict):
"""
A dictionary object including validation
validating functions are defined and associated with rc parameters in
:mod:`matplotlib.rcsetup`
"""
validate = dict([ (key, converter) for key, (default, converter) in \
defaultParams.iteritems() ])
def __setitem__(self, key, val):
try:
if key in _deprecated_map.keys():
alt = _deprecated_map[key]
warnings.warn('%s is deprecated in matplotlibrc. Use %s \
instead.'% (key, alt))
key = alt
cval = self.validate[key](val)
dict.__setitem__(self, key, cval)
except KeyError:
raise KeyError('%s is not a valid rc parameter.\
See rcParams.keys() for a list of valid parameters.'%key)
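# Note: assignment on an RcParams instance goes through the validate function
# registered for that key in defaultParams, so string input such as '2' for
# 'lines.linewidth' is coerced to the proper type, while assigning to an
# unknown key raises KeyError instead of silently adding it.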
def rc_params(fail_on_error=False):
'Return the default params updated from the values in the rc file'
fname = matplotlib_fname()
if not os.path.exists(fname):
# this should never happen, default in mpl-data should always be found
message = 'could not find rc file; returning defaults'
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
warnings.warn(message)
return ret
cnt = 0
rc_temp = {}
for line in file(fname):
cnt += 1
strippedline = line.split('#',1)[0].strip()
if not strippedline: continue
tup = strippedline.split(':',1)
if len(tup) !=2:
warnings.warn('Illegal line #%d\n\t%s\n\tin file "%s"'%\
(cnt, line, fname))
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
warnings.warn('Duplicate key in file "%s", line #%d'%(fname,cnt))
rc_temp[key] = (val, line, cnt)
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
for key in ('verbose.level', 'verbose.fileo'):
if key in rc_temp:
val, line, cnt = rc_temp.pop(key)
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception, msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
verbose.set_level(ret['verbose.level'])
verbose.set_fileo(ret['verbose.fileo'])
for key, (val, line, cnt) in rc_temp.iteritems():
if key in defaultParams:
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception, msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
else:
print >> sys.stderr, """
Bad key "%s" on line %d in
%s.
You probably need to get an updated matplotlibrc file from
http://matplotlib.sf.net/_static/matplotlibrc or from the matplotlib source
distribution""" % (key, cnt, fname)
if ret['datapath'] is None:
ret['datapath'] = get_data_path()
if not ret['text.latex.preamble'] == ['']:
verbose.report("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
"""% '\n'.join(ret['text.latex.preamble']), 'helpful')
verbose.report('loaded rc file %s'%fname)
return ret
# this is the instance used by the matplotlib classes
rcParams = rc_params()
rcParamsDefault = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
rcParams['ps.usedistiller'] = checkdep_ps_distiller(rcParams['ps.usedistiller'])
rcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])
def rc(group, **kwargs):
"""
Set the current rc params. Group is the grouping for the rc, eg.
for ``lines.linewidth`` the group is ``lines``, for
``axes.facecolor``, the group is ``axes``, and so on. Group may
also be a list or tuple of group names, eg. (*xtick*, *ytick*).
*kwargs* is a dictionary attribute name/value pairs, eg::
rc('lines', linewidth=2, color='r')
sets the current rc params and is equivalent to::
rcParams['lines.linewidth'] = 2
rcParams['lines.color'] = 'r'
The following aliases are available to save typing for interactive
users:
===== =================
Alias Property
===== =================
'lw' 'linewidth'
'ls' 'linestyle'
'c' 'color'
'fc' 'facecolor'
'ec' 'edgecolor'
'mew' 'markeredgewidth'
'aa' 'antialiased'
===== =================
Thus you could abbreviate the above rc command as::
rc('lines', lw=2, c='r')
Note you can use python's kwargs dictionary facility to store
dictionaries of default parameters. Eg, you can customize the
font rc as follows::
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 'larger'}
rc('font', **font) # pass in the font dict as kwargs
This enables you to easily switch between several configurations.
Use :func:`~matplotlib.pyplot.rcdefaults` to restore the default
rc params after changes.
"""
aliases = {
'lw' : 'linewidth',
'ls' : 'linestyle',
'c' : 'color',
'fc' : 'facecolor',
'ec' : 'edgecolor',
'mew' : 'markeredgewidth',
'aa' : 'antialiased',
}
if is_string_like(group):
group = (group,)
for g in group:
for k,v in kwargs.items():
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
if key not in rcParams:
raise KeyError('Unrecognized key "%s" for group "%s" and name "%s"' %
(key, g, name))
rcParams[key] = v
def rcdefaults():
"""
Restore the default rc params - the ones that were created at
matplotlib load time.
"""
rcParams.update(rcParamsDefault)
if NEWCONFIG:
#print "importing from reorganized config system!"
try:
from config import rcParams, rcdefaults, mplConfig, save_config
verbose.set_level(rcParams['verbose.level'])
verbose.set_fileo(rcParams['verbose.fileo'])
except:
from config import rcParams, rcdefaults
_use_error_msg = """ This call to matplotlib.use() has no effect
because the backend has already been chosen;
matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
or matplotlib.backends is imported for the first time.
"""
def use(arg, warn=True):
"""
Set the matplotlib backend to one of the known backends.
The argument is case-insensitive. For the Cairo backend,
the argument can have an extension to indicate the type of
output. Example:
use('cairo.pdf')
will specify a default of pdf output generated by Cairo.
Note: this function must be called *before* importing pylab for
the first time; or, if you are not using pylab, it must be called
before importing matplotlib.backends. If warn is True, a warning
    is issued if you try to call this after pylab or pyplot have been
loaded. In certain black magic use cases, eg
pyplot.switch_backends, we are doing the reloading necessary to
make the backend switch work (in some cases, eg pure image
    backends), so one can set warn=False to suppress the warnings
"""
if 'matplotlib.backends' in sys.modules:
if warn: warnings.warn(_use_error_msg)
return
arg = arg.lower()
if arg.startswith('module://'):
name = arg
else:
be_parts = arg.split('.')
name = validate_backend(be_parts[0])
rcParams['backend'] = name
if name == 'cairo' and len(be_parts) > 1:
rcParams['cairo.format'] = validate_cairo_format(be_parts[1])
def get_backend():
"Returns the current backend"
return rcParams['backend']
def interactive(b):
"""
Set interactive mode to boolean b.
If b is True, then draw after every plotting command, eg, after xlabel
"""
rcParams['interactive'] = b
def is_interactive():
'Return true if plot mode is interactive'
b = rcParams['interactive']
return b
def tk_window_focus():
"""Return true if focus maintenance under TkAgg on win32 is on.
This currently works only for python.exe and IPython.exe.
Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on."""
if rcParams['backend'] != 'TkAgg':
return False
return rcParams['tk.window_focus']
# Now allow command line to override
# Allow command line access to the backend with -d (matlab compatible
# flag)
for s in sys.argv[1:]:
if s.startswith('-d') and len(s) > 2: # look for a -d flag
try:
use(s[2:])
except (KeyError, ValueError):
pass
# we don't want to assume all -d flags are backends, eg -debug
verbose.report('matplotlib version %s'%__version__)
verbose.report('verbose.level %s'%verbose.level)
verbose.report('interactive is %s'%rcParams['interactive'])
verbose.report('units is %s'%rcParams['units'])
verbose.report('platform is %s'%sys.platform)
verbose.report('loaded modules: %s'%sys.modules.keys(), 'debug')
| gpl-3.0 |
muku42/seaborn | seaborn/tests/test_linearmodels.py | 8 | 18843 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import nose.tools as nt
import numpy.testing as npt
import pandas.util.testing as pdt
from numpy.testing.decorators import skipif
from nose import SkipTest
try:
import statsmodels.regression.linear_model as smlm
_no_statsmodels = False
except ImportError:
_no_statsmodels = True
from . import PlotTestCase
from .. import linearmodels as lm
from .. import algorithms as algo
from .. import utils
from ..palettes import color_palette
rs = np.random.RandomState(0)
class TestLinearPlotter(PlotTestCase):
rs = np.random.RandomState(77)
df = pd.DataFrame(dict(x=rs.normal(size=60),
d=rs.randint(-2, 3, 60),
y=rs.gamma(4, size=60),
s=np.tile(list("abcdefghij"), 6)))
df["z"] = df.y + rs.randn(60)
df["y_na"] = df.y.copy()
df.y_na.ix[[10, 20, 30]] = np.nan
def test_establish_variables_from_frame(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y="y")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_frame_equal(p.data, self.df)
def test_establish_variables_from_series(self):
p = lm._LinearPlotter()
p.establish_variables(None, x=self.df.x, y=self.df.y)
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
nt.assert_is(p.data, None)
def test_establish_variables_from_array(self):
p = lm._LinearPlotter()
p.establish_variables(None,
x=self.df.x.values,
y=self.df.y.values)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y)
nt.assert_is(p.data, None)
def test_establish_variables_from_mix(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y=self.df.y)
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_frame_equal(p.data, self.df)
def test_establish_variables_from_bad(self):
p = lm._LinearPlotter()
with nt.assert_raises(ValueError):
p.establish_variables(None, x="x", y=self.df.y)
def test_dropna(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y_na="y_na")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y_na, self.df.y_na)
p.dropna("x", "y_na")
mask = self.df.y_na.notnull()
pdt.assert_series_equal(p.x, self.df.x[mask])
pdt.assert_series_equal(p.y_na, self.df.y_na[mask])
class TestRegressionPlotter(PlotTestCase):
rs = np.random.RandomState(49)
grid = np.linspace(-3, 3, 30)
n_boot = 100
bins_numeric = 3
bins_given = [-1, 0, 1]
df = pd.DataFrame(dict(x=rs.normal(size=60),
d=rs.randint(-2, 3, 60),
y=rs.gamma(4, size=60),
s=np.tile(list(range(6)), 10)))
df["z"] = df.y + rs.randn(60)
df["y_na"] = df.y.copy()
bw_err = rs.randn(6)[df.s.values] * 2
df.y += bw_err
p = 1 / (1 + np.exp(-(df.x * 2 + rs.randn(60))))
df["c"] = [rs.binomial(1, p_i) for p_i in p]
df.y_na.ix[[10, 20, 30]] = np.nan
def test_variables_from_frame(self):
p = lm._RegressionPlotter("x", "y", data=self.df, units="s")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_series_equal(p.units, self.df.s)
pdt.assert_frame_equal(p.data, self.df)
def test_variables_from_series(self):
p = lm._RegressionPlotter(self.df.x, self.df.y, units=self.df.s)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y)
npt.assert_array_equal(p.units, self.df.s)
nt.assert_is(p.data, None)
def test_variables_from_mix(self):
p = lm._RegressionPlotter("x", self.df.y + 1, data=self.df)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y + 1)
pdt.assert_frame_equal(p.data, self.df)
def test_dropna(self):
p = lm._RegressionPlotter("x", "y_na", data=self.df)
nt.assert_equal(len(p.x), pd.notnull(self.df.y_na).sum())
p = lm._RegressionPlotter("x", "y_na", data=self.df, dropna=False)
nt.assert_equal(len(p.x), len(self.df.y_na))
def test_ci(self):
p = lm._RegressionPlotter("x", "y", data=self.df, ci=95)
nt.assert_equal(p.ci, 95)
nt.assert_equal(p.x_ci, 95)
p = lm._RegressionPlotter("x", "y", data=self.df, ci=95, x_ci=68)
nt.assert_equal(p.ci, 95)
nt.assert_equal(p.x_ci, 68)
@skipif(_no_statsmodels)
def test_fast_regression(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
# Fit with the "fast" function, which just does linear algebra
yhat_fast, _ = p.fit_fast(self.grid)
# Fit using the statsmodels function with an OLS model
yhat_smod, _ = p.fit_statsmodels(self.grid, smlm.OLS)
# Compare the vector of y_hat values
npt.assert_array_almost_equal(yhat_fast, yhat_smod)
@skipif(_no_statsmodels)
def test_regress_poly(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
        # Fit a first-order polynomial
yhat_poly, _ = p.fit_poly(self.grid, 1)
# Fit using the statsmodels function with an OLS model
yhat_smod, _ = p.fit_statsmodels(self.grid, smlm.OLS)
# Compare the vector of y_hat values
npt.assert_array_almost_equal(yhat_poly, yhat_smod)
def test_regress_logx(self):
x = np.arange(1, 10)
y = np.arange(1, 10)
grid = np.linspace(1, 10, 100)
p = lm._RegressionPlotter(x, y, n_boot=self.n_boot)
yhat_lin, _ = p.fit_fast(grid)
yhat_log, _ = p.fit_logx(grid)
nt.assert_greater(yhat_lin[0], yhat_log[0])
nt.assert_greater(yhat_log[20], yhat_lin[20])
nt.assert_greater(yhat_lin[90], yhat_log[90])
@skipif(_no_statsmodels)
def test_regress_n_boot(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
# Fast (linear algebra) version
_, boots_fast = p.fit_fast(self.grid)
npt.assert_equal(boots_fast.shape, (self.n_boot, self.grid.size))
# Slower (np.polyfit) version
_, boots_poly = p.fit_poly(self.grid, 1)
npt.assert_equal(boots_poly.shape, (self.n_boot, self.grid.size))
# Slowest (statsmodels) version
_, boots_smod = p.fit_statsmodels(self.grid, smlm.OLS)
npt.assert_equal(boots_smod.shape, (self.n_boot, self.grid.size))
@skipif(_no_statsmodels)
def test_regress_without_bootstrap(self):
p = lm._RegressionPlotter("x", "y", data=self.df,
n_boot=self.n_boot, ci=None)
# Fast (linear algebra) version
_, boots_fast = p.fit_fast(self.grid)
nt.assert_is(boots_fast, None)
# Slower (np.polyfit) version
_, boots_poly = p.fit_poly(self.grid, 1)
nt.assert_is(boots_poly, None)
# Slowest (statsmodels) version
_, boots_smod = p.fit_statsmodels(self.grid, smlm.OLS)
nt.assert_is(boots_smod, None)
def test_numeric_bins(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_numeric)
npt.assert_equal(len(bins), self.bins_numeric)
npt.assert_array_equal(np.unique(x_binned), bins)
def test_provided_bins(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_given)
npt.assert_array_equal(np.unique(x_binned), self.bins_given)
def test_bin_results(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_given)
nt.assert_greater(self.df.x[x_binned == 0].min(),
self.df.x[x_binned == -1].max())
nt.assert_greater(self.df.x[x_binned == 1].min(),
self.df.x[x_binned == 0].max())
def test_scatter_data(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.d)
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y, x_jitter=.1)
x, y = p.scatter_data
nt.assert_true((x != self.df.d).any())
npt.assert_array_less(np.abs(self.df.d - x), np.repeat(.1, len(x)))
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y, y_jitter=.05)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.d)
npt.assert_array_less(np.abs(self.df.y - y), np.repeat(.1, len(y)))
def test_estimate_data(self):
p = lm._RegressionPlotter(self.df.d, self.df.y, x_estimator=np.mean)
x, y, ci = p.estimate_data
npt.assert_array_equal(x, np.sort(np.unique(self.df.d)))
npt.assert_array_almost_equal(y, self.df.groupby("d").y.mean())
npt.assert_array_less(np.array(ci)[:, 0], y)
npt.assert_array_less(y, np.array(ci)[:, 1])
def test_estimate_cis(self):
# set known good seed to avoid the test stochastically failing
np.random.seed(123)
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=95)
_, _, ci_big = p.estimate_data
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=50)
_, _, ci_wee = p.estimate_data
npt.assert_array_less(np.diff(ci_wee), np.diff(ci_big))
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=None)
_, _, ci_nil = p.estimate_data
npt.assert_array_equal(ci_nil, [None] * len(ci_nil))
def test_estimate_units(self):
# Seed the RNG locally
np.random.seed(345)
p = lm._RegressionPlotter("x", "y", data=self.df,
units="s", x_bins=3)
_, _, ci_big = p.estimate_data
ci_big = np.diff(ci_big, axis=1)
p = lm._RegressionPlotter("x", "y", data=self.df, x_bins=3)
_, _, ci_wee = p.estimate_data
ci_wee = np.diff(ci_wee, axis=1)
npt.assert_array_less(ci_wee, ci_big)
def test_partial(self):
x = self.rs.randn(100)
y = x + self.rs.randn(100)
z = x + self.rs.randn(100)
p = lm._RegressionPlotter(y, z)
_, r_orig = np.corrcoef(p.x, p.y)[0]
p = lm._RegressionPlotter(y, z, y_partial=x)
_, r_semipartial = np.corrcoef(p.x, p.y)[0]
nt.assert_less(r_semipartial, r_orig)
p = lm._RegressionPlotter(y, z, x_partial=x, y_partial=x)
_, r_partial = np.corrcoef(p.x, p.y)[0]
nt.assert_less(r_partial, r_orig)
@skipif(_no_statsmodels)
def test_logistic_regression(self):
p = lm._RegressionPlotter("x", "c", data=self.df,
logistic=True, n_boot=self.n_boot)
_, yhat, _ = p.fit_regression(x_range=(-3, 3))
npt.assert_array_less(yhat, 1)
npt.assert_array_less(0, yhat)
@skipif(_no_statsmodels)
def test_robust_regression(self):
p_ols = lm._RegressionPlotter("x", "y", data=self.df,
n_boot=self.n_boot)
_, ols_yhat, _ = p_ols.fit_regression(x_range=(-3, 3))
p_robust = lm._RegressionPlotter("x", "y", data=self.df,
robust=True, n_boot=self.n_boot)
_, robust_yhat, _ = p_robust.fit_regression(x_range=(-3, 3))
nt.assert_equal(len(ols_yhat), len(robust_yhat))
@skipif(_no_statsmodels)
def test_lowess_regression(self):
p = lm._RegressionPlotter("x", "y", data=self.df, lowess=True)
grid, yhat, err_bands = p.fit_regression(x_range=(-3, 3))
nt.assert_equal(len(grid), len(yhat))
nt.assert_is(err_bands, None)
def test_regression_options(self):
with nt.assert_raises(ValueError):
lm._RegressionPlotter("x", "y", data=self.df,
lowess=True, order=2)
with nt.assert_raises(ValueError):
lm._RegressionPlotter("x", "y", data=self.df,
lowess=True, logistic=True)
def test_regression_limits(self):
f, ax = plt.subplots()
ax.scatter(self.df.x, self.df.y)
p = lm._RegressionPlotter("x", "y", data=self.df)
grid, _, _ = p.fit_regression(ax)
xlim = ax.get_xlim()
nt.assert_equal(grid.min(), xlim[0])
nt.assert_equal(grid.max(), xlim[1])
p = lm._RegressionPlotter("x", "y", data=self.df, truncate=True)
grid, _, _ = p.fit_regression()
nt.assert_equal(grid.min(), self.df.x.min())
nt.assert_equal(grid.max(), self.df.x.max())
class TestRegressionPlots(PlotTestCase):
rs = np.random.RandomState(56)
df = pd.DataFrame(dict(x=rs.randn(90),
y=rs.randn(90) + 5,
z=rs.randint(0, 1, 90),
g=np.repeat(list("abc"), 30),
h=np.tile(list("xy"), 45),
u=np.tile(np.arange(6), 15)))
bw_err = rs.randn(6)[df.u.values]
df.y += bw_err
def test_regplot_basic(self):
f, ax = plt.subplots()
lm.regplot("x", "y", self.df)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
def test_regplot_selective(self):
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, scatter=False, ax=ax)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, fit_reg=False)
nt.assert_equal(len(ax.lines), 0)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, ci=None)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
def test_regplot_scatter_kws_alpha(self):
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5, 0.5]])
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_is(ax.collections[0]._alpha, None)
nt.assert_equal(ax.collections[0]._facecolors[0, 3], 0.5)
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5]])
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_equal(ax.collections[0]._alpha, 0.8)
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5]])
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color,
'alpha': 0.4})
nt.assert_equal(ax.collections[0]._alpha, 0.4)
f, ax = plt.subplots()
color = 'r'
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_equal(ax.collections[0]._alpha, 0.8)
def test_regplot_binned(self):
ax = lm.regplot("x", "y", self.df, x_bins=5)
nt.assert_equal(len(ax.lines), 6)
nt.assert_equal(len(ax.collections), 2)
def test_lmplot_basic(self):
g = lm.lmplot("x", "y", self.df)
ax = g.axes[0, 0]
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
def test_lmplot_hue(self):
g = lm.lmplot("x", "y", data=self.df, hue="h")
ax = g.axes[0, 0]
nt.assert_equal(len(ax.lines), 2)
nt.assert_equal(len(ax.collections), 4)
def test_lmplot_markers(self):
g1 = lm.lmplot("x", "y", data=self.df, hue="h", markers="s")
nt.assert_equal(g1.hue_kws, {"marker": ["s", "s"]})
g2 = lm.lmplot("x", "y", data=self.df, hue="h", markers=["o", "s"])
nt.assert_equal(g2.hue_kws, {"marker": ["o", "s"]})
with nt.assert_raises(ValueError):
lm.lmplot("x", "y", data=self.df, hue="h", markers=["o", "s", "d"])
def test_lmplot_marker_linewidths(self):
if mpl.__version__ == "1.4.2":
raise SkipTest
g = lm.lmplot("x", "y", data=self.df, hue="h",
fit_reg=False, markers=["o", "+"])
c = g.axes[0, 0].collections
nt.assert_equal(c[0].get_linewidths()[0], 0)
rclw = mpl.rcParams["lines.linewidth"]
nt.assert_equal(c[1].get_linewidths()[0], rclw)
def test_lmplot_facets(self):
g = lm.lmplot("x", "y", data=self.df, row="g", col="h")
nt.assert_equal(g.axes.shape, (3, 2))
g = lm.lmplot("x", "y", data=self.df, col="u", col_wrap=4)
nt.assert_equal(g.axes.shape, (6,))
g = lm.lmplot("x", "y", data=self.df, hue="h", col="u")
nt.assert_equal(g.axes.shape, (1, 6))
def test_lmplot_hue_col_nolegend(self):
g = lm.lmplot("x", "y", data=self.df, col="h", hue="h")
nt.assert_is(g._legend, None)
def test_lmplot_scatter_kws(self):
g = lm.lmplot("x", "y", hue="h", data=self.df, ci=None)
red_scatter, blue_scatter = g.axes[0, 0].collections
red, blue = color_palette(n_colors=2)
npt.assert_array_equal(red, red_scatter.get_facecolors()[0, :3])
npt.assert_array_equal(blue, blue_scatter.get_facecolors()[0, :3])
def test_residplot(self):
x, y = self.df.x, self.df.y
ax = lm.residplot(x, y)
resid = y - np.polyval(np.polyfit(x, y, 1), x)
x_plot, y_plot = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, x_plot)
npt.assert_array_almost_equal(resid, y_plot)
@skipif(_no_statsmodels)
def test_residplot_lowess(self):
ax = lm.residplot("x", "y", self.df, lowess=True)
nt.assert_equal(len(ax.lines), 2)
x, y = ax.lines[1].get_xydata().T
npt.assert_array_equal(x, np.sort(self.df.x))
| bsd-3-clause |
michaelaye/vispy | vispy/testing/__init__.py | 21 | 2415 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Testing
=======
This module provides functions useful for running tests in vispy.
Tests can be run in a few ways:
* From Python, you can import ``vispy`` and do ``vispy.test()``.
* From the source root, you can do ``make test`` which wraps to
a call to ``python make test``.
There are various different testing "modes", including:
* "full": run all tests.
* any backend name (e.g., "glfw"): run application/GL tests using a
specific backend.
* "nobackend": run tests that do not require a backend.
* "examples": run repo examples to check for errors and warnings.
* "flake": check style errors.
Examples get automatically tested unless they have a special comment toward
the top ``# vispy: testskip``. Examples that should be tested should be
formatted so that 1) a ``Canvas`` class is defined, or a ``canvas`` class
is instantiated; and 2) the ``app.run()`` call is protected by a check
if ``__name__ == "__main__"``. This makes it so that the event loop is not
started when running examples in the test suite -- the test suite instead
manually updates the canvas (using ``app.process_events()``) for under one
second to ensure that things like timer events are processed.
For examples on how to test various bits of functionality (e.g., application
functionality, or drawing things with OpenGL), it's best to look at existing
examples in the test suite.
The code base gets automatically tested by Travis-CI (Linux) and AppVeyor
(Windows) on Python 2.6, 2.7, 3.4. There are multiple testing modes that
use e.g. full dependencies, minimal dependencies, etc. See ``.travis.yml``
to determine what automatic tests are run.
"""
from ._testing import (SkipTest, requires_application, requires_ipython, # noqa
requires_img_lib, # noqa
has_backend, requires_pyopengl, # noqa
requires_scipy, has_matplotlib, # noqa
save_testing_image, TestingCanvas, has_pyopengl, # noqa
run_tests_if_main,
assert_is, assert_in, assert_not_in, assert_equal,
assert_not_equal, assert_raises, assert_true, # noqa
raises) # noqa
from ._runners import test # noqa
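# A minimal, hypothetical example script laid out according to the conventions
# described in the module docstring, so the test suite can exercise it without
# entering the event loop (class and variable names below are illustrative):
#
#     from vispy import app
#
#     class Canvas(app.Canvas):
#         def on_draw(self, event):
#             pass  # drawing code would go here
#
#     if __name__ == '__main__':
#         canvas = Canvas()
#         canvas.show()
#         app.run()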
| bsd-3-clause |
hsiaoyi0504/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would cause a division by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
keir-rex/zipline | zipline/history/history_container.py | 18 | 33931 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bisect import insort_left
from collections import namedtuple
from itertools import groupby, product
import logbook
import numpy as np
import pandas as pd
from six import itervalues, iteritems, iterkeys
from . history import HistorySpec
from zipline.finance.trading import with_environment
from zipline.utils.data import RollingPanel, _ensure_index
from zipline.utils.munge import ffill, bfill
logger = logbook.Logger('History Container')
# The closing price is referred to by multiple names,
# allow both for price rollover logic etc.
CLOSING_PRICE_FIELDS = frozenset({'price', 'close_price'})
def ffill_buffer_from_prior_values(freq,
field,
buffer_frame,
digest_frame,
pv_frame,
raw=False):
"""
Forward-fill a buffer frame, falling back to the end-of-period values of a
digest frame if the buffer frame has leading NaNs.
"""
# convert to ndarray if necessary
digest_values = digest_frame
if raw and isinstance(digest_frame, pd.DataFrame):
digest_values = digest_frame.values
buffer_values = buffer_frame
if raw and isinstance(buffer_frame, pd.DataFrame):
buffer_values = buffer_frame.values
nan_sids = pd.isnull(buffer_values[0])
if np.any(nan_sids) and len(digest_values):
# If we have any leading nans in the buffer and we have a non-empty
# digest frame, use the oldest digest values as the initial buffer
# values.
buffer_values[0, nan_sids] = digest_values[-1, nan_sids]
nan_sids = pd.isnull(buffer_values[0])
if np.any(nan_sids):
# If we still have leading nans, fall back to the last known values
# from before the digest.
key_loc = pv_frame.index.get_loc((freq.freq_str, field))
filler = pv_frame.values[key_loc, nan_sids]
buffer_values[0, nan_sids] = filler
if raw:
filled = ffill(buffer_values)
return filled
return buffer_frame.ffill()
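# Illustration of the fill order implemented above (hypothetical values for a
# single sid): a buffer column of [nan, 10.2, nan] first has its leading nan
# seeded from the last digest row (or, failing that, from pv_frame), say 9.8,
# and the column is then forward-filled to [9.8, 10.2, 10.2].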
def ffill_digest_frame_from_prior_values(freq,
field,
digest_frame,
pv_frame,
raw=False):
"""
Forward-fill a digest frame, falling back to the last known prior values if
necessary.
"""
# convert to ndarray if necessary
values = digest_frame
if raw and isinstance(digest_frame, pd.DataFrame):
values = digest_frame.values
nan_sids = pd.isnull(values[0])
if np.any(nan_sids):
# If we have any leading nans in the frame, use values from pv_frame to
# seed values for those sids.
key_loc = pv_frame.index.get_loc((freq.freq_str, field))
filler = pv_frame.values[key_loc, nan_sids]
values[0, nan_sids] = filler
if raw:
filled = ffill(values)
return filled
return digest_frame.ffill()
def freq_str_and_bar_count(history_spec):
"""
Helper for getting the frequency string and bar count from a history spec.
"""
return (history_spec.frequency.freq_str, history_spec.bar_count)
@with_environment()
def next_bar(spec, env):
"""
Returns a function that will return the next bar for a given datetime.
"""
if spec.frequency.unit_str == 'd':
if spec.frequency.data_frequency == 'minute':
return lambda dt: env.get_open_and_close(
env.next_trading_day(dt),
)[1]
else:
return env.next_trading_day
else:
return env.next_market_minute
def compute_largest_specs(history_specs):
"""
Maps a Frequency to the largest HistorySpec at that frequency from an
iterable of HistorySpecs.
"""
return {key: max(group, key=lambda f: f.bar_count)
for key, group in groupby(
sorted(history_specs, key=freq_str_and_bar_count),
key=lambda spec: spec.frequency)}
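# For example (hypothetical specs): given two daily specs of 20 and 50 bars and
# one minutely spec of 15 bars, the result maps the daily frequency to the
# 50-bar spec and the minutely frequency to the 15-bar spec.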
# tuples to store a change to the shape of a HistoryContainer
FrequencyDelta = namedtuple(
'FrequencyDelta',
['freq', 'buffer_delta'],
)
LengthDelta = namedtuple(
'LengthDelta',
['freq', 'delta'],
)
HistoryContainerDeltaSuper = namedtuple(
'HistoryContainerDelta',
['field', 'frequency_delta', 'length_delta'],
)
class HistoryContainerDelta(HistoryContainerDeltaSuper):
"""
A class representing a resize of the history container.
"""
def __new__(cls, field=None, frequency_delta=None, length_delta=None):
"""
field is a new field that was added.
        frequency is a FrequencyDelta representing that a new frequency was added.
length is a bar LengthDelta which is a frequency and a bar_count.
If any field is None, then no change occurred of that type.
"""
return super(HistoryContainerDelta, cls).__new__(
cls, field, frequency_delta, length_delta,
)
@property
def empty(self):
"""
Checks if the delta is empty.
"""
return (self.field is None and
self.frequency_delta is None and
self.length_delta is None)
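# e.g. (hypothetical delta) HistoryContainerDelta(field='volume') records that
# only a new field was added; .empty is True only when no resize happened at all.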
def normalize_to_data_freq(data_frequency, dt):
if data_frequency == 'minute':
return dt
return pd.tslib.normalize_date(dt)
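# e.g. (hypothetical timestamps) a 'minute' data_frequency keeps
# 2014-01-02 15:31 UTC as-is, while any other data_frequency normalizes it to
# midnight on 2014-01-02.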
class HistoryContainer(object):
"""
Container for all history panels and frames used by an algoscript.
To be used internally by TradingAlgorithm, but *not* passed directly to the
algorithm.
Entry point for the algoscript is the result of `get_history`.
"""
VALID_FIELDS = {
'price', 'open_price', 'volume', 'high', 'low', 'close_price',
}
def __init__(self,
history_specs,
initial_sids,
initial_dt,
data_frequency,
bar_data=None):
"""
A container to hold a rolling window of historical data within a user's
algorithm.
Args:
history_specs (dict[Frequency:HistorySpec]): The starting history
specs that this container should be able to service.
initial_sids (set[Asset or Int]): The starting sids to watch.
initial_dt (datetime): The datetime to start collecting history from.
bar_data (BarData): If this container is being constructed during
handle_data, this is the BarData for the current bar to fill the
buffer with. If this is constructed elsewhere, it is None.
Returns:
An instance of a new HistoryContainer
"""
# History specs to be served by this container.
self.history_specs = history_specs
self.largest_specs = compute_largest_specs(
itervalues(self.history_specs)
)
# The set of fields specified by all history specs
self.fields = pd.Index(
sorted(set(spec.field for spec in itervalues(history_specs)))
)
self.sids = pd.Index(
sorted(set(initial_sids or []))
)
self.data_frequency = data_frequency
initial_dt = normalize_to_data_freq(self.data_frequency, initial_dt)
# This panel contains raw minutes for periods that haven't been fully
# completed. When a frequency period rolls over, these minutes are
# digested using some sort of aggregation call on the panel (e.g. `sum`
# for volume, `max` for high, `min` for low, etc.).
self.buffer_panel = self.create_buffer_panel(initial_dt, bar_data)
# Dictionaries with Frequency objects as keys.
self.digest_panels, self.cur_window_starts, self.cur_window_closes = \
self.create_digest_panels(initial_sids, initial_dt)
# Helps prop up the prior day panel against having a nan, when the data
# has been seen.
self.last_known_prior_values = pd.DataFrame(
data=None,
index=self.prior_values_index,
columns=self.prior_values_columns,
# Note: For bizarre "intricacies of the spaghetti that is pandas
# indexing logic" reasons, setting this dtype prevents indexing
# errors in update_last_known_values. This is safe for the time
# being because our only forward-fillable fields are floats. If we
# need to add a non-float-typed forward-fillable field, then we may
# find ourselves having to track down and fix a pandas bug.
dtype=np.float64,
)
_ffillable_fields = None
@property
def ffillable_fields(self):
if self._ffillable_fields is None:
fillables = self.fields.intersection(HistorySpec.FORWARD_FILLABLE)
self._ffillable_fields = fillables
return self._ffillable_fields
@property
def prior_values_index(self):
index_values = list(
product(
(freq.freq_str for freq in self.unique_frequencies),
# Only store prior values for forward-fillable fields.
self.ffillable_fields,
)
)
if index_values:
return pd.MultiIndex.from_tuples(index_values)
else:
# MultiIndex doesn't gracefully support empty input, so we return
            # an empty regular Index if we have no values.
return pd.Index(index_values)
@property
def prior_values_columns(self):
return self.sids
@property
def all_panels(self):
yield self.buffer_panel
for panel in self.digest_panels.values():
yield panel
@property
def unique_frequencies(self):
"""
Return an iterator over all the unique frequencies serviced by this
container.
"""
return iterkeys(self.largest_specs)
@with_environment()
def _add_frequency(self, spec, dt, data, env=None):
"""
Adds a new frequency to the container. This reshapes the buffer_panel
if needed.
"""
freq = spec.frequency
self.largest_specs[freq] = spec
new_buffer_len = 0
if freq.max_bars > self.buffer_panel.window_length:
# More bars need to be held in the buffer_panel to support this
# freq
if freq.data_frequency \
!= self.buffer_spec.frequency.data_frequency:
# If the data_frequencies are not the same, then we need to
# create a fresh buffer.
self.buffer_panel = self.create_buffer_panel(
dt, bar_data=data,
)
new_buffer_len = None
else:
# The frequencies are the same, we just need to add more bars.
self._resize_panel(
self.buffer_panel,
freq.max_bars,
dt,
self.buffer_spec.frequency,
)
new_buffer_len = freq.max_minutes
        # update the current buffer_spec to reflect the new length.
self.buffer_spec.bar_count = new_buffer_len + 1
if spec.bar_count > 1:
# This spec has more than one bar, construct a digest panel for it.
self.digest_panels[freq] = self._create_digest_panel(
dt, spec=spec, env=env,
)
else:
self.cur_window_starts[freq] = dt
self.cur_window_closes[freq] = freq.window_close(
self.cur_window_starts[freq]
)
self.last_known_prior_values = self.last_known_prior_values.reindex(
index=self.prior_values_index,
)
return FrequencyDelta(freq, new_buffer_len)
def _add_field(self, field):
"""
Adds a new field to the container.
"""
# self.fields is already sorted, so we just need to insert the new
# field in the correct index.
ls = list(self.fields)
insort_left(ls, field)
self.fields = pd.Index(ls)
# unset fillable fields cache
self._ffillable_fields = None
self._realign_fields()
self.last_known_prior_values = self.last_known_prior_values.reindex(
index=self.prior_values_index,
)
return field
@with_environment()
def _add_length(self, spec, dt, env=None):
"""
        Increases the length of the digest panel for spec.frequency. If this
        frequency does not yet have a panel and one is needed, a digest panel
        will be constructed.
"""
old_count = self.largest_specs[spec.frequency].bar_count
self.largest_specs[spec.frequency] = spec
delta = spec.bar_count - old_count
panel = self.digest_panels.get(spec.frequency)
if panel is None:
# The old length for this frequency was 1 bar, meaning no digest
# panel was held. We must construct a new one here.
panel = self._create_digest_panel(
dt, spec=spec, env=env,
)
else:
self._resize_panel(
panel, spec.bar_count - 1, dt, freq=spec.frequency, env=env,
)
self.digest_panels[spec.frequency] = panel
return LengthDelta(spec.frequency, delta)
@with_environment()
def _resize_panel(self, panel, size, dt, freq, env=None):
"""
Resizes a panel, fills the date_buf with the correct values.
"""
# This is the oldest datetime that will be shown in the current window
# of the panel.
oldest_dt = pd.Timestamp(panel.start_date, tz='utc',)
delta = size - panel.window_length
# Construct the missing dates.
missing_dts = self._create_window_date_buf(
delta, freq.unit_str, freq.data_frequency, oldest_dt,
)
panel.extend_back(missing_dts)
@with_environment()
def _create_window_date_buf(self,
window,
unit_str,
data_frequency,
dt,
env=None):
"""
Creates a window length date_buf looking backwards from dt.
"""
if unit_str == 'd':
# Get the properly key'd datetime64 out of the pandas Timestamp
if data_frequency != 'daily':
arr = env.open_close_window(
dt,
window,
offset=-window,
).market_close.astype('datetime64[ns]').values
else:
arr = env.open_close_window(
dt,
window,
offset=-window,
).index.values
return arr
else:
return env.market_minute_window(
env.previous_market_minute(dt),
window,
step=-1,
)[::-1].values
@with_environment()
def _create_panel(self, dt, spec, env=None):
"""
Constructs a rolling panel with a properly aligned date_buf.
"""
dt = normalize_to_data_freq(spec.frequency.data_frequency, dt)
window = spec.bar_count - 1
date_buf = self._create_window_date_buf(
window,
spec.frequency.unit_str,
spec.frequency.data_frequency,
dt,
env=env,
)
panel = RollingPanel(
window=window,
items=self.fields,
sids=self.sids,
initial_dates=date_buf,
)
return panel
@with_environment()
def _create_digest_panel(self,
dt,
spec,
window_starts=None,
window_closes=None,
env=None):
"""
Creates a digest panel, setting the window_starts and window_closes.
If window_starts or window_closes are None, then self.cur_window_starts
or self.cur_window_closes will be used.
"""
freq = spec.frequency
window_starts = window_starts if window_starts is not None \
else self.cur_window_starts
window_closes = window_closes if window_closes is not None \
else self.cur_window_closes
window_starts[freq] = freq.normalize(dt)
window_closes[freq] = freq.window_close(window_starts[freq])
return self._create_panel(dt, spec, env=env)
def ensure_spec(self, spec, dt, bar_data):
"""
Ensure that this container has enough space to hold the data for the
given spec. This returns a HistoryContainerDelta to represent the
changes in shape that the container made to support the new
HistorySpec.
"""
updated = {}
if spec.field not in self.fields:
updated['field'] = self._add_field(spec.field)
if spec.frequency not in self.largest_specs:
updated['frequency_delta'] = self._add_frequency(
spec, dt, bar_data,
)
if spec.bar_count > self.largest_specs[spec.frequency].bar_count:
updated['length_delta'] = self._add_length(spec, dt)
return HistoryContainerDelta(**updated)
def add_sids(self, to_add):
"""
Add new sids to the container.
"""
self.sids = pd.Index(
sorted(self.sids.union(_ensure_index(to_add))),
)
self._realign_sids()
def drop_sids(self, to_drop):
"""
Remove sids from the container.
"""
self.sids = pd.Index(
sorted(self.sids.difference(_ensure_index(to_drop))),
)
self._realign_sids()
def _realign_sids(self):
"""
Realign our constituent panels after adding or removing sids.
"""
self.last_known_prior_values = self.last_known_prior_values.reindex(
columns=self.sids,
)
for panel in self.all_panels:
panel.set_minor_axis(self.sids)
def _realign_fields(self):
self.last_known_prior_values = self.last_known_prior_values.reindex(
index=self.prior_values_index,
)
for panel in self.all_panels:
panel.set_items(self.fields)
@with_environment()
def create_digest_panels(self,
initial_sids,
initial_dt,
env=None):
"""
Initialize a RollingPanel for each unique panel frequency being stored
by this container. Each RollingPanel pre-allocates enough storage
space to service the highest bar-count of any history call that it
serves.
"""
# Map from frequency -> first/last minute of the next digest to be
# rolled for that frequency.
first_window_starts = {}
first_window_closes = {}
# Map from frequency -> digest_panels.
panels = {}
for freq, largest_spec in iteritems(self.largest_specs):
if largest_spec.bar_count == 1:
# No need to allocate a digest panel; this frequency will only
# ever use data drawn from self.buffer_panel.
first_window_starts[freq] = freq.normalize(initial_dt)
first_window_closes[freq] = freq.window_close(
first_window_starts[freq]
)
continue
dt = initial_dt
rp = self._create_digest_panel(
dt,
spec=largest_spec,
window_starts=first_window_starts,
window_closes=first_window_closes,
env=env,
)
panels[freq] = rp
return panels, first_window_starts, first_window_closes
def create_buffer_panel(self, initial_dt, bar_data):
"""
Initialize a RollingPanel containing enough minutes to service all our
frequencies.
"""
max_bars_needed = max(
freq.max_bars for freq in self.unique_frequencies
)
freq = '1m' if self.data_frequency == 'minute' else '1d'
spec = HistorySpec(
max_bars_needed + 1, freq, None, None, self.data_frequency,
)
rp = self._create_panel(
initial_dt, spec,
)
self.buffer_spec = spec
if bar_data is not None:
frame = self.frame_from_bardata(bar_data, initial_dt)
rp.add_frame(initial_dt, frame)
return rp
def convert_columns(self, values):
"""
        If columns have a specific type you want to enforce, override this
method and return the transformed values.
"""
return values
def digest_bars(self, history_spec, do_ffill):
"""
Get the last (history_spec.bar_count - 1) bars from self.digest_panel
for the requested HistorySpec.
"""
bar_count = history_spec.bar_count
if bar_count == 1:
# slicing with [1 - bar_count:] doesn't work when bar_count == 1,
# so special-casing this.
res = pd.DataFrame(index=[], columns=self.sids, dtype=float)
return res.values, res.index
field = history_spec.field
# Panel axes are (field, dates, sids). We want just the entries for
# the requested field, the last (bar_count - 1) data points, and all
# sids.
digest_panel = self.digest_panels[history_spec.frequency]
frame = digest_panel.get_current(field, raw=True)
if do_ffill:
# Do forward-filling *before* truncating down to the requested
# number of bars. This protects us from losing data if an illiquid
# stock has a gap in its price history.
filled = ffill_digest_frame_from_prior_values(
history_spec.frequency,
history_spec.field,
frame,
self.last_known_prior_values,
raw=True
# Truncate only after we've forward-filled
)
indexer = slice(1 - bar_count, None)
return filled[indexer], digest_panel.current_dates()[indexer]
else:
indexer = slice(1 - bar_count, None)
return frame[indexer, :], digest_panel.current_dates()[indexer]
def buffer_panel_minutes(self,
buffer_panel,
earliest_minute=None,
latest_minute=None,
raw=False):
"""
Get the minutes in @buffer_panel between @earliest_minute and
@latest_minute, inclusive.
@buffer_panel can be a RollingPanel or a plain Panel. If a
RollingPanel is supplied, we call `get_current` to extract a Panel
object.
If no value is specified for @earliest_minute, use all the minutes we
        have up until @latest_minute.
If no value for @latest_minute is specified, use all values up until
the latest minute.
"""
if isinstance(buffer_panel, RollingPanel):
buffer_panel = buffer_panel.get_current(start=earliest_minute,
end=latest_minute,
raw=raw)
return buffer_panel
# Using .ix here rather than .loc because loc requires that the keys
# are actually in the index, whereas .ix returns all the values between
# earliest_minute and latest_minute, which is what we want.
return buffer_panel.ix[:, earliest_minute:latest_minute, :]
def frame_from_bardata(self, data, algo_dt):
"""
Create a DataFrame from the given BarData and algo dt.
"""
data = data._data
frame_data = np.empty((len(self.fields), len(self.sids))) * np.nan
for j, sid in enumerate(self.sids):
sid_data = data.get(sid)
if not sid_data:
continue
if algo_dt != sid_data['dt']:
continue
for i, field in enumerate(self.fields):
frame_data[i, j] = sid_data.get(field, np.nan)
return pd.DataFrame(
frame_data,
index=self.fields.copy(),
columns=self.sids.copy(),
)
def update(self, data, algo_dt):
"""
Takes the bar at @algo_dt's @data, checks to see if we need to roll any
new digests, then adds new data to the buffer panel.
"""
frame = self.frame_from_bardata(data, algo_dt)
self.update_last_known_values()
self.update_digest_panels(algo_dt, self.buffer_panel)
self.buffer_panel.add_frame(algo_dt, frame)
def update_digest_panels(self, algo_dt, buffer_panel, freq_filter=None):
"""
Check whether @algo_dt is greater than cur_window_close for any of our
frequencies. If so, roll a digest for that frequency using data drawn
        from @buffer_panel and insert it into the appropriate digest panels.
If @freq_filter is specified, only use the given data to update
frequencies on which the filter returns True.
This takes `buffer_panel` as an argument rather than using
self.buffer_panel so that this method can be used to add supplemental
data from an external source.
"""
for frequency in filter(freq_filter, self.unique_frequencies):
# We don't keep a digest panel if we only have a length-1 history
# spec for a given frequency
digest_panel = self.digest_panels.get(frequency, None)
while algo_dt > self.cur_window_closes[frequency]:
earliest_minute = self.cur_window_starts[frequency]
latest_minute = self.cur_window_closes[frequency]
minutes_to_process = self.buffer_panel_minutes(
buffer_panel,
earliest_minute=earliest_minute,
latest_minute=latest_minute,
raw=True
)
if digest_panel is not None:
# Create a digest from minutes_to_process and add it to
# digest_panel.
digest_frame = self.create_new_digest_frame(
minutes_to_process,
self.fields,
self.sids
)
digest_panel.add_frame(
latest_minute,
digest_frame,
self.fields,
self.sids
)
# Update panel start/close for this frequency.
self.cur_window_starts[frequency] = \
frequency.next_window_start(latest_minute)
self.cur_window_closes[frequency] = \
frequency.window_close(self.cur_window_starts[frequency])
def frame_to_series(self, field, frame, columns=None):
"""
Convert a frame with a DatetimeIndex and sid columns into a series with
a sid index, using the aggregator defined by the given field.
"""
if isinstance(frame, pd.DataFrame):
columns = frame.columns
frame = frame.values
if not len(frame):
return pd.Series(
data=(0 if field == 'volume' else np.nan),
index=columns,
).values
if field in ['price', 'close_price']:
# shortcircuit for full last row
vals = frame[-1]
if np.all(~np.isnan(vals)):
return vals
return ffill(frame)[-1]
elif field == 'open_price':
return bfill(frame)[0]
elif field == 'volume':
return np.nansum(frame, axis=0)
elif field == 'high':
return np.nanmax(frame, axis=0)
elif field == 'low':
return np.nanmin(frame, axis=0)
else:
raise ValueError("Unknown field {}".format(field))
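    # Illustration of the aggregation above with a hypothetical two-sid frame
    # [[10.0, nan], [11.0, 20.0]]: 'price' returns the last row (forward-filling
    # it first if it contains NaNs), 'volume' returns the nansum over rows, and
    # 'high'/'low' return the per-column nanmax/nanmin.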
def aggregate_ohlcv_panel(self,
fields,
ohlcv_panel,
items=None,
minor_axis=None):
"""
Convert an OHLCV Panel into a DataFrame by aggregating each field's
frame into a Series.
"""
vals = ohlcv_panel
if isinstance(ohlcv_panel, pd.Panel):
vals = ohlcv_panel.values
items = ohlcv_panel.items
minor_axis = ohlcv_panel.minor_axis
data = [
self.frame_to_series(
field,
vals[items.get_loc(field)],
minor_axis
)
for field in fields
]
return np.array(data)
def create_new_digest_frame(self, buffer_minutes, items=None,
minor_axis=None):
"""
Package up minutes in @buffer_minutes into a single digest frame.
"""
return self.aggregate_ohlcv_panel(
self.fields,
buffer_minutes,
items=items,
minor_axis=minor_axis
)
def update_last_known_values(self):
"""
Store the non-NaN values from our oldest frame in each frequency.
"""
ffillable = self.ffillable_fields
if not len(ffillable):
return
for frequency in self.unique_frequencies:
digest_panel = self.digest_panels.get(frequency, None)
if digest_panel:
oldest_known_values = digest_panel.oldest_frame(raw=True)
else:
oldest_known_values = self.buffer_panel.oldest_frame(raw=True)
oldest_vals = oldest_known_values
oldest_columns = self.fields
for field in ffillable:
f_idx = oldest_columns.get_loc(field)
field_vals = oldest_vals[f_idx]
# isnan would be fast, possible to use?
non_nan_sids = np.where(pd.notnull(field_vals))
key = (frequency.freq_str, field)
key_loc = self.last_known_prior_values.index.get_loc(key)
self.last_known_prior_values.values[
key_loc, non_nan_sids
] = field_vals[non_nan_sids]
def get_history(self, history_spec, algo_dt):
"""
Main API used by the algoscript is mapped to this function.
Selects from the overarching history panel the values for the
@history_spec at the given @algo_dt.
"""
field = history_spec.field
do_ffill = history_spec.ffill
# Get our stored values from periods prior to the current period.
digest_frame, index = self.digest_bars(history_spec, do_ffill)
# Get minutes from our buffer panel to build the last row of the
# returned frame.
buffer_panel = self.buffer_panel_minutes(
self.buffer_panel,
earliest_minute=self.cur_window_starts[history_spec.frequency],
raw=True
)
buffer_frame = buffer_panel[self.fields.get_loc(field)]
if do_ffill:
buffer_frame = ffill_buffer_from_prior_values(
history_spec.frequency,
field,
buffer_frame,
digest_frame,
self.last_known_prior_values,
raw=True
)
last_period = self.frame_to_series(field, buffer_frame, self.sids)
return fast_build_history_output(digest_frame,
last_period,
algo_dt,
index=index,
columns=self.sids)
def fast_build_history_output(buffer_frame,
last_period,
algo_dt,
index=None,
columns=None):
"""
Optimized concatenation of DataFrame and Series for use in
HistoryContainer.get_history.
Relies on the fact that the input arrays have compatible shapes.
"""
buffer_values = buffer_frame
if isinstance(buffer_frame, pd.DataFrame):
buffer_values = buffer_frame.values
index = buffer_frame.index
columns = buffer_frame.columns
return pd.DataFrame(
data=np.vstack(
[
buffer_values,
last_period,
]
),
index=fast_append_date_to_index(
index,
pd.Timestamp(algo_dt)
),
columns=columns,
)
def fast_append_date_to_index(index, timestamp):
"""
Append a timestamp to a DatetimeIndex. DatetimeIndex.append does not
appear to work.
"""
return pd.DatetimeIndex(
np.hstack(
[
index.values,
[timestamp.asm8],
]
),
tz='UTC',
)
| apache-2.0 |
elaeon/breast_cancer_networks | sankey/script.py | 2 | 8969 | from optparse import OptionParser
import pandas as pd
import json
ontology_term = {}
ontology_gene = {}
node_index = {}
def read_gen_ontology():
df = pd.read_csv(filepath_or_buffer="gen_ontology.csv")
return df
def read_ft():
ft = set([])
with open("TFs.txt", "r") as f:
for line in f.readlines():
ft.add(line.strip("\n"))
return ft
def read_ft_im():
df = pd.read_csv(filepath_or_buffer="regulon_2WsC_cy40.sif", sep='\t')
return df
def read_firma_molecular():
df = pd.read_csv(filepath_or_buffer="ms_k_sorted.csv")
return df
def read_genes_no_diferenciados():
df = pd.read_csv(filepath_or_buffer="etc_k_sorted.csv")
return df
def clean_nodes_links(nodes, links):
indexes = set([link["source"] for link in links])
indexes = indexes.union(set([link["target"] for link in links]))
n_nodes = []
n_links = []
other_index = {}
for n_index, index in enumerate(indexes):
other_index[index] = n_index
n_nodes.append(nodes[index])
for values in links:
r = {}
try:
s = {"source": other_index[values["source"]]}
except KeyError:
s = {"source": values["source"]}
try:
t = {"target": other_index[values["target"]]}
except KeyError:
            t = {"target": values["target"]}
r.update(s)
r.update(t)
r["value"] = values["value"]
n_links.append(r)
return n_nodes, n_links
def unique(data, f=lambda v:"{}{}".format(v["source"], v["target"])):
from collections import OrderedDict
unique_data = OrderedDict()
for value in data:
key = f(value)
if not key in unique_data:
unique_data[key] = {"source": value["source"], "target": value["target"], "value": value["value"]}
return unique_data.values()
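# For example (hypothetical links), two A->B entries with values 5 and 9
# collapse to the first one seen (value 5), because unique() keys links by
# their (source, target) pair and keeps only the first occurrence.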
def join(left, right):
from collections import defaultdict
join_data = []
right_values = {right_value["source"]: right_value for right_value in right}
for left_value in left:
try:
right_value = right_values[left_value["source"]]
join_data.append({
"source": left_value["target"],
"target": right_value["target"],
"value": right_value["value"]})
except KeyError:
pass
#join_data.append({
# "source": left_value["target"],
# "target": node_index["UNKNOW"],
# "value": left_value["value"]})
return join_data
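# Sketch of what join() does with hypothetical link dicts: a left link
# {source: A, target: B} combined with a right link {source: A, target: C,
# value: 3} yields {source: B, target: C, value: 3}, i.e. whenever two records
# share a source, the left target is chained to the right target.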
def get_linked(search_space, links, length=0, im=10):
import networkx as nx
G = nx.DiGraph()
edges = [(link["source"], link["target"], link["value"]) for link in links]
G.add_weighted_edges_from(edges)
def only_paths_of_size(paths):
return [edges for edges in paths if len(edges) >= length]
uv_count = {}
linked = []
search_space_unique = unique(search_space, f=lambda v:"{}".format(v["source"]))
for link in search_space_unique:
shorted = nx.shortest_path(G, source=link["source"])
paths = only_paths_of_size(list(shorted.values()))
for edges in paths:
vertices = list(zip(edges, edges[1:]))
last_u, last_v = vertices[-1]
if G[last_u][last_v]["weight"] >= im:
for u, v in vertices:
key = "{}{}".format(u,v)
if not key in uv_count:
linked.append({
"source": u,
"target": v,
"value": None})
uv_count[key] = 0
uv_count[key] += 1
for link in linked:
u = link["source"]
v = link["target"]
key = "{}{}".format(u,v)
link["value"] = uv_count.get(key, G[u][v]["weight"])
return linked
def ontology_graph(name, search_space_ontology):
data = []
for gene, ontologies in sorted(search_space_ontology.items(), key=lambda x: x[0]):
for key_ontology in ontologies[name]:
term = ontology_term[key_ontology]
data.append({"source": node_index[gene], "target": node_index[term], "value": 1})
return data
def firma_molecular_graph(fm, gnd, ft_im):
data = []
fm_set = set(fm["name"])
gnd_set = set(gnd["name"])
for i, row in ft_im.iterrows():
if row["blanco"] in fm_set:
target = "Firma Molecular"
elif row["blanco"] in gnd_set:
target = "Genes no diferenciados"
else:
continue
data.append({
"source": node_index[row["ft"]],
"target": node_index[target],
"value": abs(row["im"])})
return data
def run(options):
df = read_gen_ontology()
ft = read_ft()
df = df.drop_duplicates(subset="genesym")
fm = read_firma_molecular()
gnd = read_genes_no_diferenciados()
ft_im = read_ft_im()
gene_ont = ["Gene Ontology Biological Process", "Gene Ontology Cellular Component",
"Gene Ontology Molecular Function"]
gene_ont_abbrv = {"Gene Ontology Biological Process": "gbp",
"Gene Ontology Cellular Component": "gcc",
"Gene Ontology Molecular Function": "gmf"}
df_ft = df[df["genesym"].isin(ft)]
gene_ontology = {}
ontologies = {}
ontology_term_list = set([])
columns = ["genesym"] + gene_ont
for i, row in df_ft[columns].iterrows():
for ontology in gene_ont:
for go in row[ontology].split("///"):
go_terms = go.split("//")
if len(go_terms) >= 2:
go_id = go_terms.pop(0)
key = go_id.strip()
ontology_term.setdefault(key, [])
ontologies.setdefault(ontology, set([]))
ontology_gene.setdefault(key, [])
gene_ontology.setdefault(row["genesym"], dict(zip(gene_ont, [[], [], []])))
ontologies[ontology].add(key)
gene_ontology[row["genesym"]][ontology].append(key)
ontology_gene[key].append(row["genesym"])
term = go_terms[0].strip()
ontology_term[key] = gene_ont_abbrv[ontology]+":"+term
ontology_term_list.add(gene_ont_abbrv[ontology]+":"+term)
fm_gnd_set = set(list(fm["name"]) + list(gnd["name"]))
gene_set = set(gene_ontology.keys())
search_space_ontology = {}
for gene in gene_set.intersection(fm_gnd_set):
search_space_ontology[gene] = gene_ontology[gene]
s_nodes = set([])
nodes = sorted(list(s_nodes.union(set(["UNKNOW"]), ft, ontology_term_list, fm_gnd_set, set(["Firma Molecular", "Genes no diferenciados"]))))
for i, term in enumerate(nodes):
if not term in node_index:
node_index[term] = i
pipeline = []
if options.cellular_component:
pipeline.append(ontology_graph("Gene Ontology Cellular Component", search_space_ontology))
if options.biological_process:
pipeline.append(ontology_graph("Gene Ontology Biological Process", search_space_ontology))
if options.molecular_function:
pipeline.append(ontology_graph("Gene Ontology Molecular Function", search_space_ontology))
pipeline.append(firma_molecular_graph(fm, gnd, ft_im))
links = [v for v in pipeline[0]]
for p1, p2 in zip(pipeline, pipeline[1:]):
for v in join(p1, p2):
links.append(v)
paths = get_linked(pipeline[0], links, length=len(pipeline)+1, im=options.im)
print("PATHS", len(paths))
n_nodes, n_links = clean_nodes_links(nodes, paths)
result = json_sankey(n_nodes, n_links, type_=options.type)
with open("sankey.json", "w") as f:
f.write(json.dumps(result))
def json_sankey(n_nodes, n_links, type_="normal"):
if type_ == "normal":
result = {"nodes": [{"name": node} for node in n_nodes], "links": n_links}
elif type_ == "colors":
result = {"nodes": [{"name": node, "id": node.lower().replace(" ", "_") + "_score"} for node in n_nodes],
"links": n_links}
return result
class Test(object):
im = 10
cellular_component = True
biological_process = True
    molecular_function = False  # run() reads this flag as well
    type = "normal"
def test():
options = Test()
run(options)
if __name__ == '__main__':
parser = OptionParser("%prog [options]")
parser.add_option("-c", "--cellular_component", action="store_true", default=False)
parser.add_option("-b", "--biological_process", action="store_true", default=False)
parser.add_option("-m", "--molecular_function", action="store_true", default=False)
parser.add_option("-i", "--im", action="store", default=0, type='float')
parser.add_option("-t", "--type", action="store", default="normal", type='string')
options, args = parser.parse_args()
run(options)
| gpl-3.0 |
xubenben/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
fvinas/businessplots | businessplots/_text.py | 1 | 1071 | # -*- coding: utf-8 -*-
from matplotlib.patches import Ellipse
# TODO: add support for kwargs to customize text styles
def text(ax, x, y, text, kind='rect'):
if kind == 'rect':
text_artist = ax.text(x, y, text, fontsize=15, bbox=dict(fc='white', ec='k', pad=7), horizontalalignment='center', verticalalignment='center')
bbox = text_artist.get_window_extent(ax.figure.canvas.get_renderer()).transformed(ax.transData.inverted())
return bbox.height * 2.5
elif kind == 'ellipse':
# a bit more complicated: we need to get the text width & height before adding the ellipse
text_artist = ax.text(x, y, text, fontsize=15, horizontalalignment='center', verticalalignment='center', zorder=4)
# get text bbox in axes coordinates (not in pixels)
bbox = text_artist.get_window_extent(ax.figure.canvas.get_renderer()).transformed(ax.transData.inverted())
ax.add_artist(Ellipse(xy=[x, y], width=bbox.width * 1.8, height=bbox.height * 2.5, angle=0, fc='white', zorder=3))
return .5 * (bbox.height * 2.5)
| mit |
jason-neal/companion_simulations | obsolete/create_min_chi2_table.py | 1 | 6369 | #!/usr/bin/env python
"""create_min_chi2_table.py.
Create Table of minimum Chi_2 values and save to a table.
"""
import argparse
import os
import sys
import corner
import matplotlib.pyplot as plt
import pandas as pd
import sqlalchemy as sa
from joblib import Parallel, delayed
from pandas.plotting import scatter_matrix
import simulators
from mingle.utilities.param_file import get_host_params
from mingle.utilities.phoenix_utils import closest_model_params
from mingle.utilities.scatter_corner import scatter_corner
from mingle.utilities.db_utils import decompose_database_name
def parse_args(args):
"""Take care of all the argparse stuff.
:returns: the args
"""
parser = argparse.ArgumentParser(description='Minimum chi-squared table.')
parser.add_argument('star', help='Star name')
parser.add_argument('--suffix', help='Suffix to add to the file names.', default="")
return parser.parse_args(args)
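# Typical invocation from the command line (the star name is illustrative; any
# star with entries in obs_nums below works the same way):
#   python create_min_chi2_table.py HD30501 --suffix _test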
def main(star, obsnum, chip, suffix="", echo=False):
database = os.path.join(simulators.paths["output_dir"], star, "iam",
"{0}-{1}_{2}_iam_chisqr_results{3}.db".format(star, obsnum, chip, suffix))
path, star, obsnum, chip = decompose_database_name(database)
os.makedirs(os.path.join(path, "plots"), exist_ok=True)
save_name = os.path.join(path, "{0}_iam_all_observation_min_chi2{1}.tsv".format(star, suffix))
teff, logg, fe_h = closest_model_params(*get_host_params(star))
params = {"path": path, "star": star, "obsnum": obsnum, "chip": chip,
"teff": teff, "logg": logg, "fe_h": fe_h}
# Hack to run from editor
if os.getcwd().endswith("companion_simulations/bin"):
database = "../" + database
save_name = "../" + save_name
if os.path.exists(database):
engine = sa.create_engine('sqlite:///{0}'.format(database), echo=echo)
else:
raise IOError("Database does not exist.")
table_names = engine.table_names()
if len(table_names) == 1:
tb_name = table_names[0]
else:
        raise ValueError("Database has too many tables {0}".format(table_names))
query = """SELECT * FROM {0}
WHERE (teff_1 = {1} AND logg_1 = {2} AND feh_1 = {3})
ORDER BY chi2 LIMIT 1
""".format(tb_name, params["teff"], params["logg"], params["fe_h"])
df = pd.read_sql(sa.text(query), engine)
df["obsnum"] = obsnum
df["chip"] = chip
columns = ["obsnum", "chip", "teff_1", "logg_1", "feh_1", "teff_2",
"logg_2", "feh_2", "alpha", "rv", "gamma", "chi2"]
if os.path.exists(save_name):
df.to_csv(save_name, columns=columns, sep='\t', mode="a", index=False, header=False)
else:
df.to_csv(save_name, columns=columns, sep='\t', mode="a", index=False, header=True)
return save_name
def scatter_plots(star, filename):
"""Load minimum chi2 table and make scatter plots across chips."""
df = pd.read_table(filename, sep="\t")
df.loc[:, "chip"] = df.loc[:, "chip"].astype(int)
fig, axes = plt.subplots(5, 2)
subdf = df.loc[:, ["chip", "teff_2", "alpha", "rv", "gamma", "chi2"]] # "logg_2", "feh_2"
scatter_matrix(subdf, alpha=1, figsize=(12, 12), diagonal='hist')
plt.suptitle("{0} Observation/chip variations".format(star))
path, fname = os.path.split(filename)
figname = os.path.join(path, "plots", "{0}_scatter.pdf".format(fname.split(".")[0]))
plt.savefig(figname)
figname = os.path.join(path, "plots", "{0}_scatter.png".format(fname.split(".")[0]))
plt.savefig(figname)
plt.close()
def scatter_corner_plots(star, filename):
"""Load minimum chi2 table and make scatter plots across chips."""
df = pd.read_table(filename, sep="\t")
df.loc[:, "chip"] = df.loc[:, "chip"].astype(int)
fig, axes = plt.subplots(5, 2)
subdf = df.loc[:, ["chip", "teff_2", "alpha", "rv", "gamma", "chi2"]] # "logg_2", "feh_2"
scatter_corner(subdf, alpha=1, figsize=(12, 12), diagonal='hist', corner="lower")
plt.suptitle("{0} Observation/chip variations".format(star))
path, fname = os.path.split(filename)
figname = os.path.join(path, "plots", "{0}_scatter_corner.pdf".format(fname.split(".")[0]))
plt.savefig(figname)
figname = os.path.join(path, "plots", "{0}_scatter_corner.png".format(fname.split(".")[0]))
plt.savefig(figname)
plt.close()
# Corner.corner
def min_chi2_corner_plot(star, filename):
df = pd.read_table(filename, sep="\t")
df.loc[:, "chip"] = df.loc[:, "chip"].astype(int)
subdf = df.loc[:, ["chip", "teff_2", "alpha", "rv", "gamma", "chi2"]] # "logg_2", "feh_2"
corner.corner(subdf.values, labels=subdf.columns.values)
plt.suptitle("{0} Observation/chip variations".format(star))
path, fname = os.path.split(filename)
figname = os.path.join(path, "plots", "{0}_corner_corner.png".format(fname.split(".")[0]))
plt.savefig(figname)
corner.corner(subdf.values, labels=subdf.columns.values, plot_contours=False)
plt.suptitle("{0} Observation/chip variations".format(star))
figname = os.path.join(path, "plots", "{0}_corner_contoured.png".format(fname.split(".")[0]))
plt.savefig(figname)
plt.close()
# TODO common function to determine observations and chips for different stars (like here)
if __name__ == "__main__":
args = parse_args(sys.argv[1:])
star = args.star
obs_nums = {"HD30501": ["1", "2a", "2b", "3"], "HD211847": ["1", "2"], "HD4747": ["1"],
"HDSIM": ["1", "2", "3"], "HDSIM2": ["1", "2", "3"], "HDSIM3": ["1", "2", "3"]}
chips = range(1, 5)
def paralleled_main(star, obsnum):
for chip in chips:
try:
save_name = main(star, obsnum, chip, suffix=args.suffix)
except Exception as e:
print(e)
print("Table creation failed for {0}-{1}_{2}".format(star, obsnum, chip))
continue
try:
scatter_plots(star, save_name)
scatter_corner_plots(star, save_name)
min_chi2_corner_plot(star, save_name)
except Exception as e:
print(" Corner plots did not work.")
raise e
# Run in parallel
    star_obsnums = obs_nums[star]
Parallel(n_jobs=-1)(delayed(paralleled_main)(star, obsnum) for obsnum in star_obsnums)
| mit |
iandriver/RNA-sequence-tools | FPKM_Parsing/make_align_report.py | 2 | 3390 | import fnmatch
import os
import pandas as pd
import cPickle as pickle
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
from collections import OrderedDict
path = '/Volumes/Seq_data'
result_file_names = ['results_SPC_d7','results_spc2_n2/results_Lane1_data','results_spc2_n2/results_Lane2_data','results_spc2_n2/results_Lane3_data','results_spc2_n2/results_Lane4_data']
basename = 'spc_d0_4_7'
cell_list = []
align_dict = OrderedDict()
align_dict['input_L_num'] = []
align_dict['mapped_L_num'] = []
align_dict['input_R_num'] = []
align_dict['mapped_R_num'] = []
align_dict['per_mapped'] = []
for rf in result_file_names:
path_to_file = os.path.join(path, rf)
for root, dirnames, filenames in os.walk(path_to_file):
for filename in fnmatch.filter(filenames, 'align_summary.txt'):
cell_name = (root.split('/')[-1])
file_title = (root.split('/')[-2])
if 'd7' in file_title:
day_n = 'D7_pnx_'
if cell_name[-1] == '_':
repeat = '_2'
else:
repeat =''
if 'Lane1' in file_title or 'Lane2' in file_title:
day_n = 'D0_ctrl_'
elif 'Lane3' in file_title or 'Lane4' in file_title:
day_n = 'D4_pnx_'
if 'bulk' in cell_name:
end_n = 'Human_bulk'
elif '+' in cell_name or '-' in cell_name or 'neg' in cell_name:
end_n = cell_name
else:
end_n = ''
if cell_name.strip('_')[-2] == 'C':
c_num = 'C0'+cell_name.strip('_')[-1]
elif cell_name.strip('_')[-3] == 'C':
c_num = cell_name.strip('_')[-3:]
else:
c_num =''
cell_name = day_n+c_num+end_n+repeat
cell_list.append(cell_name)
            f = open(os.path.join(root, 'align_summary.txt'), 'r')
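            # Assumed layout of TopHat's align_summary.txt (hedged sketch, kept
            # here as a reading aid for the parser below):
            #   Left reads:
            #       Input     :  <N>
            #       Mapped    :  <N> (xx.x% of input)
            #   Right reads:
            #       Input     :  <N>
            #       Mapped    :  <N> (xx.x% of input)
            #   xx.x% overall read mapping rate.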
for l in f:
if 'Left' in l:
side_s = 0
elif 'Right' in l:
side_s = 1
if "Input" in l and side_s == 0:
input_L_num = int(l.split(':')[-1])
if "Mapped" in l and side_s == 0:
mapped_L_1 = l.split(':')[-1]
mapped_L_num = int(mapped_L_1.split('(')[0].strip())
if "Input" in l and side_s == 1:
input_R_num = int(l.split(':')[-1])
if "Mapped" in l and side_s == 0:
mapped_R_1 = l.split(':')[-1]
mapped_R_num = int(mapped_R_1.split('(')[0].strip())
if "overall read mapping rate." in l:
per_mapped = float(l.split('%')[0])
align_dict['input_L_num'].append(input_L_num)
align_dict['mapped_L_num'].append(mapped_L_num)
align_dict['input_R_num'].append(input_R_num)
align_dict['mapped_R_num'].append(mapped_R_num)
align_dict['per_mapped'].append(per_mapped)
f.close()
align_df = pd.DataFrame(align_dict, index = cell_list)
align_df.to_csv(os.path.join(path,result_file_names[0],'results_'+basename+'_align.txt'), sep = '\t')
plt.hist(align_df['mapped_L_num'])
plt.show()
with open(os.path.join(path,result_file_names[0],'results_'+basename+'_align.p'), 'wb') as fp:
pickle.dump(align_df, fp)
| mit |
jesusfcr/airflow | airflow/hooks/dbapi_hook.py | 17 | 9454 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
from past.builtins import basestring
from datetime import datetime
from contextlib import closing
import numpy
import logging
import sys
from sqlalchemy import create_engine
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_uri(self):
conn = self.get_connection(getattr(self, self.conn_name_attr))
login = ''
if conn.login:
login = '{conn.login}:{conn.password}@'.format(conn=conn)
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
return '{conn.conn_type}://{login}{host}/{conn.schema}'.format(
conn=conn, login=login, host=host)
def get_sqlalchemy_engine(self, engine_kwargs=None):
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
import pandas.io.sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if isinstance(sql, basestring):
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
for s in sql:
if sys.version_info[0] < 3:
s = s.encode('utf-8')
logging.info(s)
if parameters is not None:
cur.execute(s, parameters)
else:
cur.execute(s)
conn.commit()
def set_autocommit(self, conn, autocommit):
conn.autocommit = autocommit
def get_cursor(self):
"""
Returns a cursor
"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
l = []
for cell in row:
l.append(self._serialize_cell(cell, conn))
values = tuple(l)
placeholders = ["%s",]*len(values)
sql = "INSERT INTO {0} {1} VALUES ({2});".format(
table,
target_fields,
",".join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
logging.info(
"Loaded {i} into {table} rows so far".format(**locals()))
conn.commit()
logging.info(
"Done loading. Loaded a total of {i} rows".format(**locals()))
@staticmethod
def _serialize_cell(cell, conn=None):
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:type table: str
:param tmp_file: The path of the target file
:type tmp_file: str
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
| apache-2.0 |
chaen/DIRAC | Core/Utilities/Graphs/PlotBase.py | 4 | 9118 | """ PlotBase is a base class for various Graphs plots
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
from __future__ import print_function
from DIRAC.Core.Utilities.Graphs.Palette import Palette
from DIRAC.Core.Utilities.Graphs.GraphData import GraphData
from DIRAC.Core.Utilities.Graphs.GraphUtilities import pixelToPoint, evalPrefs
from matplotlib.axes import Axes
from matplotlib.pylab import setp
__RCSID__ = "$Id$"
class PlotBase( object ):
def __init__( self, data = None, axes = None, *aw, **kw ):
self.ax_contain = axes
self.canvas = None
self.figure = None
if self.ax_contain:
self.figure = self.ax_contain.get_figure()
self.canvas = self.figure.canvas
self.dpi = self.ax_contain.figure.get_dpi()
self.ax_contain.set_axis_off()
self.prefs = evalPrefs( *aw, **kw )
self.coords = {}
self.palette = Palette()
if isinstance( data, dict):
self.gdata = GraphData( data )
elif isinstance( data, object) and data.__class__ == GraphData:
self.gdata = data
def dumpPrefs( self ):
for key in self.prefs:
print(key.rjust(20), ':', str(self.prefs[key]).ljust(40))
def setAxes( self, axes ):
self.ax_contain = axes
self.ax_contain.set_axis_off()
self.figure = self.ax_contain.get_figure()
self.canvas = self.figure.canvas
self.dpi = self.ax_contain.figure.get_dpi()
def draw( self ):
prefs = self.prefs
dpi = self.ax_contain.figure.get_dpi()
# Update palette
palette = prefs.get( 'colors', {} )
if palette:
self.palette.addPalette( palette )
xlabel = prefs.get( 'xlabel', '' )
ylabel = prefs.get( 'ylabel', '' )
xticks_flag = prefs.get( 'xticks', True )
yticks_flag = prefs.get( 'yticks', True )
text_size = prefs['text_size']
text_padding = prefs['text_padding']
label_text_size = prefs.get( 'label_text_size', text_size )
label_text_size_point = pixelToPoint( label_text_size, dpi )
tick_text_size = prefs.get( 'tick_text_size', text_size )
tick_text_size_point = pixelToPoint( tick_text_size, dpi )
ytick_length = prefs.get( 'ytick_length', 7 * tick_text_size )
plot_title = prefs.get( 'plot_title', '' )
if not plot_title or plot_title == 'NoTitle':
plot_title_size = 0
plot_title_padding = 0
else:
plot_title_size = prefs.get( 'plot_title_size', text_size )
plot_title_padding = prefs.get( 'plot_text_padding', text_padding )
plot_title_size_point = pixelToPoint( plot_title_size, dpi )
stats_flag = prefs.get( 'statistics_line', False )
stats_line = ''
stats_line_space = 0.
if stats_flag:
stats_line = self.gdata.getStatString()
stats_line_size = label_text_size
stats_line_padding = label_text_size * 2.
stats_line_space = stats_line_size + stats_line_padding
plot_padding = prefs['plot_padding']
plot_left_padding = prefs.get( 'plot_left_padding', plot_padding )
plot_right_padding = prefs.get( 'plot_right_padding', 0 )
plot_bottom_padding = prefs.get( 'plot_bottom_padding', plot_padding )
plot_top_padding = prefs.get( 'plot_top_padding', 0 )
frame_flag = prefs['frame']
# Create plot axes, and set properties
left, bottom, width, height = self.ax_contain.get_window_extent().bounds
l, b, f_width, f_height = self.figure.get_window_extent().bounds
# Space needed for labels and ticks
x_label_space = 0
if xticks_flag:
x_label_space += tick_text_size * 1.5
if xlabel:
x_label_space += label_text_size * 1.5
y_label_space = 0
if yticks_flag:
y_label_space += ytick_length
if ylabel:
y_label_space += label_text_size * 1.5
ax_plot_rect = ( float( plot_left_padding + left + y_label_space ) / f_width,
float( plot_bottom_padding + bottom + x_label_space + stats_line_space ) / f_height,
float( width - plot_left_padding - plot_right_padding - y_label_space ) / f_width,
float( height - plot_bottom_padding - plot_top_padding - x_label_space - \
plot_title_size - 2 * plot_title_padding - stats_line_space ) / f_height )
ax = Axes( self.figure, ax_plot_rect )
if prefs['square_axis']:
l, b, a_width, a_height = ax.get_window_extent().bounds
delta = abs( a_height - a_width )
if a_height > a_width:
a_height = a_width
ax_plot_rect = ( float( plot_left_padding + left ) / f_width,
float( plot_bottom_padding + bottom + delta / 2. ) / f_height,
float( width - plot_left_padding - plot_right_padding ) / f_width,
float( height - plot_bottom_padding - plot_title_size - 2 * plot_title_padding - delta ) / f_height )
else:
a_width = a_height
ax_plot_rect = ( float( plot_left_padding + left + delta / 2. ) / f_width,
float( plot_bottom_padding + bottom ) / f_height,
float( width - plot_left_padding - delta ) / f_width,
float( height - plot_bottom_padding - plot_title_size - 2 * plot_title_padding ) / f_height )
ax.set_position( ax_plot_rect )
self.figure.add_axes( ax )
self.ax = ax
frame = ax.patch
frame.set_fill( False )
if frame_flag.lower() == 'off':
self.ax.set_axis_off()
self.log_xaxis = False
self.log_yaxis = False
else:
# If requested, make x/y axis logarithmic
if prefs.get( 'log_xaxis', 'False' ).find( 'r' ) >= 0:
ax.semilogx()
self.log_xaxis = True
else:
self.log_xaxis = False
if prefs.get( 'log_yaxis', 'False' ).find( 'r' ) >= 0:
ax.semilogy()
self.log_yaxis = True
else:
self.log_yaxis = False
if xticks_flag:
setp( ax.get_xticklabels(), family = prefs['font_family'] )
setp( ax.get_xticklabels(), fontname = prefs['font'] )
setp( ax.get_xticklabels(), size = tick_text_size_point )
else:
setp( ax.get_xticklabels(), size = 0 )
if yticks_flag:
setp( ax.get_yticklabels(), family = prefs['font_family'] )
setp( ax.get_yticklabels(), fontname = prefs['font'] )
setp( ax.get_yticklabels(), size = tick_text_size_point )
else:
setp( ax.get_yticklabels(), size = 0 )
setp( ax.get_xticklines(), markeredgewidth = pixelToPoint( 0.5, dpi ) )
setp( ax.get_xticklines(), markersize = pixelToPoint( text_size / 2., dpi ) )
setp( ax.get_yticklines(), markeredgewidth = pixelToPoint( 0.5, dpi ) )
setp( ax.get_yticklines(), markersize = pixelToPoint( text_size / 2., dpi ) )
setp( ax.get_xticklines(), zorder = 4.0 )
line_width = prefs.get( 'line_width', 1.0 )
frame_line_width = prefs.get( 'frame_line_width', line_width )
grid_line_width = prefs.get( 'grid_line_width', 0.1 )
plot_line_width = prefs.get( 'plot_line_width', 0.1 )
setp( ax.patch, linewidth = pixelToPoint( plot_line_width, dpi ) )
#setp( ax.spines, linewidth=pixelToPoint(frame_line_width,dpi) )
#setp( ax.axvline(), linewidth=pixelToPoint(1.0,dpi) )
axis_grid_flag = prefs.get( 'plot_axis_grid', True )
if axis_grid_flag:
ax.grid( True, color = '#555555', linewidth = pixelToPoint( grid_line_width, dpi ) )
plot_axis_flag = prefs.get( 'plot_axis', True )
if plot_axis_flag:
# Set labels
if xlabel:
t = ax.set_xlabel( xlabel )
t.set_family( prefs['font_family'] )
t.set_fontname( prefs['font'] )
t.set_size( label_text_size )
if ylabel:
t = ax.set_ylabel( ylabel )
t.set_family( prefs['font_family'] )
t.set_fontname( prefs['font'] )
t.set_size( label_text_size )
else:
self.ax.set_axis_off()
# Create a plot title, if necessary
if plot_title:
self.ax.title = self.ax.text( 0.5,
1. + float( plot_title_padding ) / height,
plot_title,
verticalalignment = 'bottom',
horizontalalignment = 'center',
size = pixelToPoint( plot_title_size, dpi ),
family = prefs['font_family'],
fontname = prefs['font'])
self.ax.title.set_transform( self.ax.transAxes )
self.ax.title.set_family( prefs['font_family'] )
self.ax.title.set_fontname( prefs['font'] )
if stats_line:
self.ax.stats = self.ax.text( 0.5, ( -stats_line_space ) / height,
stats_line,
verticalalignment = 'top',
horizontalalignment = 'center',
size = pixelToPoint( stats_line_size, dpi ) )
self.ax.stats.set_transform( self.ax.transAxes )
| gpl-3.0 |
MLWave/auto-sklearn | test/scores/test_libscores.py | 5 | 2736 | import unittest
import numpy as np
import autosklearn.scores.libscores
class LibScoresTest(unittest.TestCase):
def test_accuracy_metric_4_binary_classification(self):
# 100% correct
expected = np.array([0, 1, 1, 1, 0, 0, 1, 1, 1, 0]).reshape((-1, 1))
prediction = expected.copy()
score = autosklearn.scores.libscores.acc_metric(expected, prediction)
self.assertEqual(1, score)
# 100% incorrect
prediction = (expected.copy() - 1) * -1
score = autosklearn.scores.libscores.acc_metric(expected, prediction)
self.assertAlmostEqual(-1, score)
# Random
prediction = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
score = autosklearn.scores.libscores.acc_metric(expected, prediction)
self.assertAlmostEqual(0, score)
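    # Note (added): across these tests the metric is normalised so that a
    # perfect prediction scores 1, a fully inverted one scores -1 and a
    # chance-level prediction sits near 0 -- consistent with rescaling the
    # element-wise binary accuracy as 2 * accuracy - 1 (a hedged reading of
    # autosklearn.scores.libscores.acc_metric, inferred from the expected
    # values rather than from its implementation).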
def test_accuracy_metric_4_multiclass_classification(self):
# 100% correct
expected = np.array([[0, 0, 1, 1, 0, 1, 0, 1, 0, 1],
[1, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 1, 0]])
prediction = expected.copy()
score = autosklearn.scores.libscores.acc_metric(expected, prediction)
self.assertEqual(1, score)
# 100% incorrect
prediction = (expected.copy() - 1) * -1
score = autosklearn.scores.libscores.acc_metric(expected, prediction)
self.assertAlmostEqual(-1, score)
# Pseudorandom
prediction = np.array([[1, 0, 0, 1, 0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0, 1, 0]])
score = autosklearn.scores.libscores.acc_metric(expected, prediction)
self.assertAlmostEqual(0.33333333, score)
def test_accuracy_metric_4_multilabel_classification(self):
# 100% correct
expected = np.array([[0, 0, 1, 1, 0, 1, 0, 1, 0, 1],
[1, 1, 0, 0, 1, 0, 1, 0, 1, 0],
[1, 1, 0, 0, 1, 0, 1, 0, 1, 0]])
prediction = expected.copy()
score = autosklearn.scores.libscores.acc_metric(expected, prediction)
self.assertEqual(1, score)
# 100% incorrect
prediction = (expected.copy() - 1) * -1
score = autosklearn.scores.libscores.acc_metric(expected, prediction)
self.assertAlmostEqual(-1, score)
# Pseudorandom
prediction = np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1]])
score = autosklearn.scores.libscores.acc_metric(expected, prediction)
self.assertAlmostEqual(-0.0666666666, score) | bsd-3-clause |
ChinaQuants/bokeh | bokeh/compat/mplexporter/renderers/base.py | 2 | 14361 | import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
        return bool(ax and ax.xaxis._gridOnMajor and ax.xaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
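    # Reader's note (added): the three context managers above define the
    # nesting protocol an exporter is expected to follow, roughly
    #
    #   with renderer.draw_figure(fig, fig_props):
    #       with renderer.draw_axes(ax, ax_props):
    #           renderer.draw_line(...); renderer.draw_markers(...)
    #           with renderer.draw_legend(legend, legend_props):
    #               ...
    #
    # with the open_*/close_* hooks below fired on entry and exit.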
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
        Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
        If a renderer does not reimplement this method, the default behaviour
        is to call BOTH draw_line and draw_markers whenever the Line2D object
        has both a markerstyle and a linestyle that are not None.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if len(path_transforms) == 0:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
| bsd-3-clause |
giorgiop/scikit-learn | sklearn/feature_extraction/text.py | 13 | 52040 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
normalized = unicodedata.normalize('NFKD', s)
if normalized == s:
return s
else:
return ''.join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
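# Illustrative behaviour (added; not doctests from the original source):
#   strip_accents_unicode(u'\xe9t\xe9') -> u'ete'
#   strip_accents_ascii(u'\xe9t\xe9')   -> u'ete'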
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
        check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary : boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
vocabulary[term] = new_val
map_index[old_val] = new_val
X.indices = map_index.take(X.indices, mode='clip')
return X
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = []
indptr = _make_int_array()
values = _make_int_array()
indptr.append(0)
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
j_indices.extend(feature_counter.keys())
values.extend(feature_counter.values())
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = np.asarray(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = frombuffer_empty(values, dtype=np.intc)
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sort_indices()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
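# Illustrative sketch (not part of the original module): a minimal usage
# example of the CountVectorizer defined above, on a made-up two-document
# corpus. The helper name and the corpus are assumptions for demonstration.
def _count_vectorizer_example():
    corpus = ["the cat sat on the mat",
              "the dog sat on the log"]
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(corpus)
    # X is a (2, n_features) sparse matrix of raw term counts; the column
    # order matches vectorizer.get_feature_names().
    return vectorizer.get_feature_names(), X.toarray()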
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The formula that is used to compute the tf-idf of term t is
tf-idf(d, t) = tf(t) * idf(d, t), and the idf is computed as
idf(d, t) = log [ n / df(d, t) ] + 1 (if ``smooth_idf=False``),
where n is the total number of documents and df(d, t) is the
document frequency; the document frequency is the number of documents d
that contain term t. The effect of adding "1" to the idf in the equation
above is that terms with zero idf, i.e., terms that occur in all documents
in a training set, will not be entirely ignored.
(Note that the idf formula above differs from the standard
textbook notation that defines the idf as
idf(d, t) = log [ n / (df(d, t) + 1) ]).
If ``smooth_idf=True`` (the default), the constant "1" is added to the
numerator and denominator of the idf as if an extra document was seen
containing every term in the collection exactly once, which prevents
    zero divisions: idf(d, t) = log [ (1 + n) / (1 + df(d, t)) ] + 1.
Furthermore, the formulas used to compute tf and idf depend
on parameter settings that correspond to the SMART notation used in IR
as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when
``sublinear_tf=True``.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when ``norm='l2'``, "n" (none)
when ``norm=None``.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schütze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf, diags=0, m=n_features,
n=n_features, format='csr')
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
        if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.floating):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
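# Illustrative sketch (not part of the original module): the smoothed idf
# described in the TfidfTransformer docstring, idf(d, t) = log[(1 + n) /
# (1 + df(d, t))] + 1, restated with plain numpy on a made-up count matrix.
# The helper name and the toy counts are assumptions for demonstration only.
def _smoothed_idf_example():
    counts = np.array([[3, 0, 1],
                       [2, 0, 0],
                       [3, 0, 0],
                       [4, 0, 0]], dtype=np.float64)
    n_samples = counts.shape[0]
    df = (counts > 0).sum(axis=0)          # document frequency of each term
    idf = np.log((1.0 + n_samples) / (1.0 + df)) + 1.0
    # Raw term frequencies ("n" in SMART notation) times idf; the transform
    # method additionally applies l2 normalization by default.
    return counts * idf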
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be analyzed directly (strings or bytes).
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents;
        if integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents;
        if integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
        Tokenize the documents, count the occurrences of each token and
        return them as a sparse matrix.
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
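# Illustrative sketch (added for exposition, not part of the original file):
# the TfidfVectorizer docstring states it is equivalent to CountVectorizer
# followed by TfidfTransformer; this guarded demo checks that claim on a tiny
# made-up corpus when the module is run directly.
if __name__ == "__main__":
    _corpus = ["apple banana apple",
               "banana cherry",
               "apple cherry cherry"]
    _counts = CountVectorizer().fit_transform(_corpus)
    _pipelined = TfidfTransformer().fit_transform(_counts)
    _direct = TfidfVectorizer().fit_transform(_corpus)
    # Both routes should yield (numerically) the same weighted matrix.
    print(np.abs(_pipelined.toarray() - _direct.toarray()).max())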
| bsd-3-clause |
draperjames/bokeh | bokeh/sampledata/daylight.py | 13 | 2683 | """ Daylight hours from http://www.sunrisesunset.com
"""
from __future__ import absolute_import
from bokeh.util.dependencies import import_required
pd = import_required('pandas',
'daylight sample data requires Pandas (http://pandas.pydata.org) to be installed')
import re
import datetime
import requests
from six.moves import xrange
from os.path import join, abspath, dirname
url = "http://sunrisesunset.com/calendar.asp"
r0 = re.compile("<[^>]+>|&nbsp;|[\r\n\t]")
r1 = re.compile(r"(\d+)(DST Begins|DST Ends)?Sunrise: (\d+):(\d\d)Sunset: (\d+):(\d\d)")
def fetch_daylight_hours(lat, lon, tz, dst, year):
"""Fetch daylight hours from sunrisesunset.com for a given location.
Parameters
----------
lat : float
Location's latitude.
lon : float
Location's longitude.
tz : int or float
Time zone offset from UTC. Use floats for half-hour time zones.
dst : int
Daylight saving type, e.g. 0 -> none, 1 -> North America, 2 -> Europe.
See sunrisesunset.com/custom.asp for other possible values.
year : int
Year (1901..2099).
"""
daylight = []
summer = 0 if lat >= 0 else 1
for month in xrange(1, 12+1):
args = dict(url=url, lat=lat, lon=lon, tz=tz, dst=dst, year=year, month=month)
response = requests.get("%(url)s?comb_city_info=_;%(lon)s;%(lat)s;%(tz)s;%(dst)s&month=%(month)s&year=%(year)s&time_type=1&wadj=1" % args)
entries = r1.findall(r0.sub("", response.text))
for day, note, sunrise_hour, sunrise_minute, sunset_hour, sunset_minute in entries:
if note == "DST Begins":
summer = 1
elif note == "DST Ends":
summer = 0
date = datetime.date(year, month, int(day))
sunrise = datetime.time(int(sunrise_hour), int(sunrise_minute))
sunset = datetime.time(int(sunset_hour), int(sunset_minute))
daylight.append([date, sunrise, sunset, summer])
return pd.DataFrame(daylight, columns=["Date", "Sunrise", "Sunset", "Summer"])
# daylight_warsaw_2013 = fetch_daylight_hours(52.2297, -21.0122, 1, 2, 2013)
# daylight_warsaw_2013.to_csv("bokeh/sampledata/daylight_warsaw_2013.csv", index=False)
def load_daylight_hours(file):
path = join(dirname(abspath(__file__)), file)
df = pd.read_csv(path, parse_dates=["Date", "Sunrise", "Sunset"])
df["Date"] = df.Date.map(lambda x: x.date())
df["Sunrise"] = df.Sunrise.map(lambda x: x.time())
df["Sunset"] = df.Sunset.map(lambda x: x.time())
return df
daylight_warsaw_2013 = load_daylight_hours("daylight_warsaw_2013.csv")
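# Illustrative sketch (not part of the original module): derive the daylight
# duration in hours from the sample table loaded above, using the Date,
# Sunrise and Sunset columns produced by load_daylight_hours().
if __name__ == "__main__":
    _durations = [
        (datetime.datetime.combine(day, sunset) -
         datetime.datetime.combine(day, sunrise)).seconds / 3600.0
        for day, sunrise, sunset in zip(daylight_warsaw_2013.Date,
                                        daylight_warsaw_2013.Sunrise,
                                        daylight_warsaw_2013.Sunset)]
    print(max(_durations))  # longest day of the year, in hours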
| bsd-3-clause |
idlead/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 272 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
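# Illustrative sketch (not part of the original tests): the identity checked
# throughout this file -- the estimated sources equal dot(dot(mixing_, k_),
# mixed_data) when whitening is enabled -- demonstrated once more on freshly
# generated toy signals when the module is executed directly.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    time = np.linspace(0, 8, 2000)
    sources = np.c_[np.sin(2 * time), np.sign(np.sin(3 * time))].T
    mixed = np.dot(np.array([[1.0, 0.5], [0.5, 2.0]]), sources)
    k_, mixing_, estimated = fastica(mixed.T, n_components=2, random_state=0)
    print(np.allclose(estimated.T, np.dot(np.dot(mixing_, k_), mixed)))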
| bsd-3-clause |
Averroes/statsmodels | statsmodels/sandbox/examples/example_nbin.py | 33 | 13139 | # -*- coding: utf-8 -*-
'''
Author: Vincent Arel-Bundock <[email protected]>
Date: 2012-08-25
This example file implements 5 variations of the negative binomial regression
model for count data: NB-P, NB-1, NB-2, geometric and left-truncated.
The NBin class inherits from the GenericMaximumLikelihood statsmodels class
which provides automatic numerical differentiation for the score and hessian.
NB-1, NB-2 and geometric are implemented as special cases of the NB-P model
described in Greene (2008) Functional forms for the negative binomial model for
count data. Economics Letters, v99n3.
Tests are included to check how NB-1, NB-2 and geometric coefficient estimates
compare to equivalent models in R. Results usually agree up to the 4th digit.
The NB-P and left-truncated model results have not been compared to other
implementations. Note that NB-P appears to only have been implemented in the
LIMDEP software.
'''
import numpy as np
from scipy.special import gammaln
from scipy.stats import nbinom
from statsmodels.base.model import GenericLikelihoodModel
from statsmodels.base.model import GenericLikelihoodModelResults
import statsmodels.api as sm
#### Negative Binomial Log-likelihoods ####
def _ll_nbp(y, X, beta, alph, Q):
'''
Negative Binomial Log-likelihood -- type P
References:
    Greene, W. 2008. "Functional forms for the negative binomial model
for count data". Economics Letters. Volume 99, Number 3, pp.585-590.
Hilbe, J.M. 2011. "Negative binomial regression". Cambridge University Press.
Following notation in Greene (2008), with negative binomial heterogeneity
parameter :math:`\alpha`:
.. math::
\lambda_i = exp(X\beta)\\
\theta = 1 / \alpha \\
g_i = \theta \lambda_i^Q \\
w_i = g_i/(g_i + \lambda_i) \\
r_i = \theta / (\theta+\lambda_i) \\
ln \mathcal{L}_i = ln \Gamma(y_i+g_i) - ln \Gamma(1+y_i) + g_iln (r_i) + y_i ln(1-r_i)
'''
mu = np.exp(np.dot(X, beta))
size = 1/alph*mu**Q
prob = size/(size+mu)
ll = nbinom.logpmf(y, size, prob)
return ll
def _ll_nb1(y, X, beta, alph):
'''Negative Binomial regression (type 1 likelihood)'''
ll = _ll_nbp(y, X, beta, alph, Q=1)
return ll
def _ll_nb2(y, X, beta, alph):
'''Negative Binomial regression (type 2 likelihood)'''
ll = _ll_nbp(y, X, beta, alph, Q=0)
return ll
def _ll_geom(y, X, beta):
'''Geometric regression'''
ll = _ll_nbp(y, X, beta, alph=1, Q=0)
return ll
def _ll_nbt(y, X, beta, alph, C=0):
'''
Negative Binomial (truncated)
Truncated densities for count models (Cameron & Trivedi, 2005, 680):
.. math::
f(y|\beta, y \geq C+1) = \frac{f(y|\beta)}{1-F(C|\beta)}
'''
Q = 0
mu = np.exp(np.dot(X, beta))
size = 1/alph*mu**Q
prob = size/(size+mu)
ll = nbinom.logpmf(y, size, prob) - np.log(1 - nbinom.cdf(C, size, prob))
return ll
#### Model Classes ####
class NBin(GenericLikelihoodModel):
'''
Negative Binomial regression
Parameters
----------
endog : array-like
1-d array of the response variable.
exog : array-like
`exog` is an n x p array where n is the number of observations and p
is the number of regressors including the intercept if one is
included in the data.
ll_type: string
log-likelihood type
`nb2`: Negative Binomial type-2 (most common)
`nb1`: Negative Binomial type-1
`nbp`: Negative Binomial type-P (Greene, 2008)
`nbt`: Left-truncated Negative Binomial (type-2)
`geom`: Geometric regression model
C: integer
Cut-point for `nbt` model
'''
def __init__(self, endog, exog, ll_type='nb2', C=0, **kwds):
self.exog = np.array(exog)
self.endog = np.array(endog)
self.C = C
super(NBin, self).__init__(endog, exog, **kwds)
# Check user input
if ll_type not in ['nb2', 'nb1', 'nbp', 'nbt', 'geom']:
raise NameError('Valid ll_type are: nb2, nb1, nbp, nbt, geom')
self.ll_type = ll_type
# Starting values (assumes first column of exog is constant)
if ll_type == 'geom':
self.start_params_default = np.zeros(self.exog.shape[1])
elif ll_type == 'nbp':
# Greene recommends starting NB-P at NB-2
start_mod = NBin(endog, exog, 'nb2')
start_res = start_mod.fit(disp=False)
self.start_params_default = np.append(start_res.params, 0)
else:
self.start_params_default = np.append(np.zeros(self.exog.shape[1]), .5)
self.start_params_default[0] = np.log(self.endog.mean())
# Define loglik based on ll_type argument
if ll_type == 'nb1':
self.ll_func = _ll_nb1
elif ll_type == 'nb2':
self.ll_func = _ll_nb2
elif ll_type == 'geom':
self.ll_func = _ll_geom
elif ll_type == 'nbp':
self.ll_func = _ll_nbp
elif ll_type == 'nbt':
self.ll_func = _ll_nbt
def nloglikeobs(self, params):
alph = params[-1]
beta = params[:self.exog.shape[1]]
if self.ll_type == 'geom':
return -self.ll_func(self.endog, self.exog, beta)
elif self.ll_type == 'nbt':
return -self.ll_func(self.endog, self.exog, beta, alph, self.C)
elif self.ll_type == 'nbp':
Q = params[-2]
return -self.ll_func(self.endog, self.exog, beta, alph, Q)
else:
return -self.ll_func(self.endog, self.exog, beta, alph)
def fit(self, start_params=None, maxiter=10000, maxfun=5000, **kwds):
        if start_params is None:
countfit = super(NBin, self).fit(start_params=self.start_params_default,
maxiter=maxiter, maxfun=maxfun, **kwds)
else:
countfit = super(NBin, self).fit(start_params=start_params,
maxiter=maxiter, maxfun=maxfun, **kwds)
countfit = CountResults(self, countfit)
return countfit
class CountResults(GenericLikelihoodModelResults):
def __init__(self, model, mlefit):
self.model = model
self.__dict__.update(mlefit.__dict__)
def summary(self, yname=None, xname=None, title=None, alpha=.05,
yname_list=None):
top_left = [('Dep. Variable:', None),
('Model:', [self.model.__class__.__name__]),
('Method:', ['MLE']),
('Date:', None),
('Time:', None),
('Converged:', ["%s" % self.mle_retvals['converged']])
]
top_right = [('No. Observations:', None),
('Log-Likelihood:', None),
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
#boiler plate
from statsmodels.iolib.summary import Summary
smry = Summary()
# for top of table
smry.add_table_2cols(self, gleft=top_left, gright=top_right, #[],
yname=yname, xname=xname, title=title)
# for parameters, etc
smry.add_table_params(self, yname=yname_list, xname=xname, alpha=alpha,
use_t=True)
return smry
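# Illustrative sketch (not part of the original example): fit the NBin class
# above on synthetic data, so the API can be exercised without downloading the
# Rdatasets files used by the tests further below. The helper name, seed and
# parameter values are arbitrary assumptions; with alpha = 0.5 the fitted
# params should come out roughly as [1.0, 0.5, 0.5].
def _synthetic_nb2_demo():
    rng = np.random.RandomState(123)
    n = 500
    X = np.column_stack([np.ones(n), rng.uniform(-1, 1, size=n)])
    mu = np.exp(np.dot(X, [1.0, 0.5]))
    size = 1.0 / 0.5                      # NB-2 with alpha = 0.5
    y = rng.negative_binomial(size, size / (size + mu))
    res = NBin(y, X, 'nb2').fit(disp=False)
    return res.params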
#### Score function for NB-P ####
from scipy.special import digamma
def _score_nbp(y, X, beta, thet, Q):
'''
Negative Binomial Score -- type P likelihood from Greene (2007)
.. math::
\lambda_i = exp(X\beta)\\
g_i = \theta \lambda_i^Q \\
w_i = g_i/(g_i + \lambda_i) \\
r_i = \theta / (\theta+\lambda_i) \\
A_i = \left [ \Psi(y_i+g_i) - \Psi(g_i) + ln w_i \right ] \\
B_i = \left [ g_i (1-w_i) - y_iw_i \right ] \\
\partial ln \mathcal{L}_i / \partial
\begin{pmatrix} \lambda_i \\ \theta \\ Q \end{pmatrix}=
[A_i+B_i]
\begin{pmatrix} Q/\lambda_i \\ 1/\theta \\ ln(\lambda_i) \end{pmatrix}
-B_i
\begin{pmatrix} 1/\lambda_i\\ 0 \\ 0 \end{pmatrix} \\
\frac{\partial \lambda}{\partial \beta} = \lambda_i \mathbf{x}_i \\
\frac{\partial \mathcal{L}_i}{\partial \beta} =
\left (\frac{\partial\mathcal{L}_i}{\partial \lambda_i} \right )
\frac{\partial \lambda_i}{\partial \beta}
'''
lamb = np.exp(np.dot(X, beta))
g = thet * lamb**Q
w = g / (g + lamb)
r = thet / (thet+lamb)
A = digamma(y+g) - digamma(g) + np.log(w)
B = g*(1-w) - y*w
dl = (A+B) * Q/lamb - B * 1/lamb
dt = (A+B) * 1/thet
dq = (A+B) * np.log(lamb)
db = X * (dl * lamb)[:,np.newaxis]
sc = np.array([dt.sum(), dq.sum()])
sc = np.concatenate([db.sum(axis=0), sc])
return sc
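# Illustrative sketch (added for exposition): one way to sanity-check the
# analytic NB-P score above is to compare it against central finite
# differences of the summed log-likelihood. Note that _ll_nbp is parameterized
# by alpha = 1/theta, hence the conversion below. The helper name, seed and
# toy data are assumptions for demonstration only.
def _check_score_nbp(eps=1e-6):
    rng = np.random.RandomState(0)
    X = np.column_stack([np.ones(200), rng.randn(200)])
    beta = np.array([0.5, -0.2])
    thet, Q = 1.5, 0.3
    y = rng.poisson(np.exp(np.dot(X, beta)))
    def loglike(params):
        b, t, q = params[:2], params[2], params[3]
        return _ll_nbp(y, X, b, 1.0 / t, q).sum()
    params = np.concatenate([beta, [thet, Q]])
    numeric = np.zeros_like(params)
    for i in range(len(params)):
        step = np.zeros_like(params)
        step[i] = eps
        numeric[i] = (loglike(params + step) - loglike(params - step)) / (2 * eps)
    # _score_nbp returns the gradient ordered as [beta..., theta, Q].
    return _score_nbp(y, X, beta, thet, Q), numeric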
#### Tests ####
from statsmodels.compat.python import urlopen
from numpy.testing import assert_almost_equal
import pandas
import patsy
medpar = pandas.read_csv(urlopen('http://vincentarelbundock.github.com/Rdatasets/csv/COUNT/medpar.csv'))
mdvis = pandas.read_csv(urlopen('http://vincentarelbundock.github.com/Rdatasets/csv/COUNT/mdvis.csv'))
# NB-2
'''
# R v2.15.1
library(MASS)
library(COUNT)
data(medpar)
f <- los~factor(type)+hmo+white
mod <- glm.nb(f, medpar)
summary(mod)
Call:
glm.nb(formula = f, data = medpar, init.theta = 2.243376203,
link = log)
Deviance Residuals:
Min 1Q Median 3Q Max
-2.4671 -0.9090 -0.2693 0.4320 3.8668
Coefficients:
Estimate Std. Error z value Pr(>|z|)
(Intercept) 2.31028 0.06745 34.253 < 2e-16 ***
factor(type)2 0.22125 0.05046 4.385 1.16e-05 ***
factor(type)3 0.70616 0.07600 9.292 < 2e-16 ***
hmo -0.06796 0.05321 -1.277 0.202
white -0.12907 0.06836 -1.888 0.059 .
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
(Dispersion parameter for Negative Binomial(2.2434) family taken to be 1)
Null deviance: 1691.1 on 1494 degrees of freedom
Residual deviance: 1568.1 on 1490 degrees of freedom
AIC: 9607
Number of Fisher Scoring iterations: 1
Theta: 2.2434
Std. Err.: 0.0997
2 x log-likelihood: -9594.9530
'''
def test_nb2():
y, X = patsy.dmatrices('los ~ C(type) + hmo + white', medpar)
y = np.array(y)[:,0]
nb2 = NBin(y,X,'nb2').fit(maxiter=10000, maxfun=5000)
assert_almost_equal(nb2.params,
[2.31027893349935, 0.221248978197356, 0.706158824346228,
-0.067955221930748, -0.129065442248951, 0.4457567],
decimal=2)
# NB-1
'''
# R v2.15.1
# COUNT v1.2.3
library(COUNT)
data(medpar)
f <- los~factor(type)+hmo+white
ml.nb1(f, medpar)
Estimate SE Z LCL UCL
(Intercept) 2.34918407 0.06023641 38.9994023 2.23112070 2.46724744
factor(type)2 0.16175471 0.04585569 3.5274735 0.07187757 0.25163186
factor(type)3 0.41879257 0.06553258 6.3906006 0.29034871 0.54723643
hmo -0.04533566 0.05004714 -0.9058592 -0.14342805 0.05275673
white -0.12951295 0.06071130 -2.1332593 -0.24850710 -0.01051880
alpha 4.57898241 0.22015968 20.7984603 4.14746943 5.01049539
'''
#def test_nb1():
#y, X = patsy.dmatrices('los ~ C(type) + hmo + white', medpar)
#y = np.array(y)[:,0]
## TODO: Test fails with some of the other optimization methods
#nb1 = NBin(y,X,'nb1').fit(method='ncg', maxiter=10000, maxfun=5000)
#assert_almost_equal(nb1.params,
#[2.34918407014186, 0.161754714412848, 0.418792569970658,
#-0.0453356614650342, -0.129512952033423, 4.57898241219275],
#decimal=2)
# NB-Geometric
'''
MASS v7.3-20
R v2.15.1
library(MASS)
data(medpar)
f <- los~factor(type)+hmo+white
mod <- glm(f, family=negative.binomial(1), data=medpar)
summary(mod)
Call:
glm(formula = f, family = negative.binomial(1), data = medpar)
Deviance Residuals:
Min 1Q Median 3Q Max
-1.7942 -0.6545 -0.1896 0.3044 2.6844
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 2.30849 0.07071 32.649 < 2e-16 ***
factor(type)2 0.22121 0.05283 4.187 2.99e-05 ***
factor(type)3 0.70599 0.08092 8.724 < 2e-16 ***
hmo -0.06779 0.05521 -1.228 0.2197
white -0.12709 0.07169 -1.773 0.0765 .
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
(Dispersion parameter for Negative Binomial(1) family taken to be 0.5409721)
Null deviance: 872.29 on 1494 degrees of freedom
Residual deviance: 811.95 on 1490 degrees of freedom
AIC: 9927.3
Number of Fisher Scoring iterations: 5
'''
#def test_geom():
#y, X = patsy.dmatrices('los ~ C(type) + hmo + white', medpar)
#y = np.array(y)[:,0]
## TODO: remove alph from geom params
#geom = NBin(y,X,'geom').fit(maxiter=10000, maxfun=5000)
#assert_almost_equal(geom.params,
#[2.3084850946241, 0.221206159108742, 0.705986369841159,
#-0.0677871843613577, -0.127088772164963],
#decimal=4)
test_nb2()
| bsd-3-clause |
vdrhtc/Measurement-automation | lib3/core/_measurement_results/sParMeas2D.py | 1 | 8113 | # Standard library imports
# Third party imports
from matplotlib import pyplot as plt
from matplotlib import colorbar
import numpy as np
# Local application imports
from lib3.core.measurementResult import MeasurementResult
from lib3.core.contextBase import ContextBase
# TODO: write docstring for every function here
class SParMeas2D(MeasurementResult):
"""
    Base class for visualization of measurements of S-parameter dependence
    on two parameters. The result is displayed as two 2D heatmaps, one for
    each of the two components of the S-parameter.
    Components can be:
        - real and imaginary parts
        - dBc and phase (in radians)
"""
def __init__(self, name, sample_name):
super().__init__(name, sample_name)
self._context = ContextBase()
self._is_finished = False
self._phase_units = "rad"
self.max_phase = -1
self.min_phase = 1
self._plot_limits_fixed = False
self.max_abs = 1
self.min_abs = 0
self._unwrap_phase = False
self._amps_map = None
self._phas_map = None
self._amp_cb = None
self._phas_cb = None
def _prepare_figure(self):
fig, axes = plt.subplots(1, 2, figsize=(15, 7), sharey=True,
sharex=True)
ax_amps, ax_phas = axes
ax_amps.ticklabel_format(axis='x', style='sci', scilimits=(-2, 2))
ax_amps.set_ylabel("Frequency [GHz]")
xlabel = self._parameter_names[0]
ax_amps.set_xlabel(xlabel)
ax_phas.ticklabel_format(axis='x', style='sci', scilimits=(-2, 2))
ax_phas.set_xlabel(xlabel)
plt.tight_layout(pad=2, h_pad=-10)
cax_amps, kw = colorbar.make_axes(ax_amps, aspect=40)
cax_phas, kw = colorbar.make_axes(ax_phas, aspect=40)
cax_amps.set_title("$|S_{21}|$", position=(0.5, -0.05))
cax_phas.set_title("$\\angle S_{21}$\n [%s]" % self._phase_units,
position=(0.5, -0.1))
ax_amps.grid()
ax_phas.grid()
fig.canvas.set_window_title(self._name)
return fig, axes, (cax_amps, cax_phas)
def set_phase_units(self, units):
"""
Sets the units of the phase in the plots
Parameters:
-----------
units: "rad" or "deg"
units in which the phase will be displayed
"""
if units in ["deg", "rad"]:
self._phase_units = units
else:
print("Phase units invalid")
def set_unwrap_phase(self, unwrap_phase):
"""
Set if the phase plot should be unwrapped
Parameters:
-----------
unwrap_phase: boolean
True or False to control the unwrapping
"""
self._unwrap_phase = unwrap_phase
def _plot(self, data):
ax_amps, ax_phas = self._axes
cax_amps, cax_phas = self._caxes
if "data" not in data.keys():
return
X, Y, Z = self._prepare_data_for_plot(data)
if not self._unwrap_phase:
phases = abs(np.angle(Z).T)
else:
phases = np.unwrap(np.angle(Z)).T
phases[Z.T == 0] = 0
phases = phases if self._phase_units == "rad" else phases * 180 / np.pi
if self._plot_limits_fixed is False:
self.max_abs = max(abs(Z)[abs(Z) != 0])
self.min_abs = min(abs(Z)[abs(Z) != 0])
self.max_phase = max(phases[phases != 0])
self.min_phase = min(phases[phases != 0])
step_x = np.min(np.abs(np.diff(X)))
step_y = np.min(np.abs(np.diff(Y)))
extent = [np.min(X) - step_x / 2, np.max(X) + step_x / 2,
np.min(Y) - step_y / 2, np.max(Y) + step_y / 2]
if self._amps_map is None or not self._dynamic:
self._amps_map = ax_amps.imshow(abs(Z).T, origin='lower',
cmap="RdBu_r",
aspect='auto', vmax=self.max_abs,
vmin=self.min_abs,
extent=extent,
interpolation='none')
self._amp_cb = plt.colorbar(self._amps_map, cax=cax_amps)
self._amp_cb.formatter.set_powerlimits((0, 0))
self._amp_cb.update_ticks()
else:
self._amps_map.set_data(abs(Z).T)
self._amps_map.set_clim(self.min_abs, self.max_abs)
if self._phas_map is None or not self._dynamic:
self._phas_map = ax_phas.imshow(phases, origin='lower',
aspect='auto',
cmap="RdBu_r", vmin=self.min_phase,
vmax=self.max_phase,
extent=extent,
interpolation='none')
self._phas_cb = plt.colorbar(self._phas_map, cax=cax_phas)
else:
self._phas_map.set_data(phases)
self._phas_map.set_clim(self.min_phase, self.max_phase)
plt.draw()
def set_plot_range(self, min_abs, max_abs, min_phas=None, max_phas=None):
self.max_phase = max_phas
self.min_phase = min_phas
self.max_abs = max_abs
self.min_abs = min_abs
def _prepare_data_for_plot(self, data):
s_data = self._remove_delay(data["Frequency [Hz]"], data["data"])
parameter_list = data[self._parameter_names[0]]
# if parameter_list[0] > parameter_list[-1]:
# parameter_list = parameter_list[::-1]
# s_data = s_data[::-1, :]
# s_data = self.remove_background('avg_cur')
return parameter_list, data["Frequency [Hz]"] / 1e9, s_data
def remove_delay(self):
copy = self.copy()
s_data, frequencies = copy.get_data()["data"], copy.get_data()[
"Frequency [Hz]"]
copy.get_data()["data"] = self._remove_delay(frequencies, s_data)
return copy
def _remove_delay(self, frequencies, s_data):
phases = np.unwrap(np.angle(s_data))
k, b = np.polyfit(frequencies, phases[0], 1)
phases = phases - k * frequencies - b
corr_s_data = abs(s_data) * np.exp(1j * phases)
corr_s_data[abs(corr_s_data) < 1e-14] = 0
return corr_s_data
def remove_background(self, direction):
"""
Remove background
Parameters:
-----------
direction: str
"avg_cur" for bias slice subtraction
"avg_freq" for if_freq slice subtraction
"""
s_data = self.get_data()["data"]
len_freq = s_data.shape[1]
len_cur = s_data.shape[0]
        if direction == "avg_cur":
avg = np.zeros(len_freq, dtype=complex)
for j in range(len_freq):
counter_av = 0
for i in range(len_cur):
if s_data[i, j] != 0:
counter_av += 1
avg[j] += s_data[i, j]
avg[j] = avg[j] / counter_av
s_data[:, j] = s_data[:, j] / avg[j]
        elif direction == "avg_freq":
avg = np.zeros(len_cur, dtype=complex)
for j in range(len_cur):
counter_av = 0
for i in range(len_freq):
if s_data[j, i] != 0:
counter_av += 1
avg[j] += s_data[j, i]
avg[j] = avg[j] / counter_av
s_data[j, :] = s_data[j, :] / avg[j]
self.get_data()["data"] = s_data
return s_data
def __setstate__(self, state):
self._amps_map = None
self._phas_map = None
super().__setstate__(state)
def __getstate__(self):
d = super().__getstate__()
d["_amps_map"] = None
d["_phas_map"] = None
d["_amp_cb"] = None
d["_phas_cb"] = None
return d
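# Illustrative sketch (not part of the original class): the electrical-delay
# removal performed by SParMeas2D._remove_delay() amounts to fitting a straight
# line to the unwrapped phase of one trace and subtracting that line from the
# data. The standalone helper below restates the idea on synthetic data; its
# name, the frequency span and the fake delay are assumptions.
def _demo_remove_linear_phase():
    freqs = np.linspace(5e9, 6e9, 201)
    delay = 30e-9                                  # fake cable delay, seconds
    s21 = 0.5 * np.exp(-2j * np.pi * freqs * delay)
    phase = np.unwrap(np.angle(s21))
    k, b = np.polyfit(freqs, phase, 1)
    corrected = np.abs(s21) * np.exp(1j * (phase - k * freqs - b))
    return corrected                               # phase is now ~flat at zero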
# Not tested yet | gpl-3.0 |
r0mai/metashell | 3rd/templight/llvm/tools/clang/utils/analyzer/CmpRuns.py | 27 | 15658 | #!/usr/bin/env python
"""
CmpRuns - A simple tool for comparing two static analyzer runs to determine
which reports have been added, removed, or changed.
This is designed to support automated testing using the static analyzer, from
two perspectives:
1. To monitor changes in the static analyzer's reports on real code bases,
for regression testing.
2. For use by end users who want to integrate regular static analyzer testing
into a buildbot like environment.
Usage:
# Load the results of both runs, to obtain lists of the corresponding
# AnalysisDiagnostic objects.
#
resultsA = loadResultsFromSingleRun(singleRunInfoA, deleteEmpty)
resultsB = loadResultsFromSingleRun(singleRunInfoB, deleteEmpty)
# Generate a relation from diagnostics in run A to diagnostics in run B
    # to obtain a list of pairs (a, b).
    diff = compareResults(resultsA, resultsB, opts)
"""
from __future__ import division, print_function
from collections import defaultdict
from math import log
from optparse import OptionParser
import json
import os
import plistlib
import re
import sys
STATS_REGEXP = re.compile(r"Statistics: (\{.+\})", re.MULTILINE | re.DOTALL)
class Colors(object):
"""
Color for terminal highlight.
"""
RED = '\x1b[2;30;41m'
GREEN = '\x1b[6;30;42m'
CLEAR = '\x1b[0m'
# Information about analysis run:
# path - the analysis output directory
# root - the name of the root directory, which will be disregarded when
# determining the source file name
class SingleRunInfo(object):
def __init__(self, path, root="", verboseLog=None):
self.path = path
self.root = root.rstrip("/\\")
self.verboseLog = verboseLog
class AnalysisDiagnostic(object):
def __init__(self, data, report, htmlReport):
self._data = data
self._loc = self._data['location']
self._report = report
self._htmlReport = htmlReport
self._reportSize = len(self._data['path'])
def getFileName(self):
root = self._report.run.root
fileName = self._report.files[self._loc['file']]
if fileName.startswith(root) and len(root) > 0:
return fileName[len(root) + 1:]
return fileName
def getRootFileName(self):
path = self._data['path']
if not path:
return self.getFileName()
p = path[0]
if 'location' in p:
fIdx = p['location']['file']
else: # control edge
fIdx = path[0]['edges'][0]['start'][0]['file']
out = self._report.files[fIdx]
root = self._report.run.root
if out.startswith(root):
return out[len(root):]
return out
def getLine(self):
return self._loc['line']
def getColumn(self):
return self._loc['col']
def getPathLength(self):
return self._reportSize
def getCategory(self):
return self._data['category']
def getDescription(self):
return self._data['description']
def getIssueIdentifier(self):
id = self.getFileName() + "+"
if 'issue_context' in self._data:
id += self._data['issue_context'] + "+"
if 'issue_hash_content_of_line_in_context' in self._data:
id += str(self._data['issue_hash_content_of_line_in_context'])
return id
def getReport(self):
if self._htmlReport is None:
return " "
return os.path.join(self._report.run.path, self._htmlReport)
def getReadableName(self):
if 'issue_context' in self._data:
funcnamePostfix = "#" + self._data['issue_context']
else:
funcnamePostfix = ""
rootFilename = self.getRootFileName()
fileName = self.getFileName()
if rootFilename != fileName:
filePrefix = "[%s] %s" % (rootFilename, fileName)
else:
filePrefix = rootFilename
return '%s%s:%d:%d, %s: %s' % (filePrefix,
funcnamePostfix,
self.getLine(),
self.getColumn(), self.getCategory(),
self.getDescription())
# Note, the data format is not an API and may change from one analyzer
# version to another.
def getRawData(self):
return self._data
class AnalysisReport(object):
def __init__(self, run, files):
self.run = run
self.files = files
self.diagnostics = []
class AnalysisRun(object):
def __init__(self, info):
self.path = info.path
self.root = info.root
self.info = info
self.reports = []
# Cumulative list of all diagnostics from all the reports.
self.diagnostics = []
self.clang_version = None
self.stats = []
def getClangVersion(self):
return self.clang_version
def readSingleFile(self, p, deleteEmpty):
data = plistlib.readPlist(p)
if 'statistics' in data:
self.stats.append(json.loads(data['statistics']))
data.pop('statistics')
# We want to retrieve the clang version even if there are no
# reports. Assume that all reports were created using the same
# clang version (this is always true and is more efficient).
if 'clang_version' in data:
if self.clang_version is None:
self.clang_version = data.pop('clang_version')
else:
data.pop('clang_version')
# Ignore/delete empty reports.
if not data['files']:
if deleteEmpty:
os.remove(p)
return
# Extract the HTML reports, if they exists.
if 'HTMLDiagnostics_files' in data['diagnostics'][0]:
htmlFiles = []
for d in data['diagnostics']:
# FIXME: Why is this named files, when does it have multiple
# files?
assert len(d['HTMLDiagnostics_files']) == 1
htmlFiles.append(d.pop('HTMLDiagnostics_files')[0])
else:
htmlFiles = [None] * len(data['diagnostics'])
report = AnalysisReport(self, data.pop('files'))
diagnostics = [AnalysisDiagnostic(d, report, h)
for d, h in zip(data.pop('diagnostics'), htmlFiles)]
assert not data
report.diagnostics.extend(diagnostics)
self.reports.append(report)
self.diagnostics.extend(diagnostics)
def loadResults(path, opts, root="", deleteEmpty=True):
"""
Backwards compatibility API.
"""
return loadResultsFromSingleRun(SingleRunInfo(path, root, opts.verboseLog),
deleteEmpty)
def loadResultsFromSingleRun(info, deleteEmpty=True):
"""
# Load results of the analyzes from a given output folder.
# - info is the SingleRunInfo object
# - deleteEmpty specifies if the empty plist files should be deleted
"""
path = info.path
run = AnalysisRun(info)
if os.path.isfile(path):
run.readSingleFile(path, deleteEmpty)
else:
for (dirpath, dirnames, filenames) in os.walk(path):
for f in filenames:
if (not f.endswith('plist')):
continue
p = os.path.join(dirpath, f)
run.readSingleFile(p, deleteEmpty)
return run
def cmpAnalysisDiagnostic(d):
return d.getIssueIdentifier()
def compareResults(A, B, opts):
"""
compareResults - Generate a relation from diagnostics in run A to
diagnostics in run B.
The result is the relation as a list of triples (a, b) where
each element {a,b} is None or a matching element from the respective run
"""
res = []
# Map size_before -> size_after
path_difference_data = []
# Quickly eliminate equal elements.
neqA = []
neqB = []
eltsA = list(A.diagnostics)
eltsB = list(B.diagnostics)
eltsA.sort(key=cmpAnalysisDiagnostic)
eltsB.sort(key=cmpAnalysisDiagnostic)
while eltsA and eltsB:
a = eltsA.pop()
b = eltsB.pop()
if (a.getIssueIdentifier() == b.getIssueIdentifier()):
if a.getPathLength() != b.getPathLength():
if opts.relative_path_histogram:
path_difference_data.append(
float(a.getPathLength()) / b.getPathLength())
elif opts.relative_log_path_histogram:
path_difference_data.append(
log(float(a.getPathLength()) / b.getPathLength()))
elif opts.absolute_path_histogram:
path_difference_data.append(
a.getPathLength() - b.getPathLength())
res.append((a, b))
elif a.getIssueIdentifier() > b.getIssueIdentifier():
eltsB.append(b)
neqA.append(a)
else:
eltsA.append(a)
neqB.append(b)
neqA.extend(eltsA)
neqB.extend(eltsB)
# FIXME: Add fuzzy matching. One simple and possible effective idea would
# be to bin the diagnostics, print them in a normalized form (based solely
# on the structure of the diagnostic), compute the diff, then use that as
# the basis for matching. This has the nice property that we don't depend
# in any way on the diagnostic format.
for a in neqA:
res.append((a, None))
for b in neqB:
res.append((None, b))
if opts.relative_log_path_histogram or opts.relative_path_histogram or \
opts.absolute_path_histogram:
from matplotlib import pyplot
pyplot.hist(path_difference_data, bins=100)
pyplot.show()
return res
def computePercentile(l, percentile):
"""
Return computed percentile.
"""
return sorted(l)[int(round(percentile * len(l) + 0.5)) - 1]
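# Illustrative sketch (not part of the original tool): computePercentile uses a
# nearest-rank convention; this throwaway helper shows it on a made-up list.
def _demo_compute_percentile():
    values = [5, 1, 4, 2, 3]
    return (computePercentile(values, 0.5),    # -> 3
            computePercentile(values, 0.9),    # -> 5
            computePercentile(values, 0.95))   # -> 5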
def deriveStats(results):
# Assume all keys are the same in each statistics bucket.
combined_data = defaultdict(list)
# Collect data on paths length.
for report in results.reports:
for diagnostic in report.diagnostics:
combined_data['PathsLength'].append(diagnostic.getPathLength())
for stat in results.stats:
for key, value in stat.items():
combined_data[key].append(value)
combined_stats = {}
for key, values in combined_data.items():
combined_stats[str(key)] = {
"max": max(values),
"min": min(values),
"mean": sum(values) / len(values),
"90th %tile": computePercentile(values, 0.9),
"95th %tile": computePercentile(values, 0.95),
"median": sorted(values)[len(values) // 2],
"total": sum(values)
}
return combined_stats
def compareStats(resultsA, resultsB):
statsA = deriveStats(resultsA)
statsB = deriveStats(resultsB)
keys = sorted(statsA.keys())
for key in keys:
print(key)
for kkey in statsA[key]:
valA = float(statsA[key][kkey])
valB = float(statsB[key][kkey])
report = "%.3f -> %.3f" % (valA, valB)
# Only apply highlighting when writing to TTY and it's not Windows
if sys.stdout.isatty() and os.name != 'nt':
if valB != 0:
ratio = (valB - valA) / valB
if ratio < -0.2:
report = Colors.GREEN + report + Colors.CLEAR
elif ratio > 0.2:
report = Colors.RED + report + Colors.CLEAR
print("\t %s %s" % (kkey, report))
def dumpScanBuildResultsDiff(dirA, dirB, opts, deleteEmpty=True,
Stdout=sys.stdout):
# Load the run results.
resultsA = loadResults(dirA, opts, opts.rootA, deleteEmpty)
resultsB = loadResults(dirB, opts, opts.rootB, deleteEmpty)
if opts.show_stats:
compareStats(resultsA, resultsB)
if opts.stats_only:
return
# Open the verbose log, if given.
if opts.verboseLog:
auxLog = open(opts.verboseLog, "wb")
else:
auxLog = None
diff = compareResults(resultsA, resultsB, opts)
foundDiffs = 0
totalAdded = 0
totalRemoved = 0
for res in diff:
a, b = res
if a is None:
Stdout.write("ADDED: %r\n" % b.getReadableName())
foundDiffs += 1
totalAdded += 1
if auxLog:
auxLog.write("('ADDED', %r, %r)\n" % (b.getReadableName(),
b.getReport()))
elif b is None:
Stdout.write("REMOVED: %r\n" % a.getReadableName())
foundDiffs += 1
totalRemoved += 1
if auxLog:
auxLog.write("('REMOVED', %r, %r)\n" % (a.getReadableName(),
a.getReport()))
else:
pass
TotalReports = len(resultsB.diagnostics)
Stdout.write("TOTAL REPORTS: %r\n" % TotalReports)
Stdout.write("TOTAL ADDED: %r\n" % totalAdded)
Stdout.write("TOTAL REMOVED: %r\n" % totalRemoved)
if auxLog:
auxLog.write("('TOTAL NEW REPORTS', %r)\n" % TotalReports)
auxLog.write("('TOTAL DIFFERENCES', %r)\n" % foundDiffs)
auxLog.close()
return foundDiffs, len(resultsA.diagnostics), len(resultsB.diagnostics)
def generate_option_parser():
parser = OptionParser("usage: %prog [options] [dir A] [dir B]")
parser.add_option("", "--rootA", dest="rootA",
help="Prefix to ignore on source files for directory A",
action="store", type=str, default="")
parser.add_option("", "--rootB", dest="rootB",
help="Prefix to ignore on source files for directory B",
action="store", type=str, default="")
parser.add_option("", "--verbose-log", dest="verboseLog",
help="Write additional information to LOG \
[default=None]",
action="store", type=str, default=None,
metavar="LOG")
parser.add_option("--relative-path-differences-histogram",
action="store_true", dest="relative_path_histogram",
default=False,
help="Show histogram of relative paths differences. \
Requires matplotlib")
parser.add_option("--relative-log-path-differences-histogram",
action="store_true", dest="relative_log_path_histogram",
default=False,
help="Show histogram of log relative paths differences. \
Requires matplotlib")
parser.add_option("--absolute-path-differences-histogram",
action="store_true", dest="absolute_path_histogram",
default=False,
help="Show histogram of absolute paths differences. \
Requires matplotlib")
parser.add_option("--stats-only", action="store_true", dest="stats_only",
default=False, help="Only show statistics on reports")
parser.add_option("--show-stats", action="store_true", dest="show_stats",
default=False, help="Show change in statistics")
return parser
def main():
parser = generate_option_parser()
(opts, args) = parser.parse_args()
if len(args) != 2:
parser.error("invalid number of arguments")
dirA, dirB = args
dumpScanBuildResultsDiff(dirA, dirB, opts)
if __name__ == '__main__':
main()
| gpl-3.0 |
ml-lab/neuralnilm | neuralnilm/monitor/monitor.py | 4 | 10943 | from __future__ import print_function, division
from time import sleep
import pymongo
from monary import Monary
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from neuralnilm.consts import DATA_FOLD_NAMES
from neuralnilm.utils import get_colors
from neuralnilm.config import config
class Monitor(object):
def __init__(self, experiment_id, output_path='.',
update_period=1, max_num_lines=1000,
mongo_db='neuralnilm',
mongo_host=None):
"""
Parameters
----------
max_num_lines : int
Number of pixels.
"""
self.experiment_id = experiment_id
self.output_path = output_path
self.update_period = update_period
self.max_num_lines = max_num_lines
self._last_iteration_processed = {'train': 0, 'validation': 0}
if mongo_host is None:
self.mongo_host = config.get("MongoDB", "address")
else:
self.mongo_host = mongo_host
self.mongo_client = pymongo.MongoClient(self.mongo_host)
self.db = self.mongo_client[mongo_db]
self.mongo_db = mongo_db
self._validation_metric_names = None
def start(self):
while True:
if self._new_scores_available('train'):
self._plot_train_scores()
if self._new_scores_available('validation'):
self._plot_validation_scores()
sleep(self.update_period)
    def _new_scores_available(self, train_or_validation):
        """Returns True if new scores are available from the database.
Parameters
----------
train_or_validation : str, {'train', 'validation'}
"""
collection = self.db[train_or_validation + '_scores']
document = collection.find_one(
filter={
'experiment_id': self.experiment_id,
'iteration': {
'$gt': self._last_iteration_processed[train_or_validation]}
}
)
return bool(document)
def _get_validation_mse(self):
monary = Monary(host=self.mongo_host)
def get_mse_for_fold(fold):
iterations, loss, source_id = monary.query(
db=self.mongo_db,
coll='validation_scores',
query={'experiment_id': self.experiment_id, 'fold': fold},
fields=['iteration', 'scores.regression.mean_squared_error',
'source_id'],
types=['int32', 'float32', 'int8']
)
scores_df = pd.DataFrame(
{'loss': loss, 'source_id': source_id}, index=iterations)
scores_df = scores_df.sort_index()
return scores_df
FOLDS = ['unseen_appliances', 'unseen_activations_of_seen_appliances']
scores = {}
for fold in FOLDS:
scores[fold] = get_mse_for_fold(fold)
return scores
def _get_train_costs(self):
# Get train scores
monary = Monary(host=self.mongo_host)
iterations, loss, source_id = monary.query(
db=self.mongo_db,
coll='train_scores',
query={'experiment_id': self.experiment_id},
fields=['iteration', 'loss', 'source_id'],
types=['int32', 'float32', 'int8']
)
scores_df = pd.DataFrame(
{'loss': loss, 'source_id': source_id}, index=iterations)
scores_df = scores_df.sort_index()
return scores_df
def _plot_train_scores(self):
train_scores_df = self._get_train_costs()
all_scores = self._get_validation_mse()
all_scores.update({'train': train_scores_df})
fig, ax = plt.subplots(1)
source_names = self.source_names
for fold, scores_df in all_scores.iteritems():
sources = scores_df['source_id'].unique()
for source_i in sources:
# Get losses for just this source
mask = scores_df['source_id'] == source_i
loss = scores_df[mask]['loss']
# Downsample if necessary
loss_for_source = self._downsample(loss)
# Plot
ax.plot(loss_for_source.index, loss_for_source.values,
label='{} : {}'.format(fold, source_names[source_i]))
ax.legend()
plt.title('Training costs')
ax.set_xlabel('Iteration')
ax.set_ylabel('Mean squared error')
plt.show()
try:
self._last_iteration_processed['train'] = train_scores_df.index[-1]
except IndexError:
# No data loaded
pass
@property
def validation_metric_names(self):
"""
Returns
-------
metric_names : list
e.g. ['regression.mean_squared_error',
'classification_2_state.f1_score']
"""
if self._validation_metric_names is None:
scores = self.db.validation_scores.find_one(
filter={'experiment_id': self.experiment_id})['scores']
self._validation_metric_names = []
for metric_type, metrics in scores.iteritems():
for metric_name in metrics:
self._validation_metric_names.append(
metric_type + '.' + metric_name)
return self._validation_metric_names
@property
def source_names(self):
"""
Returns
-------
source_names : dict
"""
metadata = self.db.experiments.find_one({'_id': self.experiment_id})
sources = metadata['data']['pipeline']['sources']
source_names = {int(i): sources[i]['name'] for i in sources}
return source_names
def _plot_validation_scores(self):
validation_sources = self.db.validation_scores.distinct(
key='source_id', filter={'experiment_id': self.experiment_id})
validation_sources.sort()
num_cols = len(validation_sources)
fig, axes = plt.subplots(
nrows=3, ncols=num_cols, sharex="col", sharey=True,
squeeze=False)
fig.patch.set_facecolor('white')
source_names = self.source_names
for col, source_id in enumerate(validation_sources):
for row, fold in enumerate(DATA_FOLD_NAMES):
ax = axes[row, col]
self._plot_validation_scores_for_source_and_fold(
ax=ax, source_id=source_id, fold=fold,
show_axes_labels=(row == 0),
show_scales=(col == num_cols-1))
if row == 0:
ax.set_title(source_names[source_id], position=(.5, 1.05))
elif row == 2:
ax.set_xlabel('Iteration', labelpad=10)
if col == 0:
ax.set_ylabel(fold.replace("_", " ").title(), labelpad=10)
ax.patch.set_facecolor((0.95, 0.95, 0.95))
plt.subplots_adjust(
top=0.91, bottom=0.05, left=0.03, right=0.7,
hspace=0.15, wspace=0.1)
plt.show()
def _plot_validation_scores_for_source_and_fold(self, ax, source_id, fold,
show_axes_labels,
show_scales):
fields = ['iteration'] + ['scores.' + metric_name for metric_name in
self.validation_metric_names]
monary = Monary(host=self.mongo_host)
result = monary.query(
db=self.mongo_db,
coll='validation_scores',
query={
'experiment_id': self.experiment_id,
'source_id': source_id,
'fold': fold
},
fields=fields,
types=['int32'] + ['float32'] * len(self.validation_metric_names)
)
index = result[0]
data = {metric_name: result[i+1] for i, metric_name in
enumerate(self.validation_metric_names)}
df = pd.DataFrame(data, index=index)
df = df.sort_index()
df = self._downsample(df)
# Create multiple independent axes. Adapted from Joe Kington's answer:
# http://stackoverflow.com/a/7734614
# Colours
n = len(self.validation_metric_names)
colors = get_colors(n)
# Twin the x-axis to make independent y-axes.
axes = [ax]
for metric_name in self.validation_metric_names[1:]:
axes.append(ax.twinx())
SEP = 0.2
if show_scales:
for i, axis in enumerate(axes):
axis.yaxis.tick_right()
if i != 0:
# To make the border of the right-most axis visible,
# we need to turn the frame on. This hides the other plots,
# however, so we need to turn its fill off.
axis.set_frame_on(True)
axis.patch.set_visible(False)
# Move the last y-axes spines over to the right.
axis.spines['right'].set_position(
('axes', 1 + (SEP * i)))
else:
for axis in axes:
axis.tick_params(labelright=False, labelleft=False)
axis.yaxis.set_ticks_position('none')
axis.spines['right'].set_visible(False)
for axis in axes:
for spine in ['top', 'left', 'bottom']:
axis.spines[spine].set_visible(False)
axis.xaxis.set_ticks_position('none')
lines = []
for i, (axis, metric_name, color) in enumerate(
zip(axes, self.validation_metric_names, colors)):
axis.tick_params(axis='y', colors=color, direction='out')
label = metric_name.replace("regression.", "")
label = label.replace("classification_", "")
label = label.replace("_", " ")
label = label.replace(".", " ")
label = label.replace(" ", "\n")
line, = axis.plot(
df.index, df[metric_name].values, color=color, label=label)
if show_axes_labels and show_scales:
axis.set_ylabel(
label, color=color, rotation=0, fontsize=8, va='bottom')
if i == 0:
coords = (1.05, 1.1)
else:
coords = (1.05 + (SEP * i), 1.1)
axis.yaxis.set_label_coords(*coords)
lines.append(line)
self._last_iteration_processed['validation'] = index[-1]
return lines
def _downsample(self, data):
"""Downsample `data` if necessary."""
if len(data) > self.max_num_lines:
divisor = int(np.ceil(len(data) / self.max_num_lines))
data = data.groupby(lambda x: x // divisor).mean()
data.index *= divisor
return data
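# Editor's note: a minimal usage sketch, not part of the original module.
# The experiment id and MongoDB host below are hypothetical.
def _example_monitor():
    monitor = Monitor(experiment_id="exp_0001", mongo_host="localhost")
    monitor.start()  # blocks, re-plotting whenever new scores appear in MongoDB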
| apache-2.0 |
JPFrancoia/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 93 | 3243 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
schets/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 8 | 50342 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'auto':
raise ValueError("class_weight 'auto' is not supported for "
"partial_fit. In order to use 'auto' weights, "
"use compute_class_weight('auto', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.")
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "auto" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "auto" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
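# Editor's note: illustrative out-of-core training sketch using partial_fit and
# the probabilistic 'log' loss (toy data; not part of the original module):
def _example_partial_fit():
    X = np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]])
    y = np.array([1, 1, 2, 2])
    clf = SGDClassifier(loss="log", random_state=0)
    for _ in range(5):  # several passes, as if streaming the same mini-batch
        clf.partial_fit(X, y, classes=np.array([1, 2]))
    return clf.predict_proba([[-0.8, -1.]])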
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = y.astype(np.float64)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self.decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
`average_coef_` : array, shape (n_features,)
Averaged weights assigned to the features.
`average_intercept_` : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
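# Editor's note: a minimal fit/predict round-trip for the regressor
# (toy data; illustrative only, not part of the original module):
def _example_sgd_regressor():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(20)
    reg = SGDRegressor(n_iter=20, random_state=0)
    reg.fit(X, y)
    return reg.predict(X[:5])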
| bsd-3-clause |
pothosware/gnuradio | gr-filter/examples/synth_filter.py | 58 | 2552 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
for fi in freqs:
s = analog.sig_source_c(fs, analog.GR_SIN_WAVE, fi, 1)
sigs.append(s)
taps = filter.firdes.low_pass_2(len(freqs), fs,
fs/float(nchans)/2, 100, 100)
print "Num. Taps = %d (taps per filter = %d)" % (len(taps),
len(taps)/nchans)
filtbank = filter.pfb_synthesizer_ccf(nchans, taps)
head = blocks.head(gr.sizeof_gr_complex, N)
snk = blocks.vector_sink_c()
tb = gr.top_block()
tb.connect(filtbank, head, snk)
for i,si in enumerate(sigs):
tb.connect(si, (filtbank, i))
tb.run()
if 1:
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(snk.data()[1000:])
fftlen = 2048
f2 = pylab.figure(2)
s2 = f2.add_subplot(1,1,1)
winfunc = scipy.blackman
s2.psd(snk.data()[10000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
pylab.show()
if __name__ == "__main__":
main()
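# Editor's note (descriptive, not part of the original example): each input
# stream occupies one channel of width fs = 8000 Hz, so the synthesizer output
# runs at nchans * fs = 56 kHz and the five tones above show up as five
# occupied channels in the PSD plot.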
| gpl-3.0 |
yiqingj/airflow | airflow/contrib/plugins/metastore_browser/main.py | 62 | 5773 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
from flask import Blueprint, request
from flask_admin import BaseView, expose
import pandas as pd
from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000
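# Editor's note (illustrative): the table selector in objects() below honours
# these constants, e.g.
#   DB_WHITELIST = ['default', 'analytics']
#   DB_BLACKLIST = ['tmp', 'staging']
# When both are set, the blacklist clause overwrites the whitelist clause, so
# only the blacklist filter is applied.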
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a flask admin BaseView
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
@expose('/')
def index(self):
sql = """
SELECT
a.name as db, db_location_uri as location,
count(1) as object_count, a.desc as description
FROM DBS a
JOIN TBLS b ON a.DB_ID = b.DB_ID
GROUP BY a.name, db_location_uri, a.desc
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
df.db = (
'<a href="/admin/metastorebrowserview/db/?db=' +
df.db + '">' + df.db + '</a>')
table = df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
escape=False,
na_rep='',)
return self.render(
"metastore_browser/dbs.html", table=table)
@expose('/table/')
def table(self):
table_name = request.args.get("table")
m = HiveMetastoreHook(METASTORE_CONN_ID)
table = m.get_table(table_name)
return self.render(
"metastore_browser/table.html",
table=table, table_name=table_name, datetime=datetime, int=int)
@expose('/db/')
def db(self):
db = request.args.get("db")
m = HiveMetastoreHook(METASTORE_CONN_ID)
tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
return self.render(
"metastore_browser/db.html", tables=tables, db=db)
@wwwutils.gzipped
@expose('/partitions/')
def partitions(self):
schema, table = request.args.get("table").split('.')
sql = """
SELECT
a.PART_NAME,
a.CREATE_TIME,
c.LOCATION,
c.IS_COMPRESSED,
c.INPUT_FORMAT,
c.OUTPUT_FORMAT
FROM PARTITIONS a
JOIN TBLS b ON a.TBL_ID = b.TBL_ID
JOIN DBS d ON b.DB_ID = d.DB_ID
JOIN SDS c ON a.SD_ID = c.SD_ID
WHERE
b.TBL_NAME like '{table}' AND
d.NAME like '{schema}'
ORDER BY PART_NAME DESC
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@wwwutils.gzipped
@expose('/objects/')
def objects(self):
where_clause = ''
if DB_WHITELIST:
dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
where_clause = "AND b.name IN ({})".format(dbs)
if DB_BLACKLIST:
dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
where_clause = "AND b.name NOT IN ({})".format(dbs)
sql = """
SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
FROM TBLS a
JOIN DBS b ON a.DB_ID = b.DB_ID
WHERE
a.TBL_NAME NOT LIKE '%tmp%' AND
a.TBL_NAME NOT LIKE '%temp%' AND
b.NAME NOT LIKE '%tmp%' AND
b.NAME NOT LIKE '%temp%'
{where_clause}
LIMIT {LIMIT};
""".format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
d = [
{'id': row[0], 'text': row[0]}
for row in h.get_records(sql)]
return json.dumps(d)
@wwwutils.gzipped
@expose('/data/')
def data(self):
table = request.args.get("table")
sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
h = PrestoHook(PRESTO_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@expose('/ddl/')
def ddl(self):
table = request.args.get("table")
sql = "SHOW CREATE TABLE {table};".format(table=table)
h = HiveCliHook(HIVE_CLI_CONN_ID)
return h.run_cli(sql)
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
name = "metastore_browser"
flask_blueprints = [bp]
admin_views = [v]
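# Editor's note (not part of the original plugin): Airflow loads this class by
# importing modules placed in its plugins folder (typically
# $AIRFLOW_HOME/plugins/); once loaded, the "Hive Metadata Browser" view defined
# above appears under the "Plugins" menu of the webserver UI.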
| apache-2.0 |
Udzu/pudzu | dataviz/euheight.py | 1 | 2023 | from pudzu.charts import *
from pudzu.sandbox.bamboo import *
import seaborn as sns
# generate map
df = pd.read_csv("datasets/euheight.csv").set_index("country").fillna(0)
palette = tmap(RGBA, sns.cubehelix_palette(7, start=0.5, rot=-0.85))
basketball = Image.open("icons/basketball.png").convert("RGBA").resize((18,18))
def colorfn(c):
if c not in df.index:
return "white" if c in ['Sea', 'Borders'] else "grey"
return palette[(int(df['height'][c]) - 172) // 2]
def labelfn(c):
if c not in df.index or df['eurobasket'][c] == 0:
return None
return Image.from_row([basketball]*int(df['eurobasket'][c]), bg=0, padding=2)
map = map_chart("maps/Europe.png", colorfn, labelfn)
# legend
legendboxes = Image.from_array([
[Image.new("RGBA", (40,40), palette[i]),
Image.from_text("{}-{} cm".format(i * 2 + 172, str(i * 2 + 174)[-1]), arial(16))]
for i in reversed(range(6))] + [
[Image.new("RGBA", (40,40), "grey"), Image.from_text("No data", arial(16))],
[Image.new("RGBA", (40,40), "white").place(basketball), Image.from_text("Eurobasket wins\nsince 1993*", arial(16))]
], xalign=(0.5, 0), padding=(3,0), bg="white")
legend = Image.from_column([
Image.from_text("Heights", arial(16, bold=True), padding=5),
legendboxes,
Image.from_text("* Eurobasket wins for\nFRY are under Serbia.", arial(16, italics=True), align="left")
], bg="white", xalign=0).pad(5, "white").pad(1, "black")
chart = map.place(legend, align=(1,0), padding=10)
# title
title = Image.from_column([
Image.from_text("AVERAGE 18 YEAR OLD MALE HEIGHT (2014)", arial(48, bold=True)),
Image.from_text("and number of Eurobasket wins since 1993", arial(36))],
bg="white")
# title = Image.from_text("Europe by number of, and longest borders", arial(48, bold=True), bg="white")
img = Image.from_column([title, chart], bg="white", padding=2)
img.place(Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/euheight.png")
| mit |
mugizico/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
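# Rough scale of the weighting above (editor's illustration, assuming beta=5 and
# eps=1e-6 as set): a pixel-to-pixel gradient equal to one standard deviation of
# the image gets edge weight exp(-5) + 1e-6 ~= 0.0067, while a zero gradient
# keeps weight exp(0) + 1e-6 ~= 1.0, so strong edges are heavily down-weighted.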
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
antoinecarme/pyaf | pyaf/TS/Plots.py | 1 | 10299 | # Copyright (C) 2016 Antoine Carme <[email protected]>
# All rights reserved.
# This file is part of the Python Automatic Forecasting (PyAF) library and is made available under
# the terms of the 3 Clause BSD license
import pandas as pd
import numpy as np
from io import BytesIO
import base64
SIGNAL_COLOR='green'
FORECAST_COLOR='blue'
RESIDUE_COLOR='red'
COMPONENT_COLOR='navy'
SHADED_COLOR='turquoise'
UPPER_COLOR='grey'
LOWER_COLOR='black'
def add_patched_legend(ax , names):
# matplotlib does not like labels starting with '_'
patched_names = []
for name in names:
# remove leading '_' => here, this is almost OK: no signal transformation
patched_name = name.lstrip('_')
patched_names = patched_names + [ patched_name ]
# print("add_patched_legend" , names, patched_names)
ax.legend(patched_names)
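# Editor's illustration (not part of the original module): matplotlib silently
# skips legend entries whose label starts with '_', which is why the names are
# patched first, e.g.
# ['_Signal_Forecast', 'Signal']  ->  legend shows ['Signal_Forecast', 'Signal']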
def fig_to_png_base64(fig):
figfile = BytesIO()
fig.savefig(figfile, format='png')
figfile.seek(0) # rewind to beginning of file
figdata_png = base64.b64encode(figfile.getvalue())
return figdata_png.decode('utf8')
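# Editor's usage sketch (hypothetical, not in the original file): the returned
# string can be embedded directly in an HTML report via a data URI, e.g.
# html = '<img src="data:image/png;base64,{}"/>'.format(fig_to_png_base64(fig))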
def decomp_plot_internal(df, time, signal, estimator, residue, name = None, format='png', max_length = 1000, horizon = 1) :
assert(df.shape[0] > 0)
assert(df.shape[1] > 0)
assert(time in df.columns)
assert(signal in df.columns)
assert(estimator in df.columns)
assert(residue in df.columns)
import matplotlib
import matplotlib.pyplot as plt
# print("MATPLOTLIB_BACKEND", matplotlib.get_backend())
# matplotlib.use('Agg')
df1 = df.tail(max(max_length , 4 * horizon));
if(name is not None):
plt.switch_backend('Agg')
fig, axs = plt.subplots(ncols=2, figsize=(32, 16))
lColor = COMPONENT_COLOR;
if(name is not None and name.endswith("Forecast")):
lColor = FORECAST_COLOR;
df1.plot.line(time, [signal, estimator, residue],
color=[SIGNAL_COLOR, lColor, RESIDUE_COLOR],
ax=axs[0] , grid = True, legend=False)
add_patched_legend(axs[0] , [signal, estimator, residue])
residues = df1[residue].values
import scipy.stats as scistats
resid = residues[~np.isnan(residues)]
scistats.probplot(resid, dist="norm", plot=axs[1])
return fig
def decomp_plot(df, time, signal, estimator, residue, name = None, format='png', max_length = 1000, horizon = 1) :
fig = decomp_plot_internal(df, time, signal, estimator, residue, name, format, max_length, horizon)
if(name is not None):
import matplotlib
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
fig.savefig(name + '_decomp_output.' + format)
plt.close(fig)
def decomp_plot_as_png_base64(df, time, signal, estimator, residue, name = None, max_length = 1000, horizon = 1) :
fig = decomp_plot_internal(df, time, signal, estimator, residue, name, format, max_length, horizon)
import matplotlib
import matplotlib.pyplot as plt
png_b64 = fig_to_png_base64(fig)
plt.close(fig)
return png_b64
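# Editor's usage sketch (the column names below are hypothetical): given a frame
# holding a signal, its fit and the residue, decomp_plot writes a PNG named
# '<name>_decomp_output.png':
# decomp_plot(df, 'Date', 'Signal', 'Signal_Forecast', 'Signal_Residue',
#             name='myplot', horizon=12)   # -> myplot_decomp_output.png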
def prediction_interval_plot_internal(df, time, signal, estimator, lower, upper, name = None, format='png', max_length = 1000, horizon = 1) :
assert(df.shape[0] > 0)
assert(df.shape[1] > 0)
assert(time in df.columns)
assert(signal in df.columns)
assert(estimator in df.columns)
assert(lower in df.columns)
assert(upper in df.columns)
df1 = df.tail(max(max_length, 4 * horizon)).copy();
lMin = np.mean(df1[signal]) - np.std(df1[signal]) * 10;
lMax = np.mean(df1[signal]) + np.std(df1[signal]) * 10;
df1[lower] = df1[lower].apply(lambda x : x if (np.isnan(x) or x >= lMin) else np.nan);
df1[upper] = df1[upper].apply(lambda x : x if (np.isnan(x) or x <= lMax) else np.nan);
# last value of the signal
lLastSignalPos = df1[signal].dropna().tail(1).index[0];
lEstimatorValue = df1[estimator][lLastSignalPos];
df1.loc[lLastSignalPos , lower] = lEstimatorValue;
df1.loc[lLastSignalPos , upper] = lEstimatorValue;
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
if(name is not None):
plt.switch_backend('Agg')
fig, axs = plt.subplots(ncols=1, figsize=(16, 8))
df1.plot.line(time, [signal, estimator, lower, upper],
color=[SIGNAL_COLOR, FORECAST_COLOR, LOWER_COLOR, UPPER_COLOR],
ax=axs, grid = True, legend=False)
add_patched_legend(axs , [signal, estimator, lower, upper])
x = df1[time];
type1 = np.dtype(x)
if(type1.kind == 'M'):
x = x.apply(lambda t : t.date());
axs.fill_between(x.values, df1[lower], df1[upper], color=SHADED_COLOR, alpha=.2)
return fig
def prediction_interval_plot(df, time, signal, estimator, lower, upper, name = None, format='png', max_length = 1000, horizon = 1) :
fig = prediction_interval_plot_internal(df, time, signal, estimator, lower, upper, name, format, max_length, horizon)
if(name is not None):
import matplotlib
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
fig.savefig(name + '_prediction_intervals_output.' + format)
plt.close(fig)
def prediction_interval_plot_as_png_base64(df, time, signal, estimator, lower, upper, name = None, max_length = 1000, horizon = 1) :
fig = prediction_interval_plot_internal(df, time, signal, estimator, lower, upper, name, format, max_length, horizon)
import matplotlib
import matplotlib.pyplot as plt
png_b64 = fig_to_png_base64(fig)
plt.close(fig)
return png_b64
def quantiles_plot_internal(df, time, signal, estimator, iQuantiles, name = None, format='png', horizon = 1) :
assert(df.shape[0] > 0)
assert(df.shape[1] > 0)
assert(time in df.columns)
assert(signal in df.columns)
assert(estimator in df.columns)
lQuantileNames = [estimator + '_Quantile_' + str(q) for q in iQuantiles]
df1 = df.tail(horizon)
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
if(name is not None):
plt.switch_backend('Agg')
lMin, lMax = df1[lQuantileNames].values.min(), df1[lQuantileNames].values.max()
cm = plt.cm.get_cmap('RdYlBu_r')
fig, axs = plt.subplots(horizon, 1, figsize=(12, 12), squeeze = True)
# plt.subplots_adjust(hspace=1)
# print(axs)
if (horizon == 1):
axs = [axs]
for h in range(horizon):
lIdx = df1.index[h]
lTime = df1.loc[lIdx, time]
q_values = df1.loc[lIdx, lQuantileNames].tolist()
_, bins1, patches = axs[h].hist(q_values, bins = q_values, weights=[1]*len(lQuantileNames), density = True)
for i, p in enumerate(patches):
j = (bins1[i] - lMin) / (lMax - lMin)
plt.setp(p, 'facecolor', cm(j))
if(h == 0):
axs[h].set_title('Forecast Quantiles')
axs[h].set_xlim((lMin,lMax))
# axs[h].set_ylim((0, 1.0))
axs[h].set_ylabel('H_' + str(h + 1))
axs[h].set_yticklabels([])
if(h < (horizon - 1)):
axs[h].set_xlabel('')
axs[h].set_xticklabels([])
return fig
def quantiles_plot(df, time, signal, estimator, iQuantiles, name = None, format='png', horizon = 1) :
fig = quantiles_plot_internal(df, time, signal, estimator, iQuantiles, name, format, horizon)
import matplotlib
import matplotlib.pyplot as plt
if(name is not None):
plt.switch_backend('Agg')
fig.savefig(name + '_quantiles_output.' + format)
plt.close(fig)
def quantiles_plot_as_png_base64(df, time, signal, estimator, iQuantiles, name = None, format='png', horizon = 1) :
fig = quantiles_plot_internal(df, time, signal, estimator, iQuantiles, name, format, horizon)
import matplotlib
import matplotlib.pyplot as plt
png_b64 = fig_to_png_base64(fig)
plt.close(fig)
return png_b64
def qqplot_residues(df , residue):
pass
def build_record_label(labels_list):
out = "<f0>" + str(labels_list[0]);
i = 1;
for l in labels_list[1:]:
out = out + " | <f" + str(i) + "> " + str(l) ;
i = i + 1;
return out + "";
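# Editor's illustration (not in the original file): this builds a graphviz
# "record"-shaped node label, e.g.
# >>> build_record_label(['total', 'A', 'B'])
# '<f0>total | <f1> A | <f2> B'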
def plot_hierarchy_internal(structure , iAnnotations, name):
import pydot
graph = pydot.Dot(graph_type='graph', rankdir='LR', fontsize="12.0");
graph.set_node_defaults(shape='record')
lLevelsReversed = sorted(structure.keys(), reverse=True);
for level in lLevelsReversed:
color = '#%02x%02x%02x' % (255, 255, 127 + int(128 * (1.0 - (level + 1.0) / len(lLevelsReversed))));
for col in structure[level].keys():
lLabel = col if iAnnotations is None else str(iAnnotations[col]);
if iAnnotations is not None:
lLabel = build_record_label(iAnnotations[col]);
node_col = pydot.Node(col, label=lLabel, style="filled", fillcolor=color, fontsize="12.0")
graph.add_node(node_col);
for col1 in structure[level][col]:
lLabel1 = col1
if iAnnotations is not None:
lLabel1 = build_record_label(iAnnotations[col1]);
color1 = '#%02x%02x%02x' % (255, 255, 128 + int(128 * (1.0 - (level + 2.0) / len(lLevelsReversed))));
node_col1 = pydot.Node(col1, label=lLabel1, style="filled",
fillcolor=color1, fontsize="12.0")
graph.add_node(node_col1);
lEdgeLabel = "";
if iAnnotations is not None:
lEdgeLabel = iAnnotations[col + "_" + col1];
lEdge = pydot.Edge(node_col, node_col1, color="red", label=lEdgeLabel, fontsize="12.0")
graph.add_edge(lEdge)
# print(graph.obj_dict)
return graph
def plot_hierarchy(structure , iAnnotations, name):
graph = plot_hierarchy_internal(structure , iAnnotations, name)
if(name is not None):
graph.write_png(name);
else:
from IPython.display import Image, display
plot1 = Image(graph.create_png())
display(plot1)
def plot_hierarchy_as_png_base64(structure , iAnnotations, name):
graph = plot_hierarchy_internal(structure , iAnnotations, name)
figdata_png = base64.b64encode(graph.create_png())
return figdata_png.decode('utf8')
| bsd-3-clause |
tylerjereddy/scipy | scipy/stats/_multivariate.py | 7 | 153934 | #
# Author: Joris Vankerschaver 2013
#
import math
import numpy as np
from numpy import asarray_chkfinite, asarray
import scipy.linalg
from scipy._lib import doccer
from scipy.special import gammaln, psi, multigammaln, xlogy, entr, betaln
from scipy._lib._util import check_random_state
from scipy.linalg.blas import drot
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
from ._discrete_distns import binom
from . import mvn
__all__ = ['multivariate_normal',
'matrix_normal',
'dirichlet',
'wishart',
'invwishart',
'multinomial',
'special_ortho_group',
'ortho_group',
'random_correlation',
'unitary_group',
'multivariate_t',
'multivariate_hypergeom']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
_doc_random_state = """\
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
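# Editor's illustration (not part of the scipy source): entries whose magnitude
# is at most `eps` are treated as exactly zero rather than inverted, e.g.
# >>> _pinv_1d(np.array([2.0, 1e-8, 4.0]))
# array([0.5 , 0.  , 0.25])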
class _PSD:
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
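# Editor's illustration (not part of the scipy source): for a rank-deficient but
# positive semidefinite matrix, _PSD exposes the pseudo-quantities used
# throughout this module, e.g.
# >>> psd = _PSD(np.diag([2.0, 0.0]))   # allow_singular defaults to True
# >>> psd.rank, round(psd.log_pdet, 4)
# (1, 0.6931)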
class multi_rv_generic:
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super().__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the Generator object for generating random variates.
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen:
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
_mvn_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_mvn_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_mvn_doc_frozen_callparams = ""
_mvn_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvn_docdict_params = {
'_mvn_doc_default_callparams': _mvn_doc_default_callparams,
'_mvn_doc_callparams_note': _mvn_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvn_docdict_noparams = {
'_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
'_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_normal_gen(multi_rv_generic):
r"""A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``cdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)``
Cumulative distribution function.
``logcdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)``
Log of the cumulative distribution function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_mvn_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _process_parameters(self, dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be "
"a scalar.")
# Check input sizes and return full arrays for mean and cov if
# necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." %
dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""Log of the multivariate normal probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
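# Editor's sanity check (illustrative, not part of the scipy source): in one
# dimension with zero mean and unit covariance the expression above reduces to
# -0.5*(log(2*pi) + x**2), so
# >>> multivariate_normal.logpdf(0.0)   # mean=0, cov=1 by default
# -0.9189385332046727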
def logpdf(self, x, mean=None, cov=1, allow_singular=False):
"""Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Log of the probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean=None, cov=1, allow_singular=False):
"""Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def _cdf(self, x, mean, cov, maxpts, abseps, releps):
"""Log of the multivariate normal cumulative distribution function.
Parameters
----------
x : ndarray
Points at which to evaluate the cumulative distribution function.
mean : ndarray
Mean of the distribution
cov : array_like
Covariance matrix of the distribution
maxpts : integer
The maximum number of points to use for integration
abseps : float
Absolute error tolerance
releps : float
Relative error tolerance
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'cdf' instead.
.. versionadded:: 1.0.0
"""
lower = np.full(mean.shape, -np.inf)
# mvnun expects 1-d arguments, so process points sequentially
func1d = lambda x_slice: mvn.mvnun(lower, x_slice, mean, cov,
maxpts, abseps, releps)[0]
out = np.apply_along_axis(func1d, -1, x)
return _squeeze_output(out)
def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
abseps=1e-5, releps=1e-5):
"""Log of the multivariate normal cumulative distribution function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
maxpts : integer, optional
The maximum number of points to use for integration
(default `1000000*dim`)
abseps : float, optional
Absolute error tolerance (default 1e-5)
releps : float, optional
Relative error tolerance (default 1e-5)
Returns
-------
cdf : ndarray or scalar
Log of the cumulative distribution function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
.. versionadded:: 1.0.0
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
# Use _PSD to check covariance matrix
_PSD(cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * dim
out = np.log(self._cdf(x, mean, cov, maxpts, abseps, releps))
return out
def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
abseps=1e-5, releps=1e-5):
"""Multivariate normal cumulative distribution function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
maxpts : integer, optional
The maximum number of points to use for integration
(default `1000000*dim`)
abseps : float, optional
Absolute error tolerance (default 1e-5)
releps : float, optional
Relative error tolerance (default 1e-5)
Returns
-------
cdf : ndarray or scalar
Cumulative distribution function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
.. versionadded:: 1.0.0
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
# Use _PSD to check covariance matrix
_PSD(cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * dim
out = self._cdf(x, mean, cov, maxpts, abseps, releps)
return out
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_mvn_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
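# Editor's illustration (not part of the scipy source): for the standard 1-D
# normal this gives 0.5*log(2*pi*e) ~= 1.4189, the closed-form differential
# entropy of N(0, 1), e.g.
# >>> multivariate_normal.entropy()   # mean=0, cov=1 by default
# 1.4189385332046727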
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None,
maxpts=None, abseps=1e-5, releps=1e-5):
"""Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
maxpts : integer, optional
The maximum number of points to use for integration of the
cumulative distribution function (default `1000000*dim`)
abseps : float, optional
Absolute error tolerance for the cumulative distribution function
(default 1e-5)
releps : float, optional
Relative error tolerance for the cumulative distribution function
(default 1e-5)
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self._dist = multivariate_normal_gen(seed)
self.dim, self.mean, self.cov = self._dist._process_parameters(
None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * self.dim
self.maxpts = maxpts
self.abseps = abseps
self.releps = releps
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def logcdf(self, x):
return np.log(self.cdf(x))
def cdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._cdf(x, self.mean, self.cov, self.maxpts, self.abseps,
self.releps)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'logcdf', 'cdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
mvn_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
_matnorm_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default: `None`)
rowcov : array_like, optional
Among-row covariance matrix of the distribution (default: `1`)
colcov : array_like, optional
Among-column covariance matrix of the distribution (default: `1`)
"""
_matnorm_doc_callparams_note = \
"""If `mean` is set to `None` then a matrix of zeros is used for the mean.
The dimensions of this matrix are inferred from the shape of `rowcov` and
`colcov`, if these are provided, or set to `1` if ambiguous.
`rowcov` and `colcov` can be two-dimensional array_likes specifying the
covariance matrices directly. Alternatively, a one-dimensional array will
be interpreted as the entries of a diagonal matrix, and a scalar or
zero-dimensional array will be interpreted as this value times the
identity matrix.
"""
_matnorm_doc_frozen_callparams = ""
_matnorm_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
matnorm_docdict_params = {
'_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
matnorm_docdict_noparams = {
'_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class matrix_normal_gen(multi_rv_generic):
r"""A matrix normal random variable.
The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
among-row covariance matrix. The 'colcov' keyword specifies the
among-column covariance matrix.
Methods
-------
``pdf(X, mean=None, rowcov=1, colcov=1)``
Probability density function.
``logpdf(X, mean=None, rowcov=1, colcov=1)``
Log of the probability density function.
``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)``
Draw random samples.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" matrix normal
random variable:
rv = matrix_normal(mean=None, rowcov=1, colcov=1)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_matnorm_doc_callparams_note)s
The covariance matrices specified by `rowcov` and `colcov` must be
(symmetric) positive definite. If the samples in `X` are
:math:`m \times n`, then `rowcov` must be :math:`m \times m` and
`colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
The probability density function for `matrix_normal` is
.. math::
f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
\exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
(X-M)^T \right] \right),
where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
:math:`V` the among-column covariance matrix.
The `allow_singular` behaviour of the `multivariate_normal`
distribution is not currently supported. Covariance matrices must be
full rank.
The `matrix_normal` distribution is closely related to the
`multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
(the vector formed by concatenating the columns of :math:`X`) has a
multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
product). Sampling and pdf evaluation are
:math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
:math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
making this equivalent form algorithmically inefficient.
.. versionadded:: 0.17.0
Examples
--------
>>> from scipy.stats import matrix_normal
>>> M = np.arange(6).reshape(3,2); M
array([[0, 1],
[2, 3],
[4, 5]])
>>> U = np.diag([1,2,3]); U
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> V = 0.3*np.identity(2); V
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1; X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
0.023410202050005054
>>> # Equivalent multivariate normal
>>> from scipy.stats import multivariate_normal
>>> vectorised_X = X.T.flatten()
>>> equiv_mean = M.T.flatten()
>>> equiv_cov = np.kron(V,U)
>>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
0.023410202050005054
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)
def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""Create a frozen matrix normal distribution.
See `matrix_normal_frozen` for more information.
"""
return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)
def _process_parameters(self, mean, rowcov, colcov):
"""
Infer dimensionality from mean or covariance matrices. Handle
defaults. Ensure compatible dimensions.
"""
# Process mean
if mean is not None:
mean = np.asarray(mean, dtype=float)
meanshape = mean.shape
if len(meanshape) != 2:
raise ValueError("Array `mean` must be two dimensional.")
if np.any(meanshape == 0):
raise ValueError("Array `mean` has invalid shape.")
# Process among-row covariance
rowcov = np.asarray(rowcov, dtype=float)
if rowcov.ndim == 0:
if mean is not None:
rowcov = rowcov * np.identity(meanshape[0])
else:
rowcov = rowcov * np.identity(1)
elif rowcov.ndim == 1:
rowcov = np.diag(rowcov)
rowshape = rowcov.shape
if len(rowshape) != 2:
raise ValueError("`rowcov` must be a scalar or a 2D array.")
if rowshape[0] != rowshape[1]:
raise ValueError("Array `rowcov` must be square.")
if rowshape[0] == 0:
raise ValueError("Array `rowcov` has invalid shape.")
numrows = rowshape[0]
# Process among-column covariance
colcov = np.asarray(colcov, dtype=float)
if colcov.ndim == 0:
if mean is not None:
colcov = colcov * np.identity(meanshape[1])
else:
colcov = colcov * np.identity(1)
elif colcov.ndim == 1:
colcov = np.diag(colcov)
colshape = colcov.shape
if len(colshape) != 2:
raise ValueError("`colcov` must be a scalar or a 2D array.")
if colshape[0] != colshape[1]:
raise ValueError("Array `colcov` must be square.")
if colshape[0] == 0:
raise ValueError("Array `colcov` has invalid shape.")
numcols = colshape[0]
# Ensure mean and covariances compatible
if mean is not None:
if meanshape[0] != numrows:
raise ValueError("Arrays `mean` and `rowcov` must have the "
"same number of rows.")
if meanshape[1] != numcols:
raise ValueError("Arrays `mean` and `colcov` must have the "
"same number of columns.")
else:
mean = np.zeros((numrows, numcols))
dims = (numrows, numcols)
return dims, mean, rowcov, colcov
def _process_quantiles(self, X, dims):
"""
Adjust quantiles array so that last two axes labels the components of
each data point.
"""
X = np.asarray(X, dtype=float)
if X.ndim == 2:
X = X[np.newaxis, :]
if X.shape[-2:] != dims:
raise ValueError("The shape of array `X` is not compatible "
"with the distribution parameters.")
return X
def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
col_prec_rt, log_det_colcov):
"""Log of the matrix normal probability density function.
Parameters
----------
dims : tuple
Dimensions of the matrix variates
X : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
row_prec_rt : ndarray
A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
is the inverse of the among-row covariance matrix
log_det_rowcov : float
Logarithm of the determinant of the among-row covariance matrix
col_prec_rt : ndarray
A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
is the inverse of the among-column covariance matrix
log_det_colcov : float
Logarithm of the determinant of the among-column covariance matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
numrows, numcols = dims
roll_dev = np.rollaxis(X-mean, axis=-1, start=0)
scale_dev = np.tensordot(col_prec_rt.T,
np.dot(roll_dev, row_prec_rt), 1)
maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
+ numrows*log_det_colcov + maha)
def logpdf(self, X, mean=None, rowcov=1, colcov=1):
"""Log of the matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
logpdf : ndarray
Log of the probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
X = self._process_quantiles(X, dims)
rowpsd = _PSD(rowcov, allow_singular=False)
colpsd = _PSD(colcov, allow_singular=False)
out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X, mean=None, rowcov=1, colcov=1):
"""Matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
return np.exp(self.logpdf(X, mean, rowcov, colcov))
def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
"""Draw random samples from a matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `dims`), where `dims` is the
dimension of the random matrices.
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
size = int(size)
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
rowchol = scipy.linalg.cholesky(rowcov, lower=True)
colchol = scipy.linalg.cholesky(colcov, lower=True)
random_state = self._get_random_state(random_state)
std_norm = random_state.standard_normal(size=(dims[1], size, dims[0]))
roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)
out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis, :, :]
if size == 1:
out = out.reshape(mean.shape)
return out
matrix_normal = matrix_normal_gen()
class matrix_normal_frozen(multi_rv_frozen):
"""Create a frozen matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Examples
--------
>>> from scipy.stats import matrix_normal
>>> distn = matrix_normal(mean=np.zeros((3,3)))
>>> X = distn.rvs(); X
array([[-0.02976962, 0.93339138, -0.09663178],
[ 0.67405524, 0.28250467, -0.93308929],
[-0.31144782, 0.74535536, 1.30412916]])
>>> distn.pdf(X)
2.5160642368346784e-05
>>> distn.logpdf(X)
-10.590229595124615
"""
def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
self._dist = matrix_normal_gen(seed)
self.dims, self.mean, self.rowcov, self.colcov = \
self._dist._process_parameters(mean, rowcov, colcov)
self.rowpsd = _PSD(self.rowcov, allow_singular=False)
self.colpsd = _PSD(self.colcov, allow_singular=False)
def logpdf(self, X):
X = self._dist._process_quantiles(X, self.dims)
out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
self.rowpsd.log_pdet, self.colpsd.U,
self.colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X):
return np.exp(self.logpdf(X))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
random_state)
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = matrix_normal_gen.__dict__[name]
method_frozen = matrix_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
matnorm_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = %s "
"and x.shape = %s." % (alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) < 0:
raise ValueError("Each entry in 'x' must be greater than or equal "
"to zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
# Check x_i > 0 or alpha_i > 1
xeq0 = (x == 0)
alphalt1 = (alpha < 1)
if x.shape != alpha.shape:
alphalt1 = np.repeat(alphalt1, x.shape[-1], axis=-1).reshape(x.shape)
chk = np.logical_and(xeq0, alphalt1)
if np.sum(chk):
raise ValueError("Each entry in 'x' must be greater than zero if its "
"alpha is less than one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal "
"simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""Internal helper function to compute the log of the useful quotient.
.. math::
B(\alpha) = \frac{\prod_{i=1}{K}\Gamma(\alpha_i)}
{\Gamma\left(\sum_{i=1}^{K} \alpha_i \right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
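# Editor's illustration (not part of the scipy source): for the flat Dirichlet
# with alpha = (1, 1, 1) the normalising constant is B(alpha) = 1/Gamma(3) = 1/2,
# so this helper returns log(1/2):
# >>> _lnB(np.array([1.0, 1.0, 1.0]))
# -0.6931471805599453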
class dirichlet_gen(multi_rv_generic):
r"""A Dirichlet random variable.
The `alpha` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
Compute the differential entropy of the Dirichlet distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
Each :math:`\alpha` entry must be positive. The distribution has only
support on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i = 1
where 0 < x_i < 1.
If the quantiles don't lie within the simplex, a ValueError is raised.
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
concentration parameters and :math:`K` is the dimension of the space
where :math:`x` takes values.
Note that the dirichlet interface is somewhat inconsistent.
The array returned by the rvs function is transposed
with respect to the format expected by the pdf and logpdf.
Examples
--------
>>> from scipy.stats import dirichlet
Generate a dirichlet random variable
>>> quantiles = np.array([0.2, 0.2, 0.6]) # specify quantiles
>>> alpha = np.array([0.4, 5, 15]) # specify concentration parameters
>>> dirichlet.pdf(quantiles, alpha)
0.2843831684937255
The same PDF but following a log scale
>>> dirichlet.logpdf(quantiles, alpha)
-1.2574327653159187
Once we specify the dirichlet distribution
we can then calculate quantities of interest
>>> dirichlet.mean(alpha) # get the mean of the distribution
array([0.01960784, 0.24509804, 0.73529412])
>>> dirichlet.var(alpha) # get variance
array([0.00089829, 0.00864603, 0.00909517])
>>> dirichlet.entropy(alpha) # calculate the differential entropy
-4.3280162474082715
We can also return random samples from the distribution
>>> dirichlet.rvs(alpha, size=1, random_state=1)
array([[0.00766178, 0.24670518, 0.74563305]])
>>> dirichlet.rvs(alpha, size=2, random_state=2)
array([[0.01639427, 0.1292273 , 0.85437844],
[0.00156917, 0.19033695, 0.80809388]])
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""Log of the Dirichlet probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((xlogy(alpha - 1, x.T)).T, 0)
def logpdf(self, x, alpha):
"""Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""Compute the mean of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
mu : ndarray or scalar
Mean of the Dirichlet distribution.
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""Compute the variance of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
v : ndarray or scalar
Variance of the Dirichlet distribution.
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return _squeeze_output(out)
def entropy(self, alpha):
"""Compute the differential entropy of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix). These arguments must satisfy the relationship
``df > scale.ndim - 1``, but see notes on using the `rvs` method with
``df < scale.ndim``.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
The algorithm [2]_ implemented by the `rvs` method may
produce numerically singular matrices with :math:`p - 1 < \nu < p`; the
user may wish to check for this condition and generate replacement samples
as necessary.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis, np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df <= dim - 1:
raise ValueError("Degrees of freedom must be greater than the "
"dimension of scale matrix minus 1.")
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
' Got size = %s' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""Log of the Wishart probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
# components along the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.empty(x.shape[-1])
scale_inv_x = np.empty(x.shape)
tr_scale_inv_x = np.empty(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:, :, i])
scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True), x[:, :, i])
tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
logpdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""Mean of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""Mean of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""Mode of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""Variance of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""Variance of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = (np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) +
shape[::-1]).T)
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None, None, None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""Draw random samples from a Wishart distribution.
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""Compute the differential entropy of the Wishart.
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""Compute Cholesky decomposition and determine (log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
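# Illustrative usage sketch (not part of the original scipy module): a
# hypothetical helper showing that a Monte Carlo average of Wishart draws
# approaches the analytic mean df * scale implemented by `wishart_gen._mean`.
def _example_wishart_sample_mean():
    import numpy as np
    df, scale = 5, np.diag([1.0, 2.0, 3.0])
    draws = wishart.rvs(df=df, scale=scale, size=4000,
                        random_state=np.random.default_rng(0))
    print(wishart.mean(df=df, scale=scale))   # analytic: diag(5, 10, 15)
    print(draws.mean(axis=0).round(1))        # Monte Carlo estimate, close to it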
class wishart_frozen(multi_rv_frozen):
"""Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
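# Illustrative sketch (not part of the original scipy module): a hypothetical
# check that a frozen Wishart object, which merely caches df/scale and the
# Cholesky factor of scale, evaluates the same density as the generic call.
def _example_frozen_wishart_consistency():
    import numpy as np
    scale = np.array([[2.0, 0.5], [0.5, 1.0]])
    x = np.array([[1.5, 0.2], [0.2, 0.8]])
    frozen = wishart(df=4, scale=scale)   # returns a wishart_frozen instance
    print(np.isclose(frozen.pdf(x), wishart.pdf(x, df=4, scale=scale)))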
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See Also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf', 'potri'), (a1,))
triu_rows, triu_cols = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potri' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_rows, triu_cols] = a1[index][triu_cols, triu_rows]
return a1
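# Illustrative sketch (not part of the original scipy module): a hypothetical
# helper checking `_cho_inv_batch` against `np.linalg.inv` on a stack of
# symmetric positive definite matrices (the only inputs it supports).
def _example_cho_inv_batch():
    import numpy as np
    rng = np.random.default_rng(0)
    a = rng.normal(size=(5, 4, 4))
    spd = a @ np.swapaxes(a, -1, -2) + 4 * np.eye(4)   # make each matrix SPD
    expected = np.linalg.inv(spd)
    print(np.allclose(_cho_inv_batch(spd.copy()), expected))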
class invwishart_gen(wishart_gen):
r"""An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications
in Statistics - Simulation and Computation, vol. 14.2, pp.511-514,
1985.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""Log of the inverse Wishart probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.empty(x.shape[-1])
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.empty(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:, :, i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
logpdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""Mean of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""Mean of the inverse Wishart distribution.
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""Mode of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""Mode of the inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""Variance of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""Variance of the inverse Wishart distribution.
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""Draw random samples from an inverse Wishart distribution.
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super()._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
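# Illustrative sketch (not part of the original scipy module): a hypothetical
# helper comparing the analytic inverse-Wishart mean scale / (df - dim - 1)
# against a Monte Carlo estimate.
def _example_invwishart_mean():
    import numpy as np
    df, scale = 10, np.eye(3)
    draws = invwishart.rvs(df=df, scale=scale, size=5000,
                           random_state=np.random.default_rng(1))
    print(invwishart.mean(df=df, scale=scale))   # analytic: I / 6
    print(draws.mean(axis=0).round(2))           # Monte Carlo, close to I / 6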
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
method_frozen = invwishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
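# Illustrative sketch (not part of the original scipy module): a hypothetical
# helper for the relation quoted in both docstrings above, namely that if
# S ~ W_p(df, I) then inv(S) ~ W_p^{-1}(df, I); the sample mean of the inverted
# draws should match `invwishart.mean`.
def _example_invert_wishart_draws():
    import numpy as np
    df, dim = 10, 3
    w_draws = wishart.rvs(df=df, scale=np.eye(dim), size=4000,
                          random_state=np.random.default_rng(2))
    inv_draws = np.linalg.inv(w_draws)
    print(inv_draws.mean(axis=0).round(2))                     # Monte Carlo
    print(invwishart.mean(df=df, scale=np.eye(dim)).round(2))  # analytic I / 6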
_multinomial_doc_default_callparams = """\
n : int
Number of trials
p : array_like
Probability of a trial falling into each category; should sum to 1
"""
_multinomial_doc_callparams_note = \
"""`n` should be a positive integer. Each element of `p` should be in the
interval :math:`[0,1]` and the elements should sum to 1. If they do not sum to
1, the last element of the `p` array is not used and is replaced with the
remaining probability left over from the earlier elements.
"""
_multinomial_doc_frozen_callparams = ""
_multinomial_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
multinomial_docdict_params = {
'_doc_default_callparams': _multinomial_doc_default_callparams,
'_doc_callparams_note': _multinomial_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
multinomial_docdict_noparams = {
'_doc_default_callparams': _multinomial_doc_frozen_callparams,
'_doc_callparams_note': _multinomial_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multinomial_gen(multi_rv_generic):
r"""A multinomial random variable.
Methods
-------
``pmf(x, n, p)``
Probability mass function.
``logpmf(x, n, p)``
Log of the probability mass function.
``rvs(n, p, size=1, random_state=None)``
Draw random samples from a multinomial distribution.
``entropy(n, p)``
Compute the entropy of the multinomial distribution.
``cov(n, p)``
Compute the covariance matrix of the multinomial distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
Alternatively, the object may be called (as a function) to fix the `n` and
`p` parameters, returning a "frozen" multinomial random variable:
The probability mass function for `multinomial` is
.. math::
f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k},
supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a
nonnegative integer and their sum is :math:`n`.
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.stats import multinomial
>>> rv = multinomial(8, [0.3, 0.2, 0.5])
>>> rv.pmf([1, 3, 4])
0.042000000000000072
The multinomial distribution for :math:`k=2` is identical to the
corresponding binomial distribution (tiny numerical differences
notwithstanding):
>>> from scipy.stats import binom
>>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6])
0.29030399999999973
>>> binom.pmf(3, 7, 0.4)
0.29030400000000012
The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support
broadcasting, under the convention that the vector parameters (``x`` and
``p``) are interpreted as if each row along the last axis is a single
object. For instance:
>>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7])
array([0.2268945, 0.25412184])
Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``,
but following the rules mentioned above they behave as if the rows
``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single
object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and
``p.shape = ()``. To obtain the individual elements without broadcasting,
we would do this:
>>> multinomial.pmf([3, 4], n=7, p=[.3, .7])
0.2268945
>>> multinomial.pmf([3, 5], 8, p=[.3, .7])
0.25412184
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``p.shape[-1]``. For example:
>>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
array([[[ 0.84, -0.84],
[-0.84, 0.84]],
[[ 1.2 , -1.2 ],
[-1.2 , 1.2 ]]])
In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and
following the rules above, these broadcast as if ``p.shape == (2,)``.
Thus the result should also be of shape ``(2,)``, but since each output is
a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``,
where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and
``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``.
See also
--------
scipy.stats.binom : The binomial distribution.
numpy.random.Generator.multinomial : Sampling from the multinomial distribution.
scipy.stats.multivariate_hypergeom :
The multivariate hypergeometric distribution.
""" # noqa: E501
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = \
doccer.docformat(self.__doc__, multinomial_docdict_params)
def __call__(self, n, p, seed=None):
"""Create a frozen multinomial distribution.
See `multinomial_frozen` for more information.
"""
return multinomial_frozen(n, p, seed)
def _process_parameters(self, n, p):
"""Returns: n_, p_, npcond.
n_ and p_ are arrays of the correct shape; npcond is a boolean array
flagging values out of the domain.
"""
p = np.array(p, dtype=np.float64, copy=True)
p[..., -1] = 1. - p[..., :-1].sum(axis=-1)
# true for bad p
pcond = np.any(p < 0, axis=-1)
pcond |= np.any(p > 1, axis=-1)
n = np.array(n, dtype=np.int_, copy=True)
# true for bad n
ncond = n <= 0
return n, p, ncond | pcond
def _process_quantiles(self, x, n, p):
"""Returns: x_, xcond.
x_ is an int array; xcond is a boolean array flagging values out of the
domain.
"""
xx = np.asarray(x, dtype=np.int_)
if xx.ndim == 0:
raise ValueError("x must be an array.")
if xx.size != 0 and not xx.shape[-1] == p.shape[-1]:
raise ValueError("Size of each quantile should be size of p: "
"received %d, but expected %d." %
(xx.shape[-1], p.shape[-1]))
# true for x out of the domain
cond = np.any(xx != x, axis=-1)
cond |= np.any(xx < 0, axis=-1)
cond = cond | (np.sum(xx, axis=-1) != n)
return xx, cond
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
if result.ndim == 0:
return bad_value
result[...] = bad_value
return result
def _logpmf(self, x, n, p):
return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1)
def logpmf(self, x, n, p):
"""Log of the Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x, xcond = self._process_quantiles(x, n, p)
result = self._logpmf(x, n, p)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
result = self._checkresult(result, xcond_, np.NINF)
# replace values bad for n or p; broadcast npcond to the right shape
npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
return self._checkresult(result, npcond_, np.NAN)
def pmf(self, x, n, p):
"""Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
Probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpmf(x, n, p))
def mean(self, n, p):
"""Mean of the Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
result = n[..., np.newaxis]*p
return self._checkresult(result, npcond, np.NAN)
def cov(self, n, p):
"""Covariance matrix of the multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : ndarray
The covariance matrix of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
nn = n[..., np.newaxis, np.newaxis]
result = nn * np.einsum('...j,...k->...jk', -p, p)
# change the diagonal
for i in range(p.shape[-1]):
result[..., i, i] += n*p[..., i]
return self._checkresult(result, npcond, np.nan)
def entropy(self, n, p):
r"""Compute the entropy of the multinomial distribution.
The entropy is computed using this expression:
.. math::
f(x) = - \log n! - n\sum_{i=1}^k p_i \log p_i +
\sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Multinomial distribution
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x = np.r_[1:np.max(n)+1]
term1 = n*np.sum(entr(p), axis=-1)
term1 -= gammaln(n+1)
n = n[..., np.newaxis]
new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
x.shape += (1,)*new_axes_needed
term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
axis=(-1, -1-new_axes_needed))
return self._checkresult(term1 + term2, npcond, np.nan)
def rvs(self, n, p, size=None, random_state=None):
"""Draw random samples from a Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of shape (`size`, `len(p)`)
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
random_state = self._get_random_state(random_state)
return random_state.multinomial(n, p, size)
multinomial = multinomial_gen()
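# Illustrative sketch (not part of the original scipy module): a hypothetical
# helper verifying that `multinomial.cov` implements n * (diag(p) - p p^T),
# which is exactly what the einsum plus diagonal correction above computes.
def _example_multinomial_cov():
    import numpy as np
    n, p = 10, np.array([0.2, 0.3, 0.5])
    direct = n * (np.diag(p) - np.outer(p, p))
    print(np.allclose(multinomial.cov(n, p), direct))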
class multinomial_frozen(multi_rv_frozen):
r"""Create a frozen Multinomial distribution.
Parameters
----------
n : int
number of trials
p: array_like
probability of a trial falling into each category; should sum to 1
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def __init__(self, n, p, seed=None):
self._dist = multinomial_gen(seed)
self.n, self.p, self.npcond = self._dist._process_parameters(n, p)
# monkey patch self._dist
def _process_parameters(n, p):
return self.n, self.p, self.npcond
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.n, self.p)
def pmf(self, x):
return self._dist.pmf(x, self.n, self.p)
def mean(self):
return self._dist.mean(self.n, self.p)
def cov(self):
return self._dist.cov(self.n, self.p)
def entropy(self):
return self._dist.entropy(self.n, self.p)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.n, self.p, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multinomial and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']:
method = multinomial_gen.__dict__[name]
method_frozen = multinomial_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, multinomial_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
multinomial_docdict_params)
class special_ortho_group_gen(multi_rv_generic):
r"""A matrix-valued SO(N) random variable.
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from SO(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is wrapping the random_rot code from the MDP Toolkit,
https://github.com/mdp-toolkit/mdp-toolkit
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The algorithm is described in the paper
Stewart, G.W., "The efficient generation of random orthogonal
matrices with an application to condition estimators", SIAM Journal
on Numerical Analysis, 17(3), pp. 403-409, 1980.
For more information see
https://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization
See also the similar `ortho_group`. For a random rotation in three
dimensions, see `scipy.spatial.transform.Rotation.random`.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> x = special_ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> scipy.linalg.det(x)
1.0
This generates one random matrix from SO(3). It is orthogonal and
has a determinant of 1.
See Also
--------
ortho_group, scipy.spatial.transform.Rotation.random
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""Create a frozen SO(N) distribution.
See `special_ortho_group_frozen` for more information.
"""
return special_ortho_group_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("""Dimension of rotation must be specified,
and must be a scalar greater than 1.""")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from SO(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
random_state = self._get_random_state(random_state)
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
H = np.eye(dim)
D = np.empty((dim,))
for n in range(dim-1):
x = random_state.normal(size=(dim-n,))
norm2 = np.dot(x, x)
x0 = x[0].item()
D[n] = np.sign(x[0]) if x[0] != 0 else 1
x[0] += D[n]*np.sqrt(norm2)
x /= np.sqrt((norm2 - x0**2 + x[0]**2) / 2.)
# Householder transformation
H[:, n:] -= np.outer(np.dot(H[:, n:], x), x)
D[-1] = (-1)**(dim-1)*D[:-1].prod()
# Equivalent to np.dot(np.diag(D), H) but faster, apparently
H = (D*H.T).T
return H
special_ortho_group = special_ortho_group_gen()
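# Illustrative sketch (not part of the original scipy module): a hypothetical
# helper checking that every SO(N) draw produced by the Householder-based
# construction above is orthogonal with determinant +1.
def _example_special_ortho_group_det():
    import numpy as np
    mats = special_ortho_group.rvs(4, size=100,
                                   random_state=np.random.default_rng(3))
    print(np.allclose([np.linalg.det(m) for m in mats], 1.0))
    print(np.allclose(mats[0] @ mats[0].T, np.eye(4)))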
class special_ortho_group_frozen(multi_rv_frozen):
def __init__(self, dim=None, seed=None):
"""Create a frozen SO(N) distribution.
Parameters
----------
dim : scalar
Dimension of matrices
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> g = special_ortho_group(5)
>>> x = g.rvs()
"""
self._dist = special_ortho_group_gen(seed)
self.dim = self._dist._process_parameters(dim)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.dim, size, random_state)
class ortho_group_gen(multi_rv_generic):
r"""A matrix-valued O(N) random variable.
Return a random orthogonal matrix, drawn from the O(N) Haar
distribution (the only uniform distribution on O(N)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from O(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is closely related to `special_ortho_group`.
Some care is taken to avoid numerical error, as per the paper by Mezzadri.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
>>> from scipy.stats import ortho_group
>>> x = ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> np.fabs(scipy.linalg.det(x))
1.0
This generates one random matrix from O(3). It is orthogonal and
has a determinant of +1 or -1.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from O(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
random_state = self._get_random_state(random_state)
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
H = np.eye(dim)
for n in range(dim):
x = random_state.normal(size=(dim-n,))
norm2 = np.dot(x, x)
x0 = x[0].item()
# random sign, 50/50, but chosen carefully to avoid roundoff error
D = np.sign(x[0]) if x[0] != 0 else 1
x[0] += D * np.sqrt(norm2)
x /= np.sqrt((norm2 - x0**2 + x[0]**2) / 2.)
# Householder transformation
H[:, n:] = -D * (H[:, n:] - np.outer(np.dot(H[:, n:], x), x))
return H
ortho_group = ortho_group_gen()
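# Illustrative sketch (not part of the original scipy module): a hypothetical
# helper highlighting the difference from SO(N): O(N) draws carry determinant
# +1 or -1, and over many samples the two signs appear roughly equally often.
def _example_ortho_group_det_signs():
    import numpy as np
    mats = ortho_group.rvs(3, size=200, random_state=np.random.default_rng(4))
    dets = np.round([np.linalg.det(m) for m in mats])
    print(np.unique(dets, return_counts=True))   # roughly balanced -1 / +1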
class random_correlation_gen(multi_rv_generic):
r"""A random correlation matrix.
Return a random correlation matrix, given a vector of eigenvalues.
The `eigs` keyword specifies the eigenvalues of the correlation matrix,
and implies the dimension.
Methods
-------
``rvs(eigs=None, random_state=None)``
Draw random correlation matrices, all with eigenvalues eigs.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix.
Notes
-----
Generates a random correlation matrix following a numerically stable
algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)
similarity transformation to construct a symmetric positive semi-definite
matrix, and applies a series of Givens rotations to scale it to have ones
on the diagonal.
References
----------
.. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation
of correlation matrices and their factors", BIT 2000, Vol. 40,
No. 4, pp. 640-651
Examples
--------
>>> from scipy.stats import random_correlation
>>> rng = np.random.default_rng()
>>> x = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=rng)
>>> x
array([[ 1. , -0.07198934, -0.20411041, -0.24385796],
[-0.07198934, 1. , 0.12968613, -0.29471382],
[-0.20411041, 0.12968613, 1. , 0.2828693 ],
[-0.24385796, -0.29471382, 0.2828693 , 1. ]])
>>> import scipy.linalg
>>> e, v = scipy.linalg.eigh(x)
>>> e
array([ 0.5, 0.8, 1.2, 1.5])
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, eigs, tol):
eigs = np.asarray(eigs, dtype=float)
dim = eigs.size
if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:
raise ValueError("Array 'eigs' must be a vector of length "
"greater than 1.")
if np.fabs(np.sum(eigs) - dim) > tol:
raise ValueError("Sum of eigenvalues must equal dimensionality.")
for x in eigs:
if x < -tol:
raise ValueError("All eigenvalues must be non-negative.")
return dim, eigs
def _givens_to_1(self, aii, ajj, aij):
"""Computes a 2x2 Givens matrix to put 1's on the diagonal.
The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].
The output matrix g is a 2x2 Givens rotation matrix of the form
[ c s ; -s c ]; the elements c and s are returned.
Applying the output matrix to the input matrix (as b=g.T M g)
results in a matrix with bii=1, provided tr(M) - det(M) >= 1
and floating point issues do not occur. Otherwise, some other
valid rotation is returned. When tr(M)==2, also bjj=1.
"""
aiid = aii - 1.
ajjd = ajj - 1.
if ajjd == 0:
# ajj==1, so swap aii and ajj to avoid division by zero
return 0., 1.
dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))
# The choice of t should be chosen to avoid cancellation [1]
t = (aij + math.copysign(dd, aij)) / ajjd
c = 1. / math.sqrt(1. + t*t)
if c == 0:
# Underflow
s = 1.0
else:
s = c*t
return c, s
def _to_corr(self, m):
"""
Given a psd matrix m, rotate to put ones on the diagonal, turning it
into a correlation matrix. This also requires the trace to equal the
dimensionality. Note: modifies the input matrix in place.
"""
# Check requirements for in-place Givens
if not (m.flags.c_contiguous and m.dtype == np.float64 and
m.shape[0] == m.shape[1]):
raise ValueError()
d = m.shape[0]
for i in range(d-1):
if m[i, i] == 1:
continue
elif m[i, i] > 1:
for j in range(i+1, d):
if m[j, j] < 1:
break
else:
for j in range(i+1, d):
if m[j, j] > 1:
break
c, s = self._givens_to_1(m[i, i], m[j, j], m[i, j])
# Use BLAS to apply Givens rotations in-place. Equivalent to:
# g = np.eye(d)
# g[i, i] = g[j,j] = c
# g[j, i] = -s; g[i, j] = s
# m = np.dot(g.T, np.dot(m, g))
mv = m.ravel()
drot(mv, mv, c, -s, n=d,
offx=i*d, incx=1, offy=j*d, incy=1,
overwrite_x=True, overwrite_y=True)
drot(mv, mv, c, -s, n=d,
offx=i, incx=d, offy=j, incy=d,
overwrite_x=True, overwrite_y=True)
return m
def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):
"""Draw random correlation matrices.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix
tol : float, optional
Tolerance for input parameter checks
diag_tol : float, optional
Tolerance for deviation of the diagonal of the resulting
matrix. Default: 1e-7
Raises
------
RuntimeError
Floating point error prevented generating a valid correlation
matrix.
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim),
each having eigenvalues eigs.
"""
dim, eigs = self._process_parameters(eigs, tol=tol)
random_state = self._get_random_state(random_state)
m = ortho_group.rvs(dim, random_state=random_state)
m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m
m = self._to_corr(m) # Carefully rotate to unit diagonal
# Check diagonal
if abs(m.diagonal() - 1).max() > diag_tol:
raise RuntimeError("Failed to generate a valid correlation matrix")
return m
random_correlation = random_correlation_gen()
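# Illustrative sketch (not part of the original scipy module): a hypothetical
# helper confirming the two invariants of `random_correlation.rvs`: a unit
# diagonal (within diag_tol) and the requested eigenvalue spectrum, whose sum
# must equal the dimension.
def _example_random_correlation_spectrum():
    import numpy as np
    eigs = (0.2, 0.8, 1.0, 2.0)   # sums to 4, the dimension
    c = random_correlation.rvs(eigs, random_state=np.random.default_rng(5))
    print(np.allclose(np.diag(c), 1.0))
    print(np.sort(np.linalg.eigvalsh(c)).round(6))   # ~ [0.2, 0.8, 1.0, 2.0]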
class unitary_group_gen(multi_rv_generic):
r"""A matrix-valued U(N) random variable.
Return a random unitary matrix.
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from U(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is similar to `ortho_group`.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
>>> from scipy.stats import unitary_group
>>> x = unitary_group.rvs(3)
>>> np.dot(x, x.conj().T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
This generates one random matrix from U(3). The dot product confirms that
it is unitary up to machine precision.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from U(N).
Parameters
----------
dim : integer
Dimension of space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
random_state = self._get_random_state(random_state)
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
z = 1/math.sqrt(2)*(random_state.normal(size=(dim, dim)) +
1j*random_state.normal(size=(dim, dim)))
q, r = scipy.linalg.qr(z)
d = r.diagonal()
q *= d/abs(d)
return q
unitary_group = unitary_group_gen()
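# Illustrative sketch (not part of the original scipy module): a hypothetical
# helper checking unitarity of a U(N) draw; unlike the orthogonal groups, the
# determinant is a complex number of unit modulus.
def _example_unitary_group():
    import numpy as np
    x = unitary_group.rvs(3, random_state=np.random.default_rng(6))
    print(np.allclose(x @ x.conj().T, np.eye(3)))
    print(np.isclose(abs(np.linalg.det(x)), 1.0))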
_mvt_doc_default_callparams = \
"""
loc : array_like, optional
Location of the distribution. (default ``0``)
shape : array_like, optional
Positive semidefinite matrix of the distribution. (default ``1``)
df : float, optional
Degrees of freedom of the distribution; must be greater than zero.
If ``np.inf`` then results are multivariate normal. The default is ``1``.
allow_singular : bool, optional
Whether to allow a singular matrix. (default ``False``)
"""
_mvt_doc_callparams_note = \
"""Setting the parameter `loc` to ``None`` is equivalent to having `loc`
be the zero-vector. The parameter `shape` can be a scalar, in which case
the shape matrix is the identity times that value, a vector of
diagonal entries for the shape matrix, or a two-dimensional array_like.
"""
_mvt_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvt_docdict_params = {
'_mvt_doc_default_callparams': _mvt_doc_default_callparams,
'_mvt_doc_callparams_note': _mvt_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvt_docdict_noparams = {
'_mvt_doc_default_callparams': "",
'_mvt_doc_callparams_note': _mvt_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_t_gen(multi_rv_generic):
r"""A multivariate t-distributed random variable.
The `loc` parameter specifies the location. The `shape` parameter specifies
the positive semidefinite shape matrix. The `df` parameter specifies the
degrees of freedom.
In addition to calling the methods below, the object itself may be called
as a function to fix the location, shape matrix, and degrees of freedom
parameters, returning a "frozen" multivariate t-distribution random variable.
Methods
-------
``pdf(x, loc=None, shape=1, df=1, allow_singular=False)``
Probability density function.
``logpdf(x, loc=None, shape=1, df=1, allow_singular=False)``
Log of the probability density function.
``rvs(loc=None, shape=1, df=1, size=1, random_state=None)``
Draw random samples from a multivariate t-distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvt_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_mvt_doc_callparams_note)s
The matrix `shape` must be a (symmetric) positive semidefinite matrix. The
determinant and inverse of `shape` are computed as the pseudo-determinant
and pseudo-inverse, respectively, so that `shape` does not need to have
full rank.
The probability density function for `multivariate_t` is
.. math::
f(x) = \frac{\Gamma((\nu + p)/2)}{\Gamma(\nu/2)\nu^{p/2}\pi^{p/2}|\Sigma|^{1/2}}
\left[1 + \frac{1}{\nu} (\mathbf{x} - \boldsymbol{\mu})^{\top}
\boldsymbol{\Sigma}^{-1}
(\mathbf{x} - \boldsymbol{\mu}) \right]^{-(\nu + p)/2},
where :math:`p` is the dimension of :math:`\mathbf{x}`,
:math:`\boldsymbol{\mu}` is the :math:`p`-dimensional location,
:math:`\boldsymbol{\Sigma}` the :math:`p \times p`-dimensional shape
matrix, and :math:`\nu` is the degrees of freedom.
.. versionadded:: 1.6.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_t
>>> x, y = np.mgrid[-1:3:.01, -2:1.5:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_t([1.0, -0.5], [[2.1, 0.3], [0.3, 1.5]], df=2)
>>> fig, ax = plt.subplots(1, 1)
>>> ax.set_aspect('equal')
>>> plt.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
"""Initialize a multivariate t-distributed random variable.
Parameters
----------
seed : Random state.
"""
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params)
self._random_state = check_random_state(seed)
def __call__(self, loc=None, shape=1, df=1, allow_singular=False,
seed=None):
"""Create a frozen multivariate t-distribution.
See `multivariate_t_frozen` for parameters.
"""
if df == np.inf:
return multivariate_normal_frozen(mean=loc, cov=shape,
allow_singular=allow_singular,
seed=seed)
return multivariate_t_frozen(loc=loc, shape=shape, df=df,
allow_singular=allow_singular, seed=seed)
def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False):
"""Multivariate t-distribution probability density function.
Parameters
----------
x : array_like
Points at which to evaluate the probability density function.
%(_mvt_doc_default_callparams)s
Returns
-------
pdf : Probability density function evaluated at `x`.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.pdf(x, loc, shape, df)
array([0.00075713])
"""
dim, loc, shape, df = self._process_parameters(loc, shape, df)
x = self._process_quantiles(x, dim)
shape_info = _PSD(shape, allow_singular=allow_singular)
logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df,
dim, shape_info.rank)
return np.exp(logpdf)
def logpdf(self, x, loc=None, shape=1, df=1):
"""Log of the multivariate t-distribution probability density function.
Parameters
----------
x : array_like
Points at which to evaluate the log of the probability density
function.
%(_mvt_doc_default_callparams)s
Returns
-------
logpdf : Log of the probability density function evaluated at `x`.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.logpdf(x, loc, shape, df)
array([-7.1859802])
See Also
--------
pdf : Probability density function.
"""
dim, loc, shape, df = self._process_parameters(loc, shape, df)
x = self._process_quantiles(x, dim)
shape_info = _PSD(shape)
return self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim,
shape_info.rank)
def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank):
"""Utility method `pdf`, `logpdf` for parameters.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability density
function.
loc : ndarray
Location of the distribution.
prec_U : ndarray
A decomposition such that `np.dot(prec_U, prec_U.T)` is the inverse
of the shape matrix.
log_pdet : float
Logarithm of the determinant of the shape matrix.
df : float
Degrees of freedom of the distribution.
dim : int
Dimension of the quantiles x.
rank : int
Rank of the shape matrix.
Notes
-----
As this function does no argument checking, it should not be called
directly; use 'logpdf' instead.
"""
if df == np.inf:
return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank)
dev = x - loc
maha = np.square(np.dot(dev, prec_U)).sum(axis=-1)
t = 0.5 * (df + dim)
A = gammaln(t)
B = gammaln(0.5 * df)
C = dim/2. * np.log(df * np.pi)
D = 0.5 * log_pdet
E = -t * np.log(1 + (1./df) * maha)
return _squeeze_output(A - B - C - D + E)
def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None):
"""Draw random samples from a multivariate t-distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `P`), where `P` is the
dimension of the random variable.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.rvs(loc, shape, df)
array([[0.93477495, 3.00408716]])
"""
# For implementation details, see equation (3):
#
# Hofert, "On Sampling from the Multivariatet Distribution", 2013
# http://rjournal.github.io/archive/2013-2/hofert.pdf
#
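# In that construction, if W ~ chi2(df) and Z ~ N(0, shape) are independent,
# then X = loc + Z / sqrt(W / df) follows the multivariate t-distribution
# with `df` degrees of freedom; the code below implements exactly this,
# with df = inf falling back to the multivariate normal.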
dim, loc, shape, df = self._process_parameters(loc, shape, df)
if random_state is not None:
rng = check_random_state(random_state)
else:
rng = self._random_state
if np.isinf(df):
x = np.ones(size)
else:
x = rng.chisquare(df, size=size) / df
z = rng.multivariate_normal(np.zeros(dim), shape, size=size)
samples = loc + z / np.sqrt(x)[:, None]
return _squeeze_output(samples)
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _process_parameters(self, loc, shape, df):
"""
Infer dimensionality from location array and shape matrix, handle
defaults, and ensure compatible dimensions.
"""
if loc is None and shape is None:
loc = np.asarray(0, dtype=float)
shape = np.asarray(1, dtype=float)
dim = 1
elif loc is None:
shape = np.asarray(shape, dtype=float)
if shape.ndim < 2:
dim = 1
else:
dim = shape.shape[0]
loc = np.zeros(dim)
elif shape is None:
loc = np.asarray(loc, dtype=float)
dim = loc.size
shape = np.eye(dim)
else:
shape = np.asarray(shape, dtype=float)
loc = np.asarray(loc, dtype=float)
dim = loc.size
if dim == 1:
loc.shape = (1,)
shape.shape = (1, 1)
if loc.ndim != 1 or loc.shape[0] != dim:
raise ValueError("Array 'loc' must be a vector of length %d." %
dim)
if shape.ndim == 0:
shape = shape * np.eye(dim)
elif shape.ndim == 1:
shape = np.diag(shape)
elif shape.ndim == 2 and shape.shape != (dim, dim):
rows, cols = shape.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(shape.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'loc' is a vector of length %d.")
msg = msg % (str(shape.shape), len(loc))
raise ValueError(msg)
elif shape.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % shape.ndim)
# Process degrees of freedom.
if df is None:
df = 1
elif df <= 0:
raise ValueError("'df' must be greater than zero.")
elif np.isnan(df):
raise ValueError("'df' is 'nan' but must be greater than zero or 'np.inf'.")
return dim, loc, shape, df
class multivariate_t_frozen(multi_rv_frozen):
def __init__(self, loc=None, shape=1, df=1, allow_singular=False,
seed=None):
"""Create a frozen multivariate t distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
Examples
--------
>>> loc = np.zeros(3)
>>> shape = np.eye(3)
>>> df = 10
>>> dist = multivariate_t(loc, shape, df)
>>> dist.rvs()
array([[ 0.81412036, -1.53612361, 0.42199647]])
>>> dist.pdf([1, 1, 1])
array([0.01237803])
"""
self._dist = multivariate_t_gen(seed)
dim, loc, shape, df = self._dist._process_parameters(loc, shape, df)
self.dim, self.loc, self.shape, self.df = dim, loc, shape, df
self.shape_info = _PSD(shape, allow_singular=allow_singular)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
U = self.shape_info.U
log_pdet = self.shape_info.log_pdet
return self._dist._logpdf(x, self.loc, U, log_pdet, self.df, self.dim,
self.shape_info.rank)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(loc=self.loc,
shape=self.shape,
df=self.df,
size=size,
random_state=random_state)
multivariate_t = multivariate_t_gen()
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_t_gen.__dict__[name]
method_frozen = multivariate_t_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
mvt_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvt_docdict_params)
_mhg_doc_default_callparams = """\
m : array_like
The number of each type of object in the population.
That is, :math:`m[i]` is the number of objects of
type :math:`i`.
n : array_like
The number of samples taken from the population.
"""
_mhg_doc_callparams_note = """\
`m` must be an array of positive integers. If the quantile
:math:`i` contains values out of the range :math:`[0, m_i]`
where :math:`m_i` is the number of objects of type :math:`i`
in the population or if the parameters are inconsistent with one
another (e.g. ``x.sum() != n``), methods return the appropriate
value (e.g. ``0`` for ``pmf``). If `m` or `n` contain negative
values, the result will contain ``nan`` there.
"""
_mhg_doc_frozen_callparams = ""
_mhg_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mhg_docdict_params = {
'_doc_default_callparams': _mhg_doc_default_callparams,
'_doc_callparams_note': _mhg_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mhg_docdict_noparams = {
'_doc_default_callparams': _mhg_doc_frozen_callparams,
'_doc_callparams_note': _mhg_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_hypergeom_gen(multi_rv_generic):
r"""A multivariate hypergeometric random variable.
Methods
-------
``pmf(x, m, n)``
Probability mass function.
``logpmf(x, m, n)``
Log of the probability mass function.
``rvs(m, n, size=1, random_state=None)``
Draw random samples from a multivariate hypergeometric
distribution.
``mean(m, n)``
Mean of the multivariate hypergeometric distribution.
``var(m, n)``
Variance of the multivariate hypergeometric distribution.
``cov(m, n)``
Compute the covariance matrix of the multivariate
hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
The probability mass function for `multivariate_hypergeom` is
.. math::
P(X_1 = x_1, X_2 = x_2, \ldots, X_k = x_k) = \frac{\binom{m_1}{x_1}
\binom{m_2}{x_2} \cdots \binom{m_k}{x_k}}{\binom{M}{n}}, \\ \quad
(x_1, x_2, \ldots, x_k) \in \mathbb{N}^k \text{ with }
\sum_{i=1}^k x_i = n
where :math:`m_i` are the number of objects of type :math:`i`, :math:`M`
is the total number of objects in the population (sum of all the
:math:`m_i`), and :math:`n` is the size of the sample to be taken
from the population.
.. versionadded:: 1.6.0
Examples
--------
To evaluate the probability mass function of the multivariate
hypergeometric distribution, with a dichotomous population of size
:math:`10` and :math:`20`, at a sample of size :math:`12` with
:math:`8` objects of the first type and :math:`4` objects of the
second type, use:
>>> from scipy.stats import multivariate_hypergeom
>>> multivariate_hypergeom.pmf(x=[8, 4], m=[10, 20], n=12)
0.0025207176631464523
The `multivariate_hypergeom` distribution is identical to the
corresponding `hypergeom` distribution (tiny numerical differences
notwithstanding) when only two types (good and bad) of objects
are present in the population as in the example above. Consider
another example for a comparison with the hypergeometric distribution:
>>> from scipy.stats import hypergeom
>>> multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4)
0.4395604395604395
>>> hypergeom.pmf(k=3, M=15, n=4, N=10)
0.43956043956044005
The functions ``pmf``, ``logpmf``, ``mean``, ``var``, ``cov``, and ``rvs``
support broadcasting, under the convention that the vector parameters
(``x``, ``m``, and ``n``) are interpreted as if each row along the last
axis is a single object. For instance, we can combine the previous two
calls to `multivariate_hypergeom` as
>>> multivariate_hypergeom.pmf(x=[[8, 4], [3, 1]], m=[[10, 20], [10, 5]],
... n=[12, 4])
array([0.00252072, 0.43956044])
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``m.shape[-1]``. For example:
>>> multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12])
array([[[ 1.05, -1.05],
[-1.05, 1.05]],
[[ 1.56, -1.56],
[-1.56, 1.56]]])
That is, ``result[0]`` is equal to
``multivariate_hypergeom.cov(m=[7, 9], n=8)`` and ``result[1]`` is equal
to ``multivariate_hypergeom.cov(m=[10, 15], n=12)``.
Alternatively, the object may be called (as a function) to fix the `m`
and `n` parameters, returning a "frozen" multivariate hypergeometric
random variable.
>>> rv = multivariate_hypergeom(m=[10, 20], n=12)
>>> rv.pmf(x=[8, 4])
0.0025207176631464523
See Also
--------
scipy.stats.hypergeom : The hypergeometric distribution.
scipy.stats.multinomial : The multinomial distribution.
References
----------
.. [1] The Multivariate Hypergeometric Distribution,
http://www.randomservices.org/random/urn/MultiHypergeometric.html
.. [2] Thomas J. Sargent and John Stachurski, 2020,
Multivariate Hypergeometric Distribution
https://python.quantecon.org/_downloads/pdf/multi_hyper.pdf
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mhg_docdict_params)
def __call__(self, m, n, seed=None):
"""Create a frozen multivariate_hypergeom distribution.
See `multivariate_hypergeom_frozen` for more information.
"""
return multivariate_hypergeom_frozen(m, n, seed=seed)
def _process_parameters(self, m, n):
m = np.asarray(m)
n = np.asarray(n)
if m.size == 0:
m = m.astype(int)
if n.size == 0:
n = n.astype(int)
if not np.issubdtype(m.dtype, np.integer):
raise TypeError("'m' must an array of integers.")
if not np.issubdtype(n.dtype, np.integer):
raise TypeError("'n' must an array of integers.")
if m.ndim == 0:
raise ValueError("'m' must be an array with"
" at least one dimension.")
# check for empty arrays
if m.size != 0:
n = n[..., np.newaxis]
m, n = np.broadcast_arrays(m, n)
# check for empty arrays
if m.size != 0:
n = n[..., 0]
mcond = m < 0
M = m.sum(axis=-1)
ncond = (n < 0) | (n > M)
return M, m, n, mcond, ncond, np.any(mcond, axis=-1) | ncond
def _process_quantiles(self, x, M, m, n):
x = np.asarray(x)
if not np.issubdtype(x.dtype, np.integer):
raise TypeError("'x' must an array of integers.")
if x.ndim == 0:
raise ValueError("'x' must be an array with"
" at least one dimension.")
if not x.shape[-1] == m.shape[-1]:
raise ValueError(f"Size of each quantile must be size of 'm': "
f"received {x.shape[-1]}, "
f"but expected {m.shape[-1]}.")
# check for empty arrays
if m.size != 0:
n = n[..., np.newaxis]
M = M[..., np.newaxis]
x, m, n, M = np.broadcast_arrays(x, m, n, M)
# check for empty arrays
if m.size != 0:
n, M = n[..., 0], M[..., 0]
xcond = (x < 0) | (x > m)
return (x, M, m, n, xcond,
np.any(xcond, axis=-1) | (x.sum(axis=-1) != n))
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
return bad_value
if result.ndim == 0:
return result[()]
return result
def _logpmf(self, x, M, m, n, mxcond, ncond):
# This equation of the pmf comes from the relation,
# n choose r = beta(n+1, 1) / beta(r+1, n-r+1)
num = np.zeros_like(m, dtype=np.float_)
den = np.zeros_like(n, dtype=np.float_)
m, x = m[~mxcond], x[~mxcond]
M, n = M[~ncond], n[~ncond]
num[~mxcond] = (betaln(m+1, 1) - betaln(x+1, m-x+1))
den[~ncond] = (betaln(M+1, 1) - betaln(n+1, M-n+1))
num[mxcond] = np.nan
den[ncond] = np.nan
num = num.sum(axis=-1)
return num - den
def logpmf(self, x, m, n):
"""Log of the multivariate hypergeometric probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
M, m, n, mcond, ncond, mncond = self._process_parameters(m, n)
(x, M, m, n, xcond,
xcond_reduced) = self._process_quantiles(x, M, m, n)
mxcond = mcond | xcond
ncond = ncond | np.zeros(n.shape, dtype=np.bool_)
result = self._logpmf(x, M, m, n, mxcond, ncond)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond_reduced | np.zeros(mncond.shape, dtype=np.bool_)
result = self._checkresult(result, xcond_, np.NINF)
# replace values bad for n or m; broadcast
# mncond to the right shape
mncond_ = mncond | np.zeros(xcond_reduced.shape, dtype=np.bool_)
return self._checkresult(result, mncond_, np.nan)
def pmf(self, x, m, n):
"""Multivariate hypergeometric probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
out = np.exp(self.logpmf(x, m, n))
return out
def mean(self, m, n):
"""Mean of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : array_like or scalar
The mean of the distribution
"""
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M, n = M[..., np.newaxis], n[..., np.newaxis]
cond = (M == 0)
M = np.ma.masked_array(M, mask=cond)
mu = n*(m/M)
if m.size != 0:
mncond = (mncond[..., np.newaxis] |
np.zeros(mu.shape, dtype=np.bool_))
return self._checkresult(mu, mncond, np.nan)
def var(self, m, n):
"""Variance of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
array_like
The variances of the components of the distribution. This is
the diagonal of the covariance matrix of the distribution
"""
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M, n = M[..., np.newaxis], n[..., np.newaxis]
cond = (M == 0) & (M-1 == 0)
M = np.ma.masked_array(M, mask=cond)
output = n * m/M * (M-m)/M * (M-n)/(M-1)
if m.size != 0:
mncond = (mncond[..., np.newaxis] |
np.zeros(output.shape, dtype=np.bool_))
return self._checkresult(output, mncond, np.nan)
def cov(self, m, n):
"""Covariance matrix of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : array_like
The covariance matrix of the distribution
"""
# see [1]_ for the formula and [2]_ for implementation
# cov( x_i,x_j ) = -n * (M-n)/(M-1) * (K_i*K_j) / (M**2)
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M = M[..., np.newaxis, np.newaxis]
n = n[..., np.newaxis, np.newaxis]
cond = (M == 0) & (M-1 == 0)
M = np.ma.masked_array(M, mask=cond)
output = (-n * (M-n)/(M-1) *
np.einsum("...i,...j->...ij", m, m) / (M**2))
# check for empty arrays
if m.size != 0:
M, n = M[..., 0, 0], n[..., 0, 0]
cond = cond[..., 0, 0]
dim = m.shape[-1]
# diagonal entries need to be computed differently
for i in range(dim):
output[..., i, i] = (n * (M-n) * m[..., i]*(M-m[..., i]))
output[..., i, i] = output[..., i, i] / (M-1)
output[..., i, i] = output[..., i, i] / (M**2)
if m.size != 0:
mncond = (mncond[..., np.newaxis, np.newaxis] |
np.zeros(output.shape, dtype=np.bool_))
return self._checkresult(output, mncond, np.nan)
def rvs(self, m, n, size=None, random_state=None):
"""Draw random samples from a multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw. Default is ``None``, in which case a
single variate is returned as an array with shape ``m.shape``.
%(_doc_random_state)s
Returns
-------
rvs : array_like
Random variates of shape ``size`` or ``m.shape``
(if ``size=None``).
Notes
-----
%(_doc_callparams_note)s
Also note that NumPy's `multivariate_hypergeometric` sampler is not
used as it doesn't support broadcasting.
"""
M, m, n, _, _, _ = self._process_parameters(m, n)
random_state = self._get_random_state(random_state)
if size is not None and isinstance(size, int):
size = (size, )
if size is None:
rvs = np.empty(m.shape, dtype=m.dtype)
else:
rvs = np.empty(size + (m.shape[-1], ), dtype=m.dtype)
rem = M
# This sampler has been taken from numpy gh-13794
# https://github.com/numpy/numpy/pull/13794
for c in range(m.shape[-1] - 1):
rem = rem - m[..., c]
rvs[..., c] = ((n != 0) *
random_state.hypergeometric(m[..., c], rem,
n + (n == 0),
size=size))
n = n - rvs[..., c]
rvs[..., m.shape[-1] - 1] = n
return rvs
multivariate_hypergeom = multivariate_hypergeom_gen()
class multivariate_hypergeom_frozen(multi_rv_frozen):
def __init__(self, m, n, seed=None):
self._dist = multivariate_hypergeom_gen(seed)
(self.M, self.m, self.n,
self.mcond, self.ncond,
self.mncond) = self._dist._process_parameters(m, n)
# monkey patch self._dist
def _process_parameters(m, n):
return (self.M, self.m, self.n,
self.mcond, self.ncond,
self.mncond)
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.m, self.n)
def pmf(self, x):
return self._dist.pmf(x, self.m, self.n)
def mean(self):
return self._dist.mean(self.m, self.n)
def var(self):
return self._dist.var(self.m, self.n)
def cov(self):
return self._dist.cov(self.m, self.n)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.m, self.n,
size=size,
random_state=random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_hypergeom and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'var', 'cov', 'rvs']:
method = multivariate_hypergeom_gen.__dict__[name]
method_frozen = multivariate_hypergeom_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, mhg_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
mhg_docdict_params)
| bsd-3-clause |
jskDr/jamespy_py3 | medic/kdl.py | 1 | 53787 | """
KDL - deep learning for medic
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.signal import convolve2d, fftconvolve
from sklearn import preprocessing, model_selection, metrics
import os
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D, Conv2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras import backend as K
from keras import callbacks
import kkeras
from . import beads
def fig2array(fig):
fig.canvas.draw()
data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)  # frombuffer replaces the deprecated np.fromstring binary mode
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# ones_255 = np.ones_like( data) * 255
# data = 255 - data
return data
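# Hedged usage sketch for fig2array (figure size and dpi are assumptions):
# the returned array is uint8 RGB with shape (height_px, width_px, 3).
#
#     >>> fig, ax = plt.subplots(figsize=(2, 2), dpi=72)
#     >>> _ = ax.plot([0, 1], [0, 1])
#     >>> img = fig2array(fig)
#     >>> img.shape
#     (144, 144, 3)
#     >>> plt.close(fig)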
def _gen_cell_r0(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=5, # max_bd >= 1
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Not generate cells in the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
#print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
for bd_n in range(np.random.randint(max_bd)+1):
circle_d["bd{}".format(bd_n)] = plt.Circle(rand_pos_bd(), r_bd, color='w')
# circle_d["bd2"] = plt.Circle(rand_pos_bd(), r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def _gen_cell_r1(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=5, # max_bd >= 1
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean':2, 'std':2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and silica bead is 5 um.
Lymphoma cell size varies more widely, but the mean value is around 9-12 um.
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Not generate cells in the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
# print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
for bd_n in range(np.random.randint(max_bd)+1):
circle_d["bd{}".format(bd_n)] = plt.Circle(rand_pos_bd(), r_bd, color='w')
# circle_d["bd2"] = plt.Circle(rand_pos_bd(), r_bd, color='w')
if stat_ext_bd is not None:
n_ext_bd = np.max((0, int(np.random.randn()*stat_ext_bd['std'] + stat_ext_bd['mean'])))
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def _gen_cell_r2(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean':2, 'std':2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and silica bead is 5 um.
Lymphoma cell size varies more widely, but the mean value is around 9-12 um.
Inputs
======
max_bd, int, default=3
The maximum number of beads attached to a cell.
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Not generate cells in the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
#print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
for bd_n in range(np.random.randint(max_bd)+1):
circle_d["bd{}".format(bd_n)] = plt.Circle(rand_pos_bd(), r_bd, color='w')
# circle_d["bd2"] = plt.Circle(rand_pos_bd(), r_bd, color='w')
if stat_ext_bd is not None:
#n_ext_bd = np.max((0, int(np.random.randn()*stat_ext_bd['std'] + stat_ext_bd['mean'])))
n_ext_bd = np.random.randint(stat_ext_bd['mean']+1)
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def _gen_cell_db_r0(N=5, rand_pos_cell=False, disp=False):
db_l = []
cell_img_org = gen_cell(bd_on=False, rand_pos_cell=rand_pos_cell)
for i in range(N):
if disp: # 1, 2, True (not 0 or False)
print('Iteration:', i)
elif disp == 2:
print(i, end=",")
if rand_pos_cell:
cell_img = gen_cell(bd_on=False, rand_pos_cell=rand_pos_cell)
else:
cell_img = cell_img_org.copy()
cellbd_img = gen_cell(bd_on=True, rand_pos_cell=rand_pos_cell)
db_l.append(cell_img[:, :, 0]) # No RGB Info
db_l.append(cellbd_img[:, :, 0]) # No RGB Info
print("The end.")
return db_l
def gen_cell_db(N=5, rand_pos_cell=False,
extra_bead_on=True,
max_bd=3,
disp=False):
"""
db_l = gen_cell_db(N=5, rand_pos_cell=False, extra_bead_on=True, disp=False)
Generate cell_db
Inputs
======
max_bd, int, default=3
The maximum number of beads attached to a cell.
"""
fig, ax = plt.subplots(figsize=(2, 2))
# ax.set_axis_bgcolor('red')
if extra_bead_on:
stat_ext_bd = {'mean': 5, 'std': 1}
else:
stat_ext_bd = None
db_l = []
cell_img_org = gen_cell(bd_on=False,
rand_pos_cell=rand_pos_cell,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
for i in range(N):
if disp:
print(i, end=",")
if rand_pos_cell:
cell_img = gen_cell(
bd_on=False, rand_pos_cell=rand_pos_cell,
max_bd=max_bd,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
else:
cell_img = cell_img_org.copy()
cellbd_img = gen_cell(
bd_on=True, rand_pos_cell=rand_pos_cell,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
db_l.append(cell_img[:, :, 0]) # No RGB Info
db_l.append(cellbd_img[:, :, 0]) # No RGB Info
plt.close(fig)
print("The end.")
return db_l
def save_cell_db(db_l, fname_gz="sheet.gz/cell_db.cvs.gz"):
df_l = []
celltype = 0
for i, db in enumerate(db_l):
df_i = pd.DataFrame()
df_i["ID"] = [i] * np.prod(db.shape)
df_i["celltype"] = celltype
df_i["x"] = np.repeat(np.arange(db.shape[0]), db.shape[1])
df_i["y"] = list(range(db.shape[1])) * db.shape[0]
df_i["image"] = db.reshape(-1)
celltype ^= 1
df_l.append(df_i)
cell_df = pd.concat(df_l, ignore_index=True)
cell_df.to_csv(fname_gz, index=False, compression='gzip')
return cell_df
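# Hedged pipeline sketch (file name is a placeholder): each call to
# gen_cell_db(N) yields 2*N images (a bead-free and a bead-attached image
# per iteration), and save_cell_db flattens them into a long table with
# alternating celltype 0/1.
#
#     >>> db_l = gen_cell_db(N=10, rand_pos_cell=True, disp=True)
#     >>> cell_df = save_cell_db(db_l, fname_gz="sheet.gz/cell_db20.cvs.gz")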
# ===================================
# Functions for the Center_Cell mode
# - gen_cell_n_beads,
# gen_cell_db_center_cell,
# save_cell_db_center_cell
# ===================================
def gen_cell(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
stat_ext_bd={'mean':2, 'std':2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and silica bead is 5 um.
Lymphoma cell size varies more widely, but the mean value is around 9-12 um.
Inputs
======
max_bd, int, default=3
The maximum number of beads attached to a cell.
"""
return gen_cell_n_beads(bd_on=bd_on,
rand_pos_cell=rand_pos_cell,
r_cell=r_cell, # 0<r_cell<=1
r_bd=r_bd, # 0<r_bd<=1
max_bd=max_bd, # max_bd >= 1
rand_bead_flag=True, # This is onlu changed part.
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd=stat_ext_bd,
bound_flag=bound_flag,
visible=visible,
disp=disp,
fig=fig,
ax=ax)
def gen_cell_n_beads(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
rand_bead_flag=False,
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean': 2, 'std': 2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and silica bead is 5 um.
Lymphoma cell size varies more widely,
but the mean value is around 9-12 um.
Inputs
======
max_bd, int, default=3
The maximum number of beads attached to a cell.
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Not generate cells in the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
#print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
if rand_bead_flag:
final_max_bd = np.random.randint(max_bd) + 1
else:
# Now, the number of total beads attached a cell is fixed (not random).
final_max_bd = max_bd
for bd_n in range(final_max_bd):
circle_d["bd{}".format(bd_n)] = \
plt.Circle(rand_pos_bd(), r_bd, color='w')
if stat_ext_bd is not None:
#n_ext_bd = np.max((0, int(np.random.randn()*stat_ext_bd['std'] + stat_ext_bd['mean'])))
n_ext_bd = np.random.randint(stat_ext_bd['mean']+1)
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = \
plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
class CELL():
def __init__(self, flag_no_overlap_beads=False):
self.flag_no_overlap_beads = flag_no_overlap_beads
def gen(self,
bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
rand_bead_flag=False,
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean': 2, 'std': 2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Not generate cells in the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def get_pos_bd(th):
return pos_cell + (r_cell + r_bd) * np.array((np.cos(th), np.sin(th)))
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = get_pos_bd(th)
return pos_bd
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
if rand_bead_flag:
final_max_bd = np.random.randint(max_bd) + 1
else:
# Now, the number of total beads attached
# a cell is fixed (not random).
final_max_bd = max_bd
if not self.flag_no_overlap_beads:
rand_pos_bd_l = []
for bd_n in range(final_max_bd):
rand_pos_bd_l.append(rand_pos_bd())
else:
bead_center_l = []
cnt = 0
while len(bead_center_l) < final_max_bd:
# regenerate bead centers until enough non-overlapping beads are placed
bead_center_l = beads.BEADS(r_cell, r_bd).gen_bead_centers(final_max_bd)
cnt += 1
assert cnt < 100, 'Try to reduce the number of beads!'
rand_pos_bd_l = [get_pos_bd(th/180*np.pi) for th in bead_center_l]
for bd_n in range(final_max_bd):
circle_d["bd{}".format(bd_n)] = \
plt.Circle(rand_pos_bd_l[bd_n], r_bd, color='w')
if stat_ext_bd is not None:
n_ext_bd = np.random.randint(stat_ext_bd['mean'] + 1)
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = \
plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def gen_cell_n_nooverlap_beads(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
rand_bead_flag=False,
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean': 2, 'std': 2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and silica bead is 5 um.
Lymphoma cell size varies more widely,
but the mean value is around 9-12 um.
Inputs
======
max_bd, int, default=3
The maximum number of beads attached to a cell.
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Not generate cells in the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
#print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
if rand_bead_flag:
final_max_bd = np.random.randint(max_bd) + 1
else:
# Now, the number of total beads attached a cell is fixed (not random).
final_max_bd = max_bd
for bd_n in range(final_max_bd):
circle_d["bd{}".format(bd_n)] = \
plt.Circle(rand_pos_bd(), r_bd, color='w')
if stat_ext_bd is not None:
#n_ext_bd = np.max((0, int(np.random.randn()*stat_ext_bd['std'] + stat_ext_bd['mean'])))
n_ext_bd = np.random.randint(stat_ext_bd['mean']+1)
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = \
plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def gen_cell_db_center_cell(N=5, rand_pos_cell=False,
extra_bead_on=True,
max_bd=3,
flag_no_overlap_beads=False,
disp=False):
"""
db_l = gen_cell_db(N=5, rand_pos_cell=False, extra_bead_on=True, disp=False)
Generate cell_db
Inputs
======
max_bd, int, default=3
The maximum number of beads attached to a cell.
"""
cellgen = CELL(flag_no_overlap_beads=flag_no_overlap_beads)
fig, ax = plt.subplots(figsize=(2, 2))
# ax.set_axis_bgcolor('red')
if extra_bead_on:
stat_ext_bd = {'mean': 5, 'std': 1}
else:
stat_ext_bd = None
db_l = []
for i in range(N):
if disp:
print(i, end=",")
# n_beads cycles from 0 to max_bd-1 across the images,
# so cells with zero beads are generated as well.
n_beads = i % max_bd
cellbd_img = cellgen.gen(bd_on=True,
rand_pos_cell=rand_pos_cell,
# max_bd is repeated from 0 to max_bd
max_bd=n_beads,
rand_bead_flag=False,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
db_l.append(cellbd_img[:, :, 0]) # No RGB Info
plt.close(fig)
print("The end.")
return db_l
def save_cell_db_center_cell(db_l, max_bd, fname_gz="sheet.gz/cell_db.cvs.gz"):
"""
Each image includes a cell at the center location, and
the number of beads per cell cycles through
0, 1, ..., max_bd-1, so every bead count is equally
represented across the images.
"""
df_l = []
celltype = 0
for i, db in enumerate(db_l):
df_i = pd.DataFrame()
df_i["ID"] = [i] * np.prod(db.shape)
df_i["n_beads"] = [i % max_bd] * np.prod(db.shape)
df_i["x"] = np.repeat(np.arange(db.shape[0]), db.shape[1])
df_i["y"] = list(range(db.shape[1])) * db.shape[0]
df_i["image"] = db.reshape(-1)
celltype ^= 1
df_l.append(df_i)
cell_df = pd.concat(df_l, ignore_index=True)
cell_df.to_csv(fname_gz, index=False, compression='gzip')
return cell_df
def _gen_save_cell_db_r0(N=5, fname_gz="sheet.gz/cell_db.cvs.gz",
extra_bead_on=True, rand_pos_cell=False,
max_bd=3,
classification_mode="Cancer_Normal_Cell",
disp=False):
"""
- Image show without pausing is needed. (Oct 31, 2016)
Parameters
==========
rand_pos_cell, Default=False
If it is True, the position of the cell is varied.
Otherwise, the position is fixed at the center (0.5, 0.5).
max_bd, int, default=3
The maximum number of beads attached to a cell.
classification_mode, string, default="Cancer_Normal"
if it is "Cancer_Normal_Cell", this function classifies cancer or normal.
If it is "Center_Cell", this fucntion classifies numer of beads in each cell.
In this case, the number of beads in cells are equaly distributed
from 0 to max_bd. For example, if N=100 & max_bd=4, 0-beads,
1-beads, 2-beads and 3-beads cell images are repeated 25 times.
"""
def save(save_fn, db_l, max_bd=None, fname_gz=None):
fname_gz_fold, fname_gz_file = os.path.split(fname_gz)
os.makedirs(fname_gz_fold, exist_ok=True)
if max_bd is None:
cell_df = save_fn(db_l, fname_gz=fname_gz)
else:
cell_df = save_fn(db_l, max_bd, fname_gz=fname_gz)
return cell_df
if classification_mode == "Cancer_Normal_Cell":
db_l = gen_cell_db(N, rand_pos_cell=rand_pos_cell,
extra_bead_on=extra_bead_on,
max_bd=max_bd,
# classification_mode=classification_mode,
disp=disp)
if disp:
print("Saving...")
cell_df = save(save_cell_db, db_l, fname_gz=fname_gz)
elif classification_mode == "Center_Cell":
assert int(N % max_bd) == 0, "N % max_bd should zero in the Center_Cell mode"
db_l = gen_cell_db_center_cell(N, rand_pos_cell=rand_pos_cell,
extra_bead_on=extra_bead_on,
max_bd=max_bd,
disp=disp)
if disp:
print("Saving...")
cell_df = save(save_cell_db_center_cell, db_l, max_bd,
fname_gz=fname_gz)
else:
raise ValueError("classification_mode = {} is not supported.".format(classification_mode))
return cell_df
def gen_save_cell_db(N=5, fname_gz="sheet.gz/cell_db.cvs.gz",
extra_bead_on=True, rand_pos_cell=False,
max_bd=3,
classification_mode="Cancer_Normal_Cell",
flag_no_overlap_beads=False,
disp=False):
"""
- Image show without pausing is needed. (Oct 31, 2016)
Parameters
==========
rand_pos_cell, Default=False
If it is True, the position of the cell is varied.
Otherwise, the position is fixed at the center (0.5, 0.5).
max_bd, int, default=3
The maximum number of beads attached to a cell.
classification_mode, string, default="Cancer_Normal"
if it is "Cancer_Normal_Cell", this function classifies cancer or normal.
If it is "Center_Cell", this fucntion classifies numer of beads in each cell.
In this case, the number of beads in cells are equaly distributed
from 0 to max_bd. For example, if N=100 & max_bd=4, 0-beads,
1-beads, 2-beads and 3-beads cell images are repeated 25 times.
"""
def save(save_fn, db_l, max_bd=None, fname_gz=None):
fname_gz_fold, fname_gz_file = os.path.split(fname_gz)
os.makedirs(fname_gz_fold, exist_ok=True)
if max_bd is None:
cell_df = save_fn(db_l, fname_gz=fname_gz)
else:
cell_df = save_fn(db_l, max_bd, fname_gz=fname_gz)
return cell_df
if classification_mode == "Cancer_Normal_Cell":
db_l = gen_cell_db(N, rand_pos_cell=rand_pos_cell,
extra_bead_on=extra_bead_on,
max_bd=max_bd,
disp=disp)
if disp:
print("Saving...")
cell_df = save(save_cell_db, db_l, fname_gz=fname_gz)
elif classification_mode == "Center_Cell":
assert int(N % max_bd) == 0, "N % max_bd should zero in the Center_Cell mode"
db_l = gen_cell_db_center_cell(N, rand_pos_cell=rand_pos_cell,
extra_bead_on=extra_bead_on,
max_bd=max_bd,
flag_no_overlap_beads=flag_no_overlap_beads,
disp=disp)
if disp:
print("Saving...")
cell_df = save(save_cell_db_center_cell, db_l, max_bd,
fname_gz=fname_gz)
else:
raise ValueError("classification_mode = {} is not supported.".format(classification_mode))
return cell_df
def gen_save_cell_db_no_overlap(N_each, max_bd_p1 = 4, disp=2):
"""
N_each is the number of cell images for each bead case.
max_bd_p1 is max_bd plus 1.
"""
N = N_each * max_bd_p1
max_bd = max_bd_p1 - 1
fname_gz="sheet.gz/cell_db{0}_center_cell_{1}_nooverlap.cvs.gz".format(N, max_bd)
print("Output file name is", fname_gz)
return gen_save_cell_db(N = N, fname_gz=fname_gz,
rand_pos_cell=False, max_bd=max_bd_p1,
classification_mode="Center_Cell",
extra_bead_on=False,
flag_no_overlap_beads=True,
disp=disp)
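# Hedged example call: 25 images per bead count with up to 3 non-overlapping
# beads generates 100 images and writes them to
# "sheet.gz/cell_db100_center_cell_3_nooverlap.cvs.gz".
#
#     >>> cell_df = gen_save_cell_db_no_overlap(N_each=25, max_bd_p1=4)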
class obj:
def __init__(self, r, L=144):
"""
The PS bead size is 6 um and silica bead is 5 um.
Lymphoma cell size varies more widely,
but the mean value is around 9-12 um.
"""
# Initial values
self.Lx, self.Ly = L, L
self.downsamples = 4
self.d_um = 2.2 / self.downsamples
# Input and generated values
self.r = r
self.r_pixels_x = self.r * self.Lx
self.r_pixels_y = self.r * self.Ly
self.r_x_um = self.r_pixels_x * self.d_um
self.r_y_um = self.r_pixels_y * self.d_um
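# Worked example of the conversion above: obj(r=0.1, L=144) gives
# d_um = 2.2 / 4 = 0.55 um per pixel and r_pixels_x = 0.1 * 144 = 14.4 pixels,
# so r_x_um = 14.4 * 0.55 = 7.92 um, i.e. a normalized radius of 0.1
# corresponds to an object of roughly 8 um radius at this sampling.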
def get_h2d(nx, ny, l=700, z=0.5, dx=0.8, dy=0.8):
"""
2D Fresnel diffraction (free-space propagation) kernel.
Input
=====
nx, ny, int
kernel size in pixels
l, float
wavelength (lambda)
z, float
propagation distance
dx, dy, float
pixel pitch
"""
k = 2.0 * np.pi / l
x_vec = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx
x = np.dot(np.ones((nx, 1)), x_vec)
y_vec = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy
y = y_vec * np.ones((1, ny))
#k = 2.0 * np.pi / l
return np.exp(1j * k * z) / (1j * l * z) * np.exp((1j * k / (2 * z)) *
(np.power(x, 2) + np.power(y, 2)))
def get_h2d_inv(nx, ny, l=700, z=0.5, dx=0.8, dy=0.8):
"""
2D inverse (back-propagation) Fresnel diffraction kernel.
Input
=====
nx, ny, int
kernel size in pixels
l, float
wavelength (lambda)
z, float
propagation distance
dx, dy, float
pixel pitch
"""
k = 2.0 * np.pi / l
x_vec = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx
x = np.dot(np.ones((nx, 1)), x_vec)
y_vec = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy
y = y_vec * np.ones((1, ny))
#k = 2.0 * np.pi / l
return np.exp(-1j * k * z) / (1j * l * z) * np.exp((-1j * k / (2 * z)) *
(np.power(x, 2) + np.power(y, 2)))
# Fresnel
def get_h(ny, nx, z_mm=0.5, dx_um=2.2, dy_um=2.2, l_nm=405):
"""
2D Fresnel diffraction kernel with explicit units.
Input
=====
ny, nx, int
kernel size in pixels
z_mm, float
propagation distance in mm
dx_um, dy_um, float
pixel pitch in um
l_nm, float
wavelength in nm
"The PS bead size is 6 um and silica bead is 5 um.
Lymphoma cell size varies more widely, but the mean value is around 9-12 um."
"""
# nano-meter to micro-meter transform (nm -> um)
l_um = l_nm / 1000
z_um = z_mm * 1000
x_vec_um = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx_um
x_um = np.dot(np.ones((ny, 1)), x_vec_um)
y_vec_um = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy_um
y_um = y_vec_um * np.ones((1, nx))
return np.exp((1j * np.pi) / (l_um * z_um) *
(np.power(x_um, 2) + np.power(y_um, 2)))
def get_h_inv(ny, nx, z_mm=0.5, dx_um=2.2, dy_um=2.2, l_nm=405):
"""
2D inverse (back-propagation) Fresnel diffraction kernel with explicit units.
Input
=====
ny, nx, int
kernel size in pixels
z_mm, float
propagation distance in mm
dx_um, dy_um, float
pixel pitch in um
l_nm, float
wavelength in nm
"The PS bead size is 6 um and silica bead is 5 um.
Lymphoma cell size varies more widely, but the mean value is around 9-12 um."
"""
# nano-meter to micro-meter transform (nm -> um)
l_um = l_nm / 1000
z_um = z_mm * 1000
x_vec_um = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx_um
x_um = np.dot(np.ones((ny, 1)), x_vec_um)
y_vec_um = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy_um
y_um = y_vec_um * np.ones((1, nx))
return np.exp((-1j * np.pi) / (l_um * z_um) *
(np.power(x_um, 2) + np.power(y_um, 2)))
def fd_conv(Img_xy, h2d, mode ='same'):
#return convolve2d(Img_xy, h2d, mode=mode)
return fftconvolve(Img_xy, h2d, mode=mode)
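# Minimal usage sketch (assumes a 144x144 grayscale image array `img`):
# build the Fresnel kernel with the camera geometry used elsewhere in this
# module and convolve to simulate the in-line hologram at the sensor.
#
#     >>> h = get_h(144, 144, z_mm=0.5, dx_um=2.2 / 4, dy_um=2.2 / 4, l_nm=405)
#     >>> holo = fd_conv(img, h)           # complex field at the sensor plane
#     >>> intensity = np.abs(holo) ** 2    # magnitude-squared, as a camera records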
def cell_fd_info(cell_df):
Lx = cell_df['x'].max() + 1
Ly = cell_df['y'].max() + 1
Limg = cell_df['ID'].max() + 1
#print( Lx, Ly, Limg)
return Limg, Lx, Ly
def cell_fd_conv(cell_df, h144=None):
Limg, Lx, Ly = cell_fd_info(cell_df)
if h144 is None:
h144 = get_h2d(Lx, Ly, l=405, z=0.5, dx=2.2/4, dy=2.2/4)
cell_img_fd_l = []
for l in range(Limg):
cell_img = cell_df[cell_df["ID"] == l]["image"].values.reshape(Lx, Ly)
#cell_img_fd = fd_conv(cell_img, h144)
cell_img_fd = fftconvolve(cell_img, h144, mode='same')
cell_img_fd_l.append(cell_img_fd)
cell_img_fd_a = np.array(cell_img_fd_l)
#print( cell_img_fd_a.shape)
return cell_img_fd_a
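# Hedged usage sketch (file name is a placeholder): starting from a saved
# cell table, compute the complex diffraction pattern of every image at once.
#
#     >>> cell_df = pd.read_csv("sheet.gz/cell_db100.cvs.gz")
#     >>> fd_a = cell_fd_conv(cell_df)     # complex array of shape (Limg, Lx, Ly)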
def cell_fd_extention(fname_org='sheet.gz/cell_db.cvs.gz', camera_bit_resolution=14):
cell_df = pd.read_csv(fname_org)
Limg, Lx, Ly = cell_fd_info(cell_df)
cell_df_ext = cell_df.copy()
# Fresnel diffraction
cell_img_fd_a = cell_fd_conv(cell_df)
cell_df_ext['freznel image'] = cell_img_fd_a.reshape(-1)
# max_v, min_v = np.max(cell_df["image"]), np.min(cell_df["image"])
cell_img_fd_a_2d = cell_img_fd_a.reshape(Limg, -1)
cell_img_fd_a_2d_scale = preprocessing.minmax_scale(
np.abs(cell_img_fd_a_2d)) * (2**camera_bit_resolution)
cell_img_fd_a_2d_scale_200x144x144 = cell_img_fd_a_2d_scale.reshape(
Limg, Lx, Ly).astype(int)
cell_df_ext[
'mag freznel image'] = cell_img_fd_a_2d_scale_200x144x144.reshape(-1)
return cell_df_ext
def cell_fd_ext_save(fname_org='sheet.gz/cell_db100.cvs.gz',
fname_ext='sheet.gz/cell_fd_db100.cvs.gz'):
cell_df_ext = cell_fd_extention(fname_org)
# Save data
cell_df_ext.to_csv(fname_ext, index=False, compression='gzip')
return cell_df_ext
class CELL_FD_EXT():
def __init__(self, fname_org, h2d=None, h2d_inv=None):
cell_df = pd.read_csv(fname_org)
Limg, Lx, Ly = cell_fd_info(cell_df)
if h2d is None:
h2d = get_h(Ly, Lx, z_mm=0.5, dx_um=2.2/4, dy_um=2.2/4, l_nm=405)
if h2d_inv is None:
h2d_inv = get_h_inv(Ly, Lx, z_mm=0.5, dx_um=2.2/4, dy_um=2.2/4, l_nm=405)
self.fname_org = fname_org
self.h2d = h2d
self.h2d_inv = h2d_inv
def save(self):
fname_org = self.fname_org
fname_ext = fname_org[:-7] + '_fd' + fname_org[-7:]
print('fname_ext is', fname_ext)
cell_df_ext = self.extention()
# Save data
cell_df_ext.to_csv(fname_ext, index=False, compression='gzip')
return cell_df_ext
def extention(self, camera_bit_resolution=14):
fname_org = self.fname_org
h2d = self.h2d
cell_df = pd.read_csv(fname_org)
Limg, Lx, Ly = cell_fd_info(cell_df)
cell_df_ext = cell_df.copy()
# Fresnel diffraction
cell_img_fd_a = cell_fd_conv(cell_df, h2d)
cell_df_ext['freznel image'] = cell_img_fd_a.reshape(-1)
# max_v, min_v = np.max(cell_df["image"]), np.min(cell_df["image"])
cell_img_fd_a_2d = cell_img_fd_a.reshape(Limg, -1)
cell_img_fd_a_2d_scale = preprocessing.minmax_scale(
np.abs(cell_img_fd_a_2d)) * (2**camera_bit_resolution)
cell_img_fd_a_2d_scale_200x144x144 = cell_img_fd_a_2d_scale.reshape(
Limg, Lx, Ly).astype(int)
cell_df_ext[
'mag freznel image'] = cell_img_fd_a_2d_scale_200x144x144.reshape(-1)
return cell_df_ext
#Deep Learning
def run_dl_mgh_params_1cl_do(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (50, 50)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(4))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
earlyStopping = callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc(history)
plt.show()
kkeras.plot_loss(history)
# Deep Learning
def run_dl_mgh_params_1cl_bn(X, y, Lx, Ly, nb_epoch=5000,
batch_size=128,
nb_classes=2):
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (50, 50)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Conv2D(nb_filters, kernel_size,
padding='valid',
input_shape=input_shape))
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=pool_size))
#model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(4))
model.add(BatchNormalization())
model.add(Activation('tanh'))
#model.add(Activation('relu'))
#model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
Y_test_pred = model.predict(X_test, verbose=0)
print('Confusion matrix')
# y_test_pred = np_utils.categorical_probas_to_classes(Y_test_pred)
y_test_pred = np.argmax(Y_test_pred, axis=1)
print(metrics.confusion_matrix(y_test, y_test_pred))
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc(history)
plt.show()
kkeras.plot_loss(history)
run_dl_mgh_params = run_dl_mgh_params_1cl_bn
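# Hedged training sketch (file name and column handling are assumptions based
# on cell_fd_extention above): build X from the flattened diffraction
# magnitudes and y from one label per image, then call the aliased runner.
#
#     >>> df = pd.read_csv("sheet.gz/cell_db100_fd.cvs.gz")
#     >>> Limg, Lx, Ly = cell_fd_info(df)
#     >>> X = df["mag freznel image"].values.reshape(Limg, Lx * Ly)
#     >>> y = df.groupby("ID")["celltype"].first().values
#     >>> run_dl_mgh_params(X, y, Lx, Ly, nb_epoch=50)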
def run_dl_mgh_params_1cl_bn_do(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (50, 50)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(4))
model.add(BatchNormalization())
model.add(Activation('tanh'))
#model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
Y_test_pred = model.predict(X_test, verbose=0)
print('Confusion matrix')
# y_test_pred = np_utils.categorical_probas_to_classes(Y_test_pred)
y_test_pred = np.argmax(Y_test_pred, axis=1)
print(metrics.confusion_matrix(y_test, y_test_pred))
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc(history)
plt.show()
kkeras.plot_loss(history)
def run_dl_mgh_params_2cl_bn(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (10, 10)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Conv2D(nb_filters, kernel_size,
padding='valid',
input_shape=input_shape))
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=pool_size))
#model.add(Dropout(0.25))
model.add(Conv2D(5, (5, 5), padding='valid'))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(5,5)))
model.add(Flatten())
model.add(Dense(4))
model.add(BatchNormalization())
model.add(Activation('tanh'))
#model.add(Activation('relu'))
#model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
Y_test_pred = model.predict(X_test, verbose=0)
print('Confusion matrix')
# y_test_pred = np_utils.categorical_probas_to_classes(Y_test_pred)
y_test_pred = np.argmax(Y_test_pred, axis=1)
print(metrics.confusion_matrix(y_test, y_test_pred))
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc(history)
plt.show()
kkeras.plot_loss(history)
# function name alias
run_dl_mgh_params_2cl = run_dl_mgh_params_2cl_bn
def run_dl_mgh_params_2cl_bn_do(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
"""
Dropout is also included after batch normalization to protect
against overfitting.
"""
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (10, 10)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Conv2D(nb_filters, kernel_size,
padding='valid',
input_shape=input_shape))
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.1))
model.add(Conv2D(5, (5, 5), padding='valid'))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(5,5)))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(4))
model.add(BatchNormalization())
model.add(Activation('tanh'))
#model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
Y_test_pred = model.predict(X_test, verbose=0)
print('Confusion matrix')
# y_test_pred = np_utils.categorical_probas_to_classes(Y_test_pred)
y_test_pred = np.argmax(Y_test_pred, axis=1)
print(metrics.confusion_matrix(y_test, y_test_pred))
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc(history)
plt.show()
kkeras.plot_loss(history)
run_dl_mgh_params_2cl_do = run_dl_mgh_params_2cl_bn_do
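# A hedged usage sketch for the training helpers above; the synthetic data
# below is an illustrative assumption, not the MGH dataset the function
# names refer to.
def _example_synthetic_run():
    """
    Minimal sketch: X holds flattened Lx-by-Ly images with pixel values in
    0..255 and y holds integer class labels in {0, 1}.  The image size,
    sample count, epoch count and batch size are illustrative choices; the
    144x144 size keeps the 20x20 convolution and 10x10 / 5x5 pooling stages
    of the two-conv-layer network above at valid feature-map sizes.
    """
    Lx, Ly = 144, 144
    X = np.random.randint(0, 256, size=(40, Lx * Ly)).astype('float32')
    y = np.random.randint(0, 2, size=40)
    run_dl_mgh_params_2cl_bn_do(X, y, Lx, Ly, nb_epoch=2, batch_size=8)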
"""
Fresenel Diffraction with a new approach
"""
def f(x_um, y_um, z_mm=0.5, l_nm=405):
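    # Note: the mixed units cancel -- um**2 / (nm * mm) = 1e-12 m**2 / 1e-12 m**2 --
    # so the exponent is dimensionless, as a Fresnel quadratic-phase kernel requires.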
return np.exp(1j * np.pi * (np.power(x_um, 2) + np.power(y_um,2)) / (l_nm * z_mm))
def cimshow(f_impulse):
plt.figure(figsize=(7,5))
plt.subplot(2,2,1)
plt.imshow(np.real(f_impulse))
plt.colorbar()
plt.title('Re{}')
plt.subplot(2,2,2)
plt.imshow(np.imag(f_impulse))
plt.colorbar()
plt.title('Img{}')
plt.subplot(2,2,2+1)
plt.imshow(np.abs(f_impulse))
plt.colorbar()
plt.title('Magnitude')
plt.subplot(2,2,2+2)
plt.imshow(np.angle(f_impulse))
plt.colorbar()
plt.title('Phase')
def xy(MAX_x_um = 55, pixel_um=2.2, oversample_rate=4):
N = int(MAX_x_um / (pixel_um / oversample_rate))
x = np.dot(np.ones((N,1)), np.linspace(-MAX_x_um,MAX_x_um,N).reshape(1,-1))
y = np.dot(np.linspace(-MAX_x_um,MAX_x_um,N).reshape(-1,1), np.ones((1,N)))
return x, y
def u(x, y, alpha):
out = np.zeros_like(x)
out[(y>=-alpha/2)&(y<=alpha/2)&(x>=-alpha/2)&(x<=alpha/2)] = 1.0
return out
def u_circle(x,y,radius):
xy2 = np.power(x,2) + np.power(y,2)
# Since x is already a 2-D grid matrix, out simply copies its shape.
# If x were a 1-D vector, out would have to be reshaped explicitly to 2-D.
out = np.zeros_like(x)
out[xy2<=np.power(radius,2)] = 1.0
return out
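# A small illustrative sketch of how the helpers above fit together; the
# aperture width, propagation distance and wavelength are assumed values.
def _aperture_sketch():
    """
    Sample a square aperture on the grid from xy() and multiply it by the
    quadratic-phase Fresnel kernel f().
    """
    x, y = xy(MAX_x_um=55, pixel_um=2.2, oversample_rate=4)
    aperture = u(x, y, alpha=20.0)            # 20 um square opening
    field = aperture * f(x, y, z_mm=0.5, l_nm=405)
    # cimshow(field) would display the Re, Im, magnitude and phase panels.
    return field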
# Code for generation H in frequency domain: H <--> h
# Gbp(n,m) = exp(1i*k*Dz*sqrt(1-lambda^2*fx(n,m)^2-lambda^2*fy(n,m)^2))
class GenG():
def upsampling(self, Pow2factor, dx1):
"""
Utility: return the refined sampling interval dx1 / 2**Pow2factor.
"""
dx2 = dx1 / (2**Pow2factor)
return dx2
def __init__(self, NxNy=(144, 144), Dz_mm=0.5, delta_um = 2.2, UpsampleFactor=2, lambda_nm=405):
"""
oversample=2^UpsampleFactor
"""
delta_m = delta_um * 1e-6
delta2_m = self.upsampling(UpsampleFactor, delta_m)
Nx, Ny = NxNy
dfx = 1/(Nx*delta2_m)
dfy = 1/(Ny*delta2_m)
x = np.arange(-Ny/2, Ny/2)*dfy
y = np.arange(-Nx/2, Nx/2)*dfx
self.xv, self.yv = np.meshgrid(x, y)
self.lambda_m = lambda_nm * 1e-9
self.k_rad = 2*np.pi/self.lambda_m
self.Dz_m = Dz_mm * 1e-3
def bp(self):
x, y = self.xv, self.yv
l = self.lambda_m
k = self.k_rad
Dz = self.Dz_m
return np.exp(1j * k * Dz * np.sqrt(1-np.power(l*x,2)-np.power(l*y,2)))
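    # For instance (illustrative values), GenG(NxNy=(144, 144), Dz_mm=0.5).bp()
    # returns the 144x144 back-propagation transfer function Gbp on the
    # frequency grid built in __init__.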
def fp(self):
x, y = self.xv, self.yv
l = self.lambda_m
k = self.k_rad
Dz = self.Dz_m
return np.exp(-1j * k * Dz * np.sqrt(1-np.power(l*x,2)-np.power(l*y,2))) | mit |
GuyAllard/markov_clustering | markov_clustering/mcl.py | 1 | 7864 | import numpy as np
from scipy.sparse import isspmatrix, dok_matrix, csc_matrix
import sklearn.preprocessing
from .utils import MessagePrinter
def sparse_allclose(a, b, rtol=1e-5, atol=1e-8):
"""
Version of np.allclose for use with sparse matrices
"""
c = np.abs(a - b) - rtol * np.abs(b)
# noinspection PyUnresolvedReferences
return c.max() <= atol
def normalize(matrix):
"""
Normalize the columns of the given matrix
:param matrix: The matrix to be normalized
:returns: The normalized matrix
"""
return sklearn.preprocessing.normalize(matrix, norm="l1", axis=0)
def inflate(matrix, power):
"""
Apply cluster inflation to the given matrix by raising
each element to the given power.
:param matrix: The matrix to be inflated
:param power: Cluster inflation parameter
:returns: The inflated matrix
"""
if isspmatrix(matrix):
return normalize(matrix.power(power))
return normalize(np.power(matrix, power))
def expand(matrix, power):
"""
Apply cluster expansion to the given matrix by raising
the matrix to the given power.
:param matrix: The matrix to be expanded
:param power: Cluster expansion parameter
:returns: The expanded matrix
"""
if isspmatrix(matrix):
return matrix ** power
return np.linalg.matrix_power(matrix, power)
def add_self_loops(matrix, loop_value):
"""
Add self-loops to the matrix by setting the diagonal
to loop_value
:param matrix: The matrix to add loops to
:param loop_value: Value to use for self-loops
:returns: The matrix with self-loops
"""
shape = matrix.shape
assert shape[0] == shape[1], "Error, matrix is not square"
if isspmatrix(matrix):
new_matrix = matrix.todok()
else:
new_matrix = matrix.copy()
for i in range(shape[0]):
new_matrix[i, i] = loop_value
if isspmatrix(matrix):
return new_matrix.tocsc()
return new_matrix
def prune(matrix, threshold):
"""
Prune the matrix so that very small edges are removed.
The maximum value in each column is never pruned.
:param matrix: The matrix to be pruned
:param threshold: The value below which edges will be removed
:returns: The pruned matrix
"""
if isspmatrix(matrix):
pruned = dok_matrix(matrix.shape)
pruned[matrix >= threshold] = matrix[matrix >= threshold]
pruned = pruned.tocsc()
else:
pruned = matrix.copy()
pruned[pruned < threshold] = 0
# keep max value in each column. same behaviour for dense/sparse
num_cols = matrix.shape[1]
row_indices = matrix.argmax(axis=0).reshape((num_cols,))
col_indices = np.arange(num_cols)
pruned[row_indices, col_indices] = matrix[row_indices, col_indices]
return pruned
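# Illustrative example of the rule above (values are arbitrary): with
# threshold 0.05 both 0.02 and 0.01 fall below the cut, but 0.02 survives
# because it is the maximum of its column.
def _prune_sketch():
    m = np.array([[0.9, 0.02],
                  [0.1, 0.01]])
    return prune(m, 0.05)   # -> [[0.9, 0.02], [0.1, 0.0]]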
def converged(matrix1, matrix2):
"""
Check for convergence by determining if
matrix1 and matrix2 are approximately equal.
:param matrix1: The matrix to compare with matrix2
:param matrix2: The matrix to compare with matrix1
:returns: True if matrix1 and matrix2 approximately equal
"""
if isspmatrix(matrix1) or isspmatrix(matrix2):
return sparse_allclose(matrix1, matrix2)
return np.allclose(matrix1, matrix2)
def iterate(matrix, expansion, inflation):
"""
Run a single iteration (expansion + inflation) of the mcl algorithm
:param matrix: The matrix to perform the iteration on
:param expansion: Cluster expansion factor
:param inflation: Cluster inflation factor
"""
# Expansion
matrix = expand(matrix, expansion)
# Inflation
matrix = inflate(matrix, inflation)
return matrix
def get_clusters(matrix):
"""
Retrieve the clusters from the matrix
:param matrix: The matrix produced by the MCL algorithm
:returns: A list of tuples where each tuple represents a cluster and
contains the indices of the nodes belonging to the cluster
"""
if not isspmatrix(matrix):
# cast to sparse so that we don't need to handle different
# matrix types
matrix = csc_matrix(matrix)
# get the attractors - non-zero elements of the matrix diagonal
attractors = matrix.diagonal().nonzero()[0]
# somewhere to put the clusters
clusters = set()
# the nodes in the same row as each attractor form a cluster
for attractor in attractors:
cluster = tuple(matrix.getrow(attractor).nonzero()[1].tolist())
clusters.add(cluster)
return sorted(list(clusters))
def run_mcl(matrix, expansion=2, inflation=2, loop_value=1,
iterations=100, pruning_threshold=0.001, pruning_frequency=1,
convergence_check_frequency=1, verbose=False):
"""
Perform MCL on the given similarity matrix
:param matrix: The similarity matrix to cluster
:param expansion: The cluster expansion factor
:param inflation: The cluster inflation factor
:param loop_value: Initialization value for self-loops
:param iterations: Maximum number of iterations
(actual number of iterations will be less if convergence is reached)
:param pruning_threshold: Threshold below which matrix elements will be
set to 0
:param pruning_frequency: Perform pruning every 'pruning_frequency'
iterations.
:param convergence_check_frequency: Perform the check for convergence
every convergence_check_frequency iterations
:param verbose: Print extra information to the console
:returns: The final matrix
"""
assert expansion > 1, "Invalid expansion parameter"
assert inflation > 1, "Invalid inflation parameter"
assert loop_value >= 0, "Invalid loop_value"
assert iterations > 0, "Invalid number of iterations"
assert pruning_threshold >= 0, "Invalid pruning_threshold"
assert pruning_frequency > 0, "Invalid pruning_frequency"
assert convergence_check_frequency > 0, "Invalid convergence_check_frequency"
printer = MessagePrinter(verbose)
printer.print("-" * 50)
printer.print("MCL Parameters")
printer.print("Expansion: {}".format(expansion))
printer.print("Inflation: {}".format(inflation))
if pruning_threshold > 0:
printer.print("Pruning threshold: {}, frequency: {} iteration{}".format(
pruning_threshold, pruning_frequency, "s" if pruning_frequency > 1 else ""))
else:
printer.print("No pruning")
printer.print("Convergence check: {} iteration{}".format(
convergence_check_frequency, "s" if convergence_check_frequency > 1 else ""))
printer.print("Maximum iterations: {}".format(iterations))
printer.print("{} matrix mode".format("Sparse" if isspmatrix(matrix) else "Dense"))
printer.print("-" * 50)
# Initialize self-loops
if loop_value > 0:
matrix = add_self_loops(matrix, loop_value)
# Normalize
matrix = normalize(matrix)
# iterations
for i in range(iterations):
printer.print("Iteration {}".format(i + 1))
# store current matrix for convergence checking
last_mat = matrix.copy()
# perform MCL expansion and inflation
matrix = iterate(matrix, expansion, inflation)
# prune
if pruning_threshold > 0 and i % pruning_frequency == pruning_frequency - 1:
printer.print("Pruning")
matrix = prune(matrix, pruning_threshold)
# Check for convergence
if i % convergence_check_frequency == convergence_check_frequency - 1:
printer.print("Checking for convergence")
if converged(matrix, last_mat):
printer.print("Converged after {} iteration{}".format(i + 1, "s" if i > 0 else ""))
break
printer.print("-" * 50)
return matrix
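# A minimal usage sketch; the random symmetric matrix is an illustrative
# stand-in for a real adjacency/similarity matrix.
def _example_mcl_run():
    """
    Run MCL on a small symmetric matrix, then read the clusters off the
    converged matrix.
    """
    adjacency = np.random.rand(12, 12)
    adjacency = (adjacency + adjacency.T) / 2   # symmetrise
    result = run_mcl(adjacency, inflation=2)
    return get_clusters(result)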
| mit |
RobertABT/heightmap | build/matplotlib/lib/matplotlib/dates.py | 4 | 42875 | #!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutils`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pytz.sourceforge.net>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <http://labix.org/python-dateutil>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, eg MO, TU
* :class:`MonthLocator`: locate months, eg 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<http://labix.org/python-dateutil>`_) which allow almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`MultipleDateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import print_function
import re
import time
import math
import datetime
from itertools import izip
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MINUTES_PER_DAY = 60. * HOURS_PER_DAY
SECONDS_PER_DAY = 60. * MINUTES_PER_DAY
MUSECONDS_PER_DAY = 1e6 * SECONDS_PER_DAY
SEC_PER_MIN = 60
SEC_PER_HOUR = 3600
SEC_PER_DAY = SEC_PER_HOUR * 24
SEC_PER_WEEK = SEC_PER_DAY * 7
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if hasattr(dt, 'hour'):
base += (dt.hour / HOURS_PER_DAY + dt.minute / MINUTES_PER_DAY +
dt.second / SECONDS_PER_DAY +
dt.microsecond / MUSECONDS_PER_DAY
)
return base
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix)
remainder = float(x) - ix
hour, remainder = divmod(24 * remainder, 1)
minute, remainder = divmod(60 * remainder, 1)
second, remainder = divmod(60 * remainder, 1)
microsecond = int(1e6 * remainder)
if microsecond < 10:
microsecond = 0 # compensate for rounding errors
dt = datetime.datetime(
dt.year, dt.month, dt.day, int(hour), int(minute), int(second),
microsecond, tzinfo=UTC).astimezone(tz)
if microsecond > 999990: # compensate for rounding errors
dt += datetime.timedelta(microseconds=1e6 - microsecond)
return dt
class strpdate2num:
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
def datestr2num(d):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`. *d* can be a single string or a
sequence of strings.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d)
return date2num(dt)
else:
return date2num([dateutil.parser.parse(s) for s in d])
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
return np.asarray([_to_ordinalf(val) for val in d])
def julian2num(j):
'Convert a Julian date (or sequence) to a matplotlib date (or sequence).'
if cbook.iterable(j):
j = np.asarray(j)
return j - 1721424.5
def num2julian(n):
'Convert a matplotlib date (or sequence) to a Julian date (or sequence).'
if cbook.iterable(n):
n = np.asarray(n)
return n + 1721424.5
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
return [_from_ordinalf(val, tz) for val in x]
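# A small illustrative sketch of the conversions above (the date is
# arbitrary): 2006-04-01 06:00 maps to 732402.25 -- ordinal 732402 plus a
# quarter of a day -- and num2date inverts it when given an explicit tz.
def _date2num_sketch():
    d = datetime.datetime(2006, 4, 1, 6, 0, 0)
    x = date2num(d)               # 732402.25
    return x, num2date(x, tz=UTC)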
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
step = (delta.days + delta.seconds / SECONDS_PER_DAY +
delta.microseconds / MUSECONDS_PER_DAY)
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
# calculate the difference between dend and dstart in times of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure, that an half open interval will be generated [dstart, dend)
if dinterval_end >= dend:
# if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
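# Illustrative sketch (dates are arbitrary): daily ordinals for the
# half-open interval [2004-02-01, 2004-02-08), i.e. seven values.
def _drange_sketch():
    start = datetime.datetime(2004, 2, 1)
    stop = datetime.datetime(2004, 2, 8)
    return drange(start, stop, datetime.timedelta(days=1))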
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is an :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, eg with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _findall(self, text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i = j + 1
return sites
# Dalke: I hope I did this math right. Every 28 years the
# calendar repeats, except through century leap years excepting
# the 400 year leap years. But only if you're using the Gregorian
# calendar.
def strftime(self, dt, fmt):
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year > 1900:
return cbook.unicode_safe(dt.strftime(fmt))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year) // 28) * 28
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = self._findall(s1, str(year))
s2 = time.strftime(fmt, (year + 28,) + timetuple[1:])
sites2 = self._findall(s2, str(year + 28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%4d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site + 4:]
return cbook.unicode_safe(s)
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
formatter = AutoDateFormatter()
formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autofmt the date labels. The default format is the one to use
if none of the times in scaled match
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {365.0: '%Y',
30.: '%b %Y',
1.0: '%b %d %Y',
1. / 24.: '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f'}
def __call__(self, x, pos=0):
scale = float(self._locator._get_unit())
fmt = self.defaultfmt
for k in sorted(self.scaled):
if k >= scale:
fmt = self.scaled[k]
break
self._formatter = DateFormatter(fmt, self._tz)
return self._formatter(x, pos)
class rrulewrapper:
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
self.tz = tz
def datalim_to_dt(self):
dmin, dmax = self.axis.get_data_interval()
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
vmin, vmax = self.axis.get_view_interval()
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
if dmin > dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((dmax, dmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, dmin, dmax,
self.MAXTICKS * 2))
dates = self.rule.between(dmin, dmax, True)
if len(dates) == 0:
return date2num([dmin, dmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if (freq == YEARLY):
return 365.0
elif (freq == MONTHLY):
return 30.0
elif (freq == WEEKLY):
return 7.0
elif (freq == DAILY):
return 1.0
elif (freq == HOURLY):
return (1.0 / 24.0)
elif (freq == MINUTELY):
return (1.0 / (24 * 60))
elif (freq == SECONDLY):
return (1.0 / (24 * 3600))
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin > dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(izip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32), range(0, 24),
range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if vmin == vmax:
vmin = vmin - 365 * 2
vmax = vmax + 365 * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
numYears = (delta.years * 1.0)
numMonths = (numYears * 12.0) + delta.months
numDays = (numMonths * 31.0) + delta.days
numHours = (numDays * 24.0) + delta.hours
numMinutes = (numHours * 60.0) + delta.minutes
numSeconds = (numMinutes * 60.0) + delta.seconds
numMicroseconds = (numSeconds * 1e6) + delta.microseconds
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(izip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
dmin, dmax = self.viewlim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
ticks = [dmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
o = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
o = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
o = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self, *args, **kwargs):
vmin, vmax = self.axis.get_view_interval()
vmin *= MUSECONDS_PER_DAY
vmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(vmin, vmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
'Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.'
delta = d2 - d1
mus = abs(delta.days * MUSECONDS_PER_DAY + delta.seconds * 1e6 +
delta.microseconds)
assert(mus < epsilon)
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert(delta < epsilon)
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
spd = 24. * 3600.
return 719163 + np.asarray(e) / spd
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
spd = 24. * 3600.
return (np.asarray(d) - 719163) * spd
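# Illustrative sketch of the pair above: the Unix epoch (1970-01-01 00:00 UTC)
# corresponds to day number 719163 in this encoding, and num2epoch undoes
# epoch2num.
def _epoch_sketch():
    num = epoch2num(0.0)          # -> 719163.0
    return num, num2epoch(num)    # -> (719163.0, 0.0)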
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / 24.
minutes = span * 24 * 60
hours = span * 24
days = span
weeks = span / 7.
months = span / 31. # approx
years = span / 365.
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif weeks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hours > numticks:
locator = HourLocator(interval=int(math.ceil(hours / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif minutes > numticks:
locator = MinuteLocator(interval=int(math.ceil(minutes / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
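# Illustrative sketch (the 45-day span is an arbitrary assumption): a span of
# about six weeks yields a WeekdayLocator with a '%a, %b %d' formatter, ready
# for ax.xaxis.set_major_locator / ax.xaxis.set_major_formatter.
def _ticker_factory_sketch():
    locator, formatter = date_ticker_factory(45)
    return locator, formatter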
def seconds(s):
'Return seconds as days.'
return float(s) / SEC_PER_DAY
def minutes(m):
'Return minutes as days.'
return float(m) / MINUTES_PER_DAY
def hours(h):
'Return hours as days.'
return h / 24.
def weeks(w):
'Return weeks as days.'
return w * 7.
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
'Return the tzinfo instance of *x* or of its first element, or None'
try:
x = x[0]
except (TypeError, IndexError):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
| mit |
lache/RacingKingLee | monitor/engine.win64/2.74/python/lib/site-packages/numpy/lib/twodim_base.py | 37 | 26758 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
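# For example, _min_int(0, 200) returns int16: int8 tops out at 127, while
# int16 covers both bounds.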
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return v.diagonal(k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
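# Editor's note (illustrative addition, not part of the original module): a
# quick doctest-style check of the lower-triangle condition documented above,
# assuming ``import numpy as np``:
#
#     >>> T = np.tri(3, 3, k=0, dtype=int)
#     >>> all(T[i, j] == (1 if j <= i else 0)
#     ...     for i in range(3) for j in range(3))
#     True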
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print(H[::-1])  # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
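# Editor's note (illustrative addition, not part of the original module):
# ``tril_indices_from`` has no Examples section above, so here is a minimal
# usage sketch, assuming ``import numpy as np``:
#
#     >>> a = np.arange(16).reshape(4, 4)
#     >>> il = np.tril_indices_from(a)
#     >>> a[il]
#     array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])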
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
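# Editor's note (illustrative addition, not part of the original module): a
# minimal usage sketch for ``triu_indices_from``, assuming
# ``import numpy as np``:
#
#     >>> a = np.arange(16).reshape(4, 4)
#     >>> iu = np.triu_indices_from(a)
#     >>> a[iu]
#     array([ 0,  1,  2,  3,  5,  6,  7, 10, 11, 15])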
| mit |
carrillo/scikit-learn | setup.py | 76 | 9370 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(scipy_min_version)
scipy_status['version'] = scipy_version
except ImportError:
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(numpy_min_version)
numpy_status['version'] = numpy_version
except ImportError:
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
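# Editor's note (illustrative sketch, not part of the original setup script):
# both status helpers above return the same two-key dictionary, e.g.
#
#     >>> status = get_numpy_status()
#     >>> sorted(status.keys())
#     ['up_to_date', 'version']
#     >>> isinstance(status['up_to_date'], bool)
#     True
#
# where ``version`` is the installed version string, or '' if the import
# failed.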
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy, for example when
# pip is used to install Scikit-learn while Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
numpy_min_version)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
scipy_min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
springer-math/Mathematics-of-Epidemics-on-Networks | setup.py | 1 | 1049 | #!/usr/bin/env python
r'''
Setup script for EoN (Epidemics on Networks)
to install from this script, run
python setup.py install
Alternately, you can install with pip.
pip install EoN
If this is a "release candidate" (has an "rc" in the version name below), then
pip will download the previous version - see the download_url below.
'''
from setuptools import setup
setup(name='EoN',
packages = ['EoN'],
version='1.2rc1', #http://semver.org/
description = 'Epidemics on Networks',
author = 'Joel C. Miller, Istvan Z. Kiss, and Peter Simon',
author_email = '[email protected]',
url = 'https://springer-math.github.io/Mathematics-of-Epidemics-on-Networks/',
#download_url = 'https://github.com/springer-math/Mathematics-of-Epidemics-on-Networks/archive/1.1.tar.gz',
keywords = ['Epidemics on Networks', 'Epidemic Sonnet Works'],
install_requires = [
'networkx>=2',
'numpy',
'scipy',
'matplotlib'
],
)
| mit |
mjgrav2001/scikit-learn | sklearn/linear_model/coordinate_descent.py | 37 | 74167 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices,
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
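# Editor's note (illustrative sketch, not part of the original module): on a
# small dense problem the grid above starts at
# alpha_max = max_j |X^T y|_j / (n_samples * l1_ratio) and decreases
# geometrically down to alpha_max * eps, e.g.
#
#     >>> X = np.array([[1., 0.], [0., 2.], [1., 1.]])
#     >>> y = np.array([1., 0., 1.])
#     >>> alphas = _alpha_grid(X, y, fit_intercept=False, n_alphas=5)
#     >>> len(alphas), bool(alphas[0] > alphas[-1])
#     (5, True)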
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of the norms of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster at computing this path. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of the norms of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False, ensure_2d=False)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already.
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
copy=False)
if alphas is None:
# No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
precompute = check_array(precompute, 'csc', dtype=np.float64, order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
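# Editor's note (illustrative sketch, not part of the original module): the
# enet_path docstring has no Examples section, so here is a minimal call
# showing the returned shapes on toy data:
#
#     >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
#     >>> y = np.array([1, 2, 3.1])
#     >>> alphas, coefs, gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=10)
#     >>> alphas.shape, coefs.shape, gaps.shape
#     ((10,), (2, 10), (10,))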
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F', copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
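# Editor's note (illustrative sketch, not part of the original class): per the
# Notes in ``fit`` above, passing float64 Fortran-ordered data avoids an extra
# copy inside the coordinate descent solver, e.g.
#
#     >>> X = np.asfortranarray(np.random.rand(50, 5))
#     >>> y = np.random.rand(50)
#     >>> model = ElasticNet(alpha=0.1).fit(X, y)
#     >>> model.coef_.shape
#     (5,)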
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
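# Editor's note (worked illustration, not part of the original module): the
# ElasticNet docstring expresses the penalty as ``a * L1 + b * L2`` with
# ``alpha = a + b`` and ``l1_ratio = a / (a + b)``.  For hypothetical
# strengths a and b the conversion is simply:
#
#     >>> a, b = 0.75, 0.25
#     >>> alpha, l1_ratio = a + b, a / (a + b)
#     >>> alpha, l1_ratio
#     (1.0, 0.75)
#
# i.e. ElasticNet(alpha=1.0, l1_ratio=0.75) corresponds to an L1 strength of
# a = 0.75 and an L2 strength of b = 0.25 in the docstring's formulation.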
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
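# Editor's note (illustrative sketch, not part of the original module): one
# manual call to ``_path_residuals`` with a single hypothetical (train, test)
# split, mirroring what the cross-validation loop in ``LinearModelCV.fit``
# below does for every fold:
#
#     >>> X = np.random.rand(20, 3)
#     >>> y = np.random.rand(20)
#     >>> train, test = np.arange(15), np.arange(15, 20)
#     >>> params = dict(fit_intercept=True, normalize=False, precompute=False,
#     ...               n_alphas=5, eps=1e-3)
#     >>> mses = _path_residuals(X, y, train, test, lasso_path, params)
#     >>> mses.shape
#     (5,)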
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was "
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula).
intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
| bsd-3-clause |
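A minimal usage sketch for the cross-validated estimators defined in the file above (editorial addition, not part of the original source; the data is synthetic and the attribute shapes are the ones documented in the docstrings):

import numpy as np
from sklearn.linear_model import LassoCV, ElasticNetCV

rng = np.random.RandomState(0)
X = rng.randn(100, 10)
y = X[:, 0] + 0.5 * X[:, 1] + 0.01 * rng.randn(100)

# LassoCV picks alpha by cross-validation along the regularization path.
lasso = LassoCV(n_alphas=50, cv=3).fit(X, y)
print(lasso.alpha_, lasso.mse_path_.shape)   # mse_path_ is (n_alphas, n_folds)

# ElasticNetCV additionally selects l1_ratio from a user-supplied list.
enet = ElasticNetCV(l1_ratio=[.1, .5, .9, 1], cv=3).fit(X, y)
print(enet.alpha_, enet.l1_ratio_)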
pyrrho314/recipesystem | trunk/gempy/adlibrary/extract.py | 1 | 37277 | #! /usr/bin/env python
import sys, os
import time
import numpy as np
from matplotlib import pyplot as pl
import pyfits as pf
from astrodata import AstroData, new_pyfits_version
from astrodata import Lookups
from gempy.library import gfit
from gempy.adlibrary import segmentation as seg
# Load the timestamp keyword dictionary.
timestamp_keys = Lookups.get_lookup_table("Gemini/timestamp_keywords",
"timestamp_keys")
def print_timing(func):
def wrapper(*arg,**kargs):
t1 = time.time()
res = func(*arg,**kargs)
t2 = time.time()
print '%s took %0.3fs' % (func.func_name, (t2-t1))
return res
return wrapper
def trace_footprints(ad, function='polynomial', order=2,
trace_threshold=1., debug=False):
"""
This function finds the footprint edges of spectroscopic flats, creates a
BINTABLE extension with the footprint parameters and appends it to the output
AstroData object.
:param ad: Input Astrodata object.
:param function: Name of the fitting function to use.
:type function: Default is 'polynomial'.
:param order: Degree of the polynomial.
:type order: Default is 2
:param trace_threshold: Threshold in units of sigma to applied to the
filtered image.
:type trace_threshold: Default is 1.
:param debug: For debugging purposes.
:return adoutput: Output Astrodata object containing the input ad object plus
the TRACEFP binary table.
"""
#try:
# Find edges in the image, pair them in spectrum edges.
footprints = find_footprints(ad, function=function, order=order,
trace_threshold=trace_threshold,debug=debug)
# Create a FootprintTrace object.
ft = FootprintTrace(footprints)
# Use the footprint information to prepare BINTABLE format.
# tb_adout is of type AD
tb_adout = ft.as_bintable()
# Append to the input AD object
ad.append(tb_adout)
#except:
# raise SystemError(repr(sys.exc_info()[1]))
return ad
def find_footprints(ad, function='polynomial', order=2, trace_threshold=1.,
debug=False):
"""
Function to find footprint edges in an image that has
been processed with the Sobel convolution kernel.
These are the steps to find the footprint:
1) Use the edge_detector_data() function to read the input data
and MDF metadata from the AstroData object.
2) The function in 1) returns an <instrument>_EdgeDetector
subclass object.
3) Use this class method find_edges() returning two lists of lists,
one is the list of (x,y) coordinates for all the left/bottom edges
in the image and the other is the list of all the right/top edges
in each spectrum.
::
Input
ad: AstroData object.
function: Name of the fitting function to use.
order: Degree of the polynomial.
trace_threshold:
Threshold in units of sigma to applied to the
filtered image.
Output: List of Footprint objects where each object contains:
id: Integer reference number for footprint
region: Section of the image where the footprint solution is
valid, (x1, x2, y1, y2)
edges: Tuple of Edge object (edge_1,edge_2) defining the
long edges of the footprint.
width: Average width of the footprint (calculated with the edges)
"""
# After setting metadata instantiate an EdgeDetector
# class object, returning the object as the subclass
# instance indicated by the 'ad' object.
edt = seg.EdgeDetector(ad)
# Get a list of lists with (x,y) for each edge.
(xylist_1,xylist_2) = edt.find_edges()
# --- Create 2 lists of Edge objects. One (edge_1)
# has the left/bottom edge of the footprints and
# edge_2 has the right/top edge of the footprints.
# get orientation in degrees
orientation = [0, 90][edt.axis]
# initialize list of left/bottom (edge_1) edges and
# right/top (edge_2) edges.
edges_1 = []
edges_2 = []
# the lists xylist_1 and _2 are list of lists:
# xylist_ = [[] for _ in range(len(reference_edges))]
k=0
for xy_1,xy_2 in zip(xylist_1,xylist_2):
low = edt.footprint_low[k]
high = edt.footprint_high[k]
# For the current footprint (has 2 edges)
# get the list of x's and y's.
xx1 = np.asarray([x for x,y in xy_1])
yy1 = np.asarray([y for x,y in xy_1])
xx2 = np.asarray([x for x,y in xy_2])
yy2 = np.asarray([y for x,y in xy_2])
# For vertical edges, the yy1,yy2 are the independent
# variable arrays. Instantiate the Edge class.
# Instantiates left and right Edge objects
ed1 = seg.Edge(xx1,yy1)
ed2 = seg.Edge(xx2,yy2)
ed1.orientation = orientation
ed2.orientation = orientation
# Set the low and high location in the dispersion
# direction. Fit edges.
set_dispersion_limits(low,high,ed1,ed2,function,order)
k += 1
# add to the current lists of edges.
edges_1.append(ed1)
edges_2.append(ed2)
# --- Now that we have both footprint edges, make a list of
# Footprint objects.
footprints = []
sn = 1
for ed1,ed2 in zip(edges_1,edges_2):
footprint = Footprint(sn, ed1, ed2)
footprints.append(footprint)
sn += 1
if debug:
_plot_footprints(edt.image, footprints)
return footprints
def set_dispersion_limits(low,high,ed1,ed2,function,order):
""" Dispersion limits (low,high) are the minimum and
maximum value in the dispersion direction. They
are calculated using the instrument parameters
like gratings and filters plus tables in
dictionaries and others. Please see footprint_len()
for instrument specific details.
If low and high are defined, set them as values in the
Edge.trace (xx,yy) members and recalculate the
fit function.
"""
# Change the function and/or order if they are not the defaults.
if (function != 'polynomial') | (order != 2):
for ed in (ed1, ed2):
ed.setfunction(function)
ed.setorder(order)
for ed in [ed1,ed2]:
xx,yy = ed.trace
# fit Edges
ed.fitfunction()
# Make sure we have points in the range (low, high) in dispersion
if ed.orientation == 90:
g = np.where((yy >= low) & (yy <= high))
xx,yy = (xx[g],yy[g])
# Evaluate the spatial coordinate low and high
xx[0],xx[-1] = ed.evalfunction([low,high])
yy[0],yy[-1] = (low,high)
else:
g = np.where((xx >= low) & (xx <= high))
# Evaluate the spatial coordinate low and high
xx[0],xx[-1] = (low,high)
yy[0],yy[-1] = ed.evalfunction([low,high])
# Reset the trace attribute with these updated coordinates.
ed.trace = (xx,yy)
# Now that we have good extreme points, fit again to set
# xlim,ylim as well.
ed.fitfunction()
return
def _plot_footprints(image,footprints):
"""
NOTE: This is for development. Not for
public release.
Plot the edges in the list array self.trace
Plot the footprint edges using the fitting functions.
"""
try:
from stsci.numdisplay import display
except ImportError:
from numdisplay import display
orientation = footprints[0].edges[0].orientation
pl.clf()
med = np.median(np.where(image>0,image,0))
for k,footprint in enumerate(footprints):
edge1 = footprint.edges[0]; edge2 = footprint.edges[1]
xx1,yy1 = np.asarray(edge1.trace,int)
xx2,yy2 = np.asarray(edge2.trace,int)
# Plot (x_array,y_array)
evf1 = edge1.evalfunction
evf2 = edge2.evalfunction
if orientation == 90:
xx1 = np.asarray(evf1(yy1),int)
xx2 = np.asarray(evf2(yy2),int)
pl.plot(xx1,yy1,'b',xx2,yy2,'r')
else:
yy1 = np.asarray(evf1(xx1),int)
yy2 = np.asarray(evf2(xx2),int)
pl.plot(xx1,yy1,'b',xx2,yy2,'r')
image[yy1,xx1]=med*2
image[yy2,xx2]=med*2
display(image,frame=2,quiet=True)
def _fit_edges(edges):
"""
Fit a function to each edge.
Input:
edges: List of Edge object with (x,y) traces
returns:
edges: Same as input plus fit coefficients for each edge.
"""
for edge in edges:
# Use default edge.function, edge.order
edge.fitfunction()
class Footprint(object):
"""
Provides facilities to create footprints, each containing a pair
of edge traces.
::
Footprint attributes:
id: Integer reference number for footprint
region: Section of the image where the footprint solution is valid,
(x1, x2, y1, y2)
edges: Tuple of Edge object (edge_1,edge_2) defining the
long edges of the footprint.
width: Average width of the footprint.
"""
def __init__(self, id, edge_1,edge_2):
self.id = id
self.edges = (edge_1,edge_2)
x1 = np.min(edge_1.xlim+edge_2.xlim)
x2 = np.max(edge_1.xlim+edge_2.xlim)
y1 = np.min(edge_1.ylim+edge_2.ylim)
y2 = np.max(edge_1.ylim+edge_2.ylim)
if edge_1.orientation == 90:
# Evaluate x2 with the fitting function using the
# largest of y2 from both edges.
x2 = edge_2.evalfunction(y2)[0]
self.width = edge_2.evalfunction(y1)-edge_1.evalfunction(y1)
else:
# Evaluate y2 with the fitting function using the
# largest of x2 from both edges.
y2 = edge_2.evalfunction(x2)[0]
self.width = edge_2.evalfunction(x1)-edge_1.evalfunction(x1)
self.region = (x1, x2, y1, y2)
class FootprintTrace(object):
"""
FootprintTrace provides facilities to create a BINTABLE
extension with the input footprint list of objects.
Attributes:
footprints: Footprint object list.
Methods:
as_bintable: Creates BINTABLE
"""
def __init__(self,footprints):
self.footprints = footprints
def as_bintable(self):
"""
Creates a BINTABLE object from the
FootprintTrace object.
Input:
self.footprints: list of Footprint objects.
Output:
AD: HDU astrodata object with a TRACEFP bintable extension.
**Column description**
::
'id' : integer reference number for footprint.
'region' : (x1,x2,y1,y2), window of pixel co-ords enclosing this
footprint. The origin of these coordinates could be
the lower left of the original image.
'range1' : (x1,x2,y1,y2), range where edge_1 is valid.
The origin of these coordinates is the lower left of the
original image.
'function1': Fit function name (default: polynomial) fitting edge_1.
'coeff1' : Array of coefficients, high to low order, such that
pol(x) = c1*x**2 + c2*x + c3 (for order 2).
'order1' : Order or polynomial (default: 2).
'range2' : ditto for edge_2.
'function2': ditto for edges_2
'coeff2' : ditto for edges_2
'order2' : ditto for edges_2
'cutrange1' : (x1,x2,y1,y2), range where edge_1 is valid.
The origin of these coordinates is the lower left
of the cutout region.
'cutfunction1': Fit function name (default: polynomial).
'cutcoeff1' : Array of coefficients, high to low order, such that
pol(x) = c1*x**2 + c2*x + c3 (for order 2)
'cutorder1' : Order or polynomial (default: 2).
'cutrange2' : ditto for edge_2
'cutfunction2': ditto for edge_2
'cutcoeff2' : ditto for edge_2
'cutorder2' : ditto for edge_2
"""
footprints = self.footprints
# Get n_coeff. We are assuming it is the same for all edges.
n_coeff = len(footprints[0].edges[0].coefficients)
c1 = pf.Column (name='id',format='J')
c2 = pf.Column (name='region',format='4E')
c3 = pf.Column (name='range1',format='4E')
c4 = pf.Column (name='function1',format='15A')
c5 = pf.Column (name='order1',format='J')
c6 = pf.Column (name='coeff1',format='%dE'%n_coeff)
c7 = pf.Column (name='range2',format='4E')
c8 = pf.Column (name='function2',format='15A')
c9 = pf.Column (name='order2',format='J')
c10 = pf.Column (name='coeff2',format='%dE'%n_coeff)
c11 = pf.Column (name='cutrange1',format='4E')
c12 = pf.Column (name='cutfunction1',format='15A')
c13 = pf.Column (name='cutorder1',format='J')
c14 = pf.Column (name='cutcoeff1',format='%dE'%n_coeff)
c15 = pf.Column (name='cutrange2',format='4E')
c16 = pf.Column (name='cutfunction2',format='15A')
c17 = pf.Column (name='cutorder2',format='J')
c18 = pf.Column (name='cutcoeff2',format='%dE'%n_coeff)
nrows = len(footprints)
tbhdu = pf.new_table(pf.ColDefs([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,\
c11,c12,c13,c14,c15,c16,c17,c18]),nrows=nrows)
tb = tbhdu # an alias
# Write data to table columns
orientation = footprints[0].edges[0].orientation
for k,footprint in enumerate(footprints):
edge1 = footprint.edges[0]; edge2 = footprint.edges[1]
tb.data.field('id')[k] = footprint.id
tb.data.field('region')[k] = np.asarray(footprint.region)
# EDGE_1 DATA with respect to original image co-ords
range1 = np.asarray(edge1.xlim+edge1.ylim) # (x1, x2, y1, y2)
tb.data.field('range1')[k] = range1
tb.data.field('function1')[k] = edge1.function
tb.data.field('order1')[k] = edge1.order
tb.data.field('coeff1')[k] = edge1.coefficients
# EDGE_2 DATA with respect to original image co-ords
range2 = np.asarray(edge2.xlim+edge2.ylim) # (x1, x2, y1, y2)
tb.data.field('range2')[k] = range2
tb.data.field('function2')[k] = edge2.function
tb.data.field('order2')[k] = edge2.order
tb.data.field('coeff2')[k] = edge2.coefficients
region_x1 = footprint.region[0]
region_y1 = footprint.region[2]
# Setup the coefficient of the edge fit functions. We are
# shifting the origin; so refit
lcoeff=[]
zval=[]
for xx,yy in [edge1.trace,edge2.trace]:
# We need to refit inside the cutregion
xmr = xx - region_x1
ymr = yy - region_y1
if orientation == 0:
z = gfit.Gfit(xmr,ymr,edge1.function,edge1.order)
else:
z = gfit.Gfit(ymr,xmr,edge1.function,edge1.order)
lcoeff.append(z.coeff)
zval.append(z)
xlim1 = np.asarray(edge1.xlim)
ylim1 = np.asarray(edge1.ylim)
xlim2 = np.asarray(edge2.xlim)
ylim2 = np.asarray(edge2.ylim)
# Get the maximum values from both edges, so we can zero
# the areas outside the footprint when cutting.
#
if orientation == 0:
# Choose the largest x between both edges.
xmax = max(xlim1[1],xlim2[1])
xlim1[1] = xmax
xlim2[1] = xmax
x1,x2 = (min(0,xlim1[0]),xmax)
# And reevaluate the y values at this xmax
y1 = ylim1[0] - region_y1
y2 = zval[1](xmax)[0]
else:
# Choose the largest y between both edges
ymax = max(ylim1[1],ylim2[1])
ylim1[1] = ymax
ylim2[1] = ymax
y1,y2 = (min(0,ylim1[0]),ymax)
# And reevaluate the x values at this ymax
x1 = xlim1[0] - region_x1
x2 = zval[1](ymax)[0]
# --- Set edge_1 data with respect to cutout image co-ords.
tb.data.field('cutrange1')[k] = (x1,x2,y1,y2)
tb.data.field('cutfunction1')[k] = edge1.function
tb.data.field('cutorder1')[k] = edge1.order
tb.data.field('cutcoeff1')[k] = lcoeff[0]
# --- Set edge_2 data with respect to cutout image co-ords
# Applied offsets to range2 from footprint.region(x1,y1)
tb.data.field('cutrange2')[k] = (x1,x2,y1,y2)
tb.data.field('cutfunction2')[k] = edge2.function
tb.data.field('cutorder2')[k] = edge2.order
tb.data.field('cutcoeff2')[k] = lcoeff[1]
# Add comment to TTYPE card
hdr = tb.header
if new_pyfits_version:
hdr.update = hdr.set
hdr.update('TTYPE2',hdr['TTYPE2'],
comment='(x1,x2,y1,y2): footprint window of pixel co-ords.')
hdr.update('TTYPE3',hdr['TTYPE3'],
comment='(x1,x2,y1,y2): Edge fit window definition.')
hdr.update('TTYPE4',hdr['TTYPE4'], comment='Type of fitting function.')
hdr.update('TTYPE5',hdr['TTYPE5'], comment='Order of the fitting function.')
hdr.update('TTYPE6',hdr['TTYPE6'],
comment='Coeff array: c[0]*x**3 + c[1]*x**2 + c[2]*x + c[3]')
tb.header = hdr
# Create an AD object with this
tabad = AstroData(tbhdu)
tabad.rename_ext("TRACEFP", 1)
return tabad
def _plot_footprints(self):
"""
NOTE: This is for development. Not for
public release.
plot the edges in the list array self.trace
i: sequential number, can be edge number
Plot the footprint edges using the fitting functions.
"""
footprints = self.footprints
orientation = footprints[0].edges[0].orientation
for k,footprint in enumerate(footprints):
edge1 = footprint.edges[0]; edge2 = footprint.edges[1]
xx1,yy1 = edge1.trace
xx2,yy2 = edge2.trace
evf1 = edge1.evalfunction
evf2 = edge2.evalfunction
if orientation == 0:
pl.plot(xx1,evf1(xx1),'b',xx2,evf2(xx2),'r')
else:
pl.plot(evf1(yy1),yy1,'b',evf2(yy2),yy2,'r')
import pywcs
def cut_footprints(ad, debug=False):
""" Creates individual footprint images from the information in the 'TRACEFP'
extension in the input AD object.
It returns an AD object with a list of IMAGE extensions; each
one containing a footprint cut.
INPUT
:param ad: Astrodata object with a 'TRACEFP' bintable extension.
:type ad: AstroData
:param debug: Display and plot. If True it will display on DS9 the cuts
performed with the enclosed footprint. The footprints
will have the edges highlighted to show
how good the tracing was. It also shows
the cut and the edges on a plot.
OUTPUT
:return adout: AD object with a list of IMAGE extensions containing
one footprint per cut as describe in the TRACEFP bintable
extension of the input AD object.
"""
try:
# Instantiate a CutFootprints object
cl = CutFootprints(ad,debug)
# Cut rectangular regions containing a footprint with its DQ and VAR
# region if any. The adout contains as many images extension as there
# entries in the TRACEFP table.
cl.cut_regions()
adout = cl.as_astrodata()
except:
raise SystemError(repr(sys.exc_info()[1]))
return adout
class CutFootprints():
"""
CutFootprints provides facilities to build a list of footprint
sections from the input TRACEFP table in the Astrodata object.
::
Methods
-------
cut_regions
Loop through the records of the TRACEFP table creating
one tuple per iteration containing (region, data,dq,and var
cut out pixel data)
cut_out:
Cut a region enclosing a footprint.
_init_as_astrodata
Initialize parameters to be used by as_astrodata.
as_astrodata
Form an hdu and each cutout and append it to adout.
Members
-------
ad: AD object containing the extension 'TRACEFP'
created by the ULF 'trace_footprint'.
has_var,has_dq:
Booleans to tell whether the VAR or DQ image sections
are in the input AD object.
orientation:
Int. 90 for F2; 0 or 90 for GMOS and GNIRS, depending on the dispersion axis.
filename,instrument:
Use for output information when debug is True.
science,var,dq:
Reference to the input SCI, VAR and DQ image data.
cut_list:
List with the tuples. Each contains region, sci_cut,
var_cut and dq_cut footprint cut pixel data.
wcs: Input header WCS from pywcs.
debug:Display and plot. If True it will display on DS9 the cuts
performed with the enclosed footprint. The footprints
will have the edges highlighted to show
how good the tracing was. It also shows
the cut and the edges on a plot.
"""
def __init__(self,ad,debug=False):
self.ad = ad
self.has_dq = ad['DQ',1] is not None
self.has_var = ad['VAR',1] is not None
self.debug = debug
instrument = ad.instrument()
if instrument == 'F2':
self.orientation = 90
else:
if ad['SCI',1].dispersion_axis() == 1:
self.orientation = 0
else:
self.orientation = 90
if debug:
self.filename = ad.filename
self.instrument = instrument
self.dq = None
self.var = None
self.science = self.ad['SCI',1].data
if self.has_dq:
self.dq = self.ad['DQ', 1].data
if self.has_var:
self.var = self.ad['VAR',1].data
self.cut_list = [] # Contains the list of Cut objects
self.nregions = None # The number of records in TRACEFP
def cut_regions(self):
"""
Loop through the records of the TRACEFP table creating
one tuple per iteration with region,sci,dq,and var sections.
Then it appends each tuple to a list of cuts.
"""
table = self.ad['TRACEFP'].data
self.nregions = len(table.field('id'))
if self.debug:
plot_footprint(self.ad)
for rec in table:
cut_data = self.cut_out(rec)
self.cut_list.append(cut_data)
def cut_out(self,rec):
"""
Cut a region enclosing a footprint. Each cut is defined by 'region'
and the footprint in it is defined by the edge fitting functions.
The science section is zeroed out between the rectangle borders
and the footprint edge. The DQ section is bitwise ORed with 1.
::
Input:
rec: TRACEFP record
Output:
Tuple with region, sci_cut, dq_cut, var_cut data
"""
# Input frames to get cut outs from
science = self.science
var = self.var
dq = self.dq
t1=time.time()
id = rec.field('id')
region = rec.field('region')
cutrange1 = rec.field('cutrange1')
cutrange2 = rec.field('cutrange2')
evf1 = set_evalfunction(rec.field('cutcoeff1'))
evf2 = set_evalfunction(rec.field('cutcoeff2'))
# This is the rectangle coordinates containing one footprint
rx1,rx2,ry1,ry2 = region
# Get data sections. We add 1 to get the last element of the range.
sci_cut = science[ry1:ry2+1,rx1:rx2+1].copy()
# Define empty DQ, VAR cut array elements, in case the AD instance does
# not have them.
dq_cut,var_cut=(None,None)
has_dq = self.has_dq
has_var = self.has_var
if has_dq:
dq_cut = dq[ry1:ry2+1,rx1:rx2+1].copy()
if has_var:
var_cut = var[ry1:ry2+1,rx1:rx2+1].copy()
# Now clear (zero out) the area of the data and dq cuts
# between the rectangle and the footprint edges.
# Make indices representing the indices of a grid.
y,x=np.indices(sci_cut.shape)
# Generate mask values for the whole rectangular cut
# except for the footprint.
if self.orientation == 90:
# Mask values between indices of the cut left side
# and the indices of the left edge.
mask_1 = x < evf1(y)
# Mask values between the right edge indices and
# the indices of the cut right side.
mask_2 = evf2(y) < x
mask = mask_1 + mask_2
else:
# bottom
mask_1 = y < evf1(x)
# top
mask_2 = evf2(x) < y
mask = mask_1 + mask_2
sci_cut[mask] = 0
if has_var: var_cut[mask] = 0
if has_dq: dq_cut[mask] = np.bitwise_or(dq_cut[mask],1)
if self.debug:
# plot and display
plot_footprint_cut(sci_cut,x,y,self.orientation,evf1,evf2,
region, self.filename,self.instrument)
return (region, sci_cut, var_cut, dq_cut)
def _init_as_astrodata(self):
"""
Initialize parameters to be used by as_astrodata.
Creates a WCS object (pywcs) from the SCI header and
form the output AD object with the PHU and MDF from
the input AD. We are adding the TRACEFP extension as well
for later use on the spectral reduction process.
Input:
self.ad: AD object.
Output:
adout: Output AD object with AD phu and MDF
"""
ad = self.ad
# Start output AD with the original phu and the MDF extension.
adout = AstroData(phu=ad.phu)
adout.append(ad['MDF'])
adout.append(ad['TRACEFP'])
# Get wcs information. It is in the PHU
try:
self.wcs = pywcs.WCS(ad.phu.header)
if not hasattr(self.wcs.wcs, 'cd'):
self.wcs = None
except: # Something wrong with WCS, set it to None
self.wcs = None
return adout
def as_astrodata(self):
"""
With each cut object in the cut_list having the SCI,DQ,VAR set,
form an hdu and append it to adout. Update keywords EXTNAME= 'SCI',
EXTVER=<footprint#>, CCDSEC, DISPAXIS, CUTSECT, CUTORDER in the header
and reset WCS information if there was a WCS in the input AD header.
::
Input:
self.cut_list: List of Cut objects.
self.adout: Output AD object with MDF and
TRACEFP extensions.
Output:
adout: contains the appended HDUs.
"""
adout = self._init_as_astrodata()
ad = self.ad
scihdr = ad['SCI',1].header.copy()
if self.has_dq:
dqheader = ad['DQ', 1].header.copy()
if self.has_var:
varheader = ad['VAR',1].header.copy()
# Update NSCIEXT keyword to represent the current number of cuts.
if new_pyfits_version:
adout.phu.header.update = adout.phu.header.set
adout.phu.header.update('NSCIEXT',len(self.cut_list))
# This is a function renaming when using Pyfits 3.1
if new_pyfits_version:
scihdr.update = scihdr.set
extver = 1
# Generate the cuts using the region's sci_cut,var_cut and
# dq_cut
for region,sci_cut,var_cut,dq_cut in self.cut_list:
rx1,rx2,ry1,ry2 = np.asarray(region) + 1 # To 1-based
csec = '[%d:%d,%d:%d]'%(rx1,rx2,ry1,ry2)
scihdr.update('NSCUTSEC',csec,
comment="Region extracted by 'cut_footprints'")
scihdr.update('NSCUTSPC',extver,comment="Spectral order")
form_extn_wcs(scihdr, self.wcs, region)
new_sci_ext = AstroData(data=sci_cut,header=scihdr)
new_sci_ext.rename_ext(name='SCI',ver=extver)
adout.append(new_sci_ext)
if self.has_dq:
new_dq_ext = AstroData(data=dq_cut, header=dqheader)
new_dq_ext.rename_ext(name='DQ',ver=extver)
adout.append(new_dq_ext)
if self.has_var:
new_var_ext = AstroData(data=var_cut, header=varheader)
new_var_ext.rename_ext(name='VAR',ver=extver)
adout.append(new_var_ext)
extver += 1
return adout
def set_evalfunction(coeff):
"""
Utility function to form a polynomial given
a coefficients array.
Input:
coeff: coefficients array, no greater than
5 elements.
Output:
eval: Evaluator function
"""
terms = ['','*x', '*x**2', '*x**3','*x**4']
func = 'lambda x:'
cc = coeff[::-1] # The coefficients are from high order to low
for i in range(len(cc)):
func = func + '+%g'%cc[i]+terms[i]
evf = eval(func)
return evf
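# Illustrative note (not part of the original module): for coeff = [2, 3, 1]
# (highest order first) the string built above is 'lambda x:+1+3*x+2*x**2',
# so set_evalfunction([2, 3, 1])(2) == 15, matching np.polyval([2, 3, 1], 2).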
def form_extn_wcs(scihdr, wcs, region):
"""
Form wcs information for this cut and
update the header. The original WCS information
is used to calculate CRVAL1,2 of the center
of the cut. The CD matrix is unchanged.
We used PYWCS module for the transformations.
Input:
scihdr: SCI header from the original FITS WCS
wcs: WCS instance of pywcs
region: coords of the cut.
"""
if wcs is None: return # Don't do anything if wcs is bad
kwlist = ['equinox','ctype1','cunit1','crpix1','crval1','ctype2','cunit2',
'crpix2','crval2']
# Get the WCS keywords from the PHU
pheader = wcs.to_header()
rx1,rx2,ry1,ry2 = np.asarray(region) + 1 # To 1-based
# Calculate crpix,crval for this section middle point
cpix1 = (rx2-rx1+1)/2.0+rx1
cpix2 = (ry2-ry1+1)/2.0+ry1
(pheader['crval1'],pheader['crval2']), = wcs.wcs_pix2sky([[cpix1,cpix2]],1)
pheader['crpix1'] = cpix1-rx1+1 # Set origin of the section to (1,1)
pheader['crpix2'] = cpix2-ry1+1
if new_pyfits_version:
scihdr.update = scihdr.set
# Now form the WCS header
for kw in kwlist:
scihdr.update(kw,pheader[kw])
# Now CD. PC are the pywcs names for the CD's
scihdr.update('cd1_1',pheader['pc1_1'])
scihdr.update('cd1_2',pheader['pc1_2'])
scihdr.update('cd2_1',pheader['pc2_1'])
scihdr.update('cd2_2',pheader['pc2_2'])
return
def plot_footprint(ad):
""" Plot and display the edges found by trace_footprints.
This information is stored in the TRACEFP bintable
extension of the AD object.
"""
try:
from stsci.numdisplay import display
except ImportError:
from numdisplay import display
if type(ad) is list: ad=ad[0]
if ad.instrument() == 'F2':
orientation = 90
else:
if ad['SCI',1].dispersion_axis() == 1:
orientation = 0
else:
orientation = 90
pl.clf()
tb = ad['TRACEFP'].data
data = ad['SCI',1].data
for rec in tb:
region = rec.field('region')
cutrange1 = rec.field('cutrange1')
cutrange2 = rec.field('cutrange2')
coeff1 = rec.field('cutcoeff1')
coeff2 = rec.field('cutcoeff2')
evf1 = set_evalfunction(coeff1)
evf2 = set_evalfunction(coeff2)
if orientation == 0:
x1,x2,y1,y2 = region
x = np.arange(x1,x2+1)
pl.plot(x,evf1(x)+y1,'b') # edge_1
pl.plot(x,evf2(x)+y1,'r') # edge_2
for x in np.arange(int(x1),int(x2+1.),20):
xi = slice(x,x+5)
xr = np.arange(x,x+5)
yi = list(evf1(xr)+y1) + list(evf2(xr)+y1)
data[yi,xi] = 9999
else: # Orientation is 90
x1,x2,y1,y2 = region
y = np.arange(y1,y2+1)
pl.plot(evf1(y)+x1,y,'b') # edge 1
pl.plot(evf2(y)+x1,y,'r') # edges 2
off = evf1(y1)
for y in range(int(y1),int(y2+1.),20):
yi = slice(y,y+5)
xi = list(evf1(np.arange(y,y+5))+x1-off) + \
list(evf2(np.arange(y,y+5))+x1-off)
data[yi,xi] = 99999
fname = os.path.basename(ad.filename)
pl.title(fname+' ('+ad.instrument()+')')
display(data,frame=1,quiet=True)
time.sleep(3)
def plot_footprint_cut(data,x,y,orientation,evf1,evf2,region,filename,instru):
"""
Plot the footprint cut inside a rectangle then
display the same cut on frame 2.
"""
try:
from stsci.numdisplay import display
except ImportError:
from numdisplay import display
pl.clf()
bval = data.max()
bval += bval/10.
#rx1,rx2,ry1,ry2 = region
ny,nx = data.shape
rxmin,rymin=(nx,ny)
rxmax,rymax=(0,0)
if (True):
if orientation == 0:
x = np.arange(nx)
rxmin,rxmax = (0,nx)
for evf,color in zip([evf1,evf2],['b','r']):
zy = evf(x)
pl.plot(x,zy,color)
imin = np.argmin(zy)
imax = np.argmax(zy)
rymin = min(rymin,zy[imin])
rymax = max(rymax,zy[imax])
pl.fill([rxmin,rxmax,rxmax,rxmin], [rymin,rymin,rymax,rymax],fill=False)
else: # Orientation is 90
y = np.arange(ny)
rymin,rymax = (0,ny)
for evf,color in zip([evf1,evf2],['b','r']):
zx = evf(y)
pl.plot(zx,y,color)
imin = np.argmin(zx)
imax = np.argmax(zx)
rxmin = min(rxmin,zx[imin])
rxmax = max(rxmax,zx[imax])
pl.fill([rxmin,rxmax,rxmax,rxmin], [rymin,rymin,rymax,rymax],fill=False)
fname = os.path.basename(filename)
pl.title(fname+' ('+instru+')')
#pl.xlabel(str(region))
pl.draw()
display(data,frame=2,z1=0,z2=bval,quiet=True)
time.sleep(1)
if __name__ == '__main__':
""" Testing in the unix shell
"""
from astrodata import AstroData
import time
f2='/data2/ed/data/fS20120104S0070.fits'
gnirs = '/data2/ed/data/nN20101215S0475_comb.fits'
gmos = '/data2/ed/data/mgS20100113S0110.fits'
t1=time.time()
for ff in [gmos,f2,gnirs]:
ad = AstroData(ff)
print 'MAIN:>>>>>>>>>>>>>>>>>',ad.instrument(),ad.filename
adout = trace_footprints(ad,debug=False)
print adout.info()
t2 = time.time()
print '.....trace_footprints:','(%.2f curr: %.1f)'%(t2-t1,t2-t1)
cl = CutFootprints(adout,debug=False)
t4=time.time()
cl.cut_regions()
t5=time.time()
print '.....cut_regions:','(%.2f curr: %.1f)'%(t5-t4,t5-t1)
adcut=cl.as_astrodata()
t6=time.time()
print '...cl.as_astrodata:','(%.2f curr: %.1f)'%(t6-t5,t6-t1)
#adcut.filename='adcut.fits'
#adcut.write(clobber=True)
#raw_input('Enter...to continue')
"""
k=1
for ad in adcut:
ad.filename='adlist'+str(k)+'.fits'
print '...Writing', ad.filename
ad.write(clobber=True)
k=k+1
print 'Total time:',time.time()-t1
"""
#nN20120305S0080_comb.fits # GNIRS 4 files comb (.15 pixscale) [4 secs] (new:3.5)
#fS20120104S0070.fits # F2 MOS [21 secs] (new: 8 sec)
#gS20100113S0110.fits # prepare gmos MOS flat
#mgS20100113S0110.fits # Mosaic GMOS [58 secs] (new: 12 sec)
| mpl-2.0 |
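A condensed usage sketch of the two entry points in the module above, mirroring its own __main__ test block (editorial addition; the import path is inferred from the file's location and the filename is simply the GMOS MOS flat used in that test block):

from astrodata import AstroData
from gempy.adlibrary.extract import trace_footprints, cut_footprints

ad = AstroData('mgS20100113S0110.fits')   # MOS flat with an MDF extension
ad = trace_footprints(ad)                 # appends the TRACEFP bintable
adcut = cut_footprints(ad)                # one SCI (+DQ/VAR) cut per footprint
print adcut.info()                        # Python 2, as in the module itself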
andrewcbennett/iris | docs/iris/example_code/General/polynomial_fit.py | 7 | 1443 | """
Fitting a polynomial
====================
This example demonstrates computing a polynomial fit to 1D data from an Iris
cube, adding the fit to the cube's metadata, and plotting both the 1D data and
the fit.
"""
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.quickplot as qplt
def main():
fname = iris.sample_data_path('A1B_north_america.nc')
cube = iris.load_cube(fname)
# Extract a single time series at a latitude and longitude point.
location = next(cube.slices(['time']))
# Calculate a polynomial fit to the data at this time series.
x_points = location.coord('time').points
y_points = location.data
degree = 2
p = np.polyfit(x_points, y_points, degree)
y_fitted = np.polyval(p, x_points)
# Add the polynomial fit values to the time series to take
# full advantage of Iris plotting functionality.
long_name = 'degree_{}_polynomial_fit_of_{}'.format(degree, cube.name())
fit = iris.coords.AuxCoord(y_fitted, long_name=long_name,
units=location.units)
location.add_aux_coord(fit, 0)
qplt.plot(location.coord('time'), location, label='data')
qplt.plot(location.coord('time'),
location.coord(long_name),
'g-', label='polynomial fit')
plt.legend(loc='best')
plt.title('Trend of US air temperature over time')
qplt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
jasonyaw/keras | examples/kaggle_otto_nn.py | 70 | 3775 | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
'''
This demonstrates how to reach a score of 0.4890 (local validation)
on the Kaggle Otto challenge, with a deep net using Keras.
Compatible Python 2.7-3.4. Requires Scikit-Learn and Pandas.
Recommended to run on GPU:
Command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python kaggle_otto_nn.py
On EC2 g2.2xlarge instance: 19s/epoch. 6-7 minutes total training time.
Best validation score at epoch 21: 0.4881
Try it at home:
- with/without BatchNormalization (BatchNormalization helps!)
- with ReLU or with PReLU (PReLU helps!)
- with smaller layers, largers layers
- with more layers, less layers
- with different optimizers (SGD+momentum+decay is probably better than Adam!)
Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data
'''
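# A sketch of the optimizer swap suggested above (values are illustrative,
# not tuned): replace optimizer="adam" in the model.compile() call further
# down with an SGD instance, e.g.
#
#   from keras.optimizers import SGD
#   sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
#   model.compile(loss='categorical_crossentropy', optimizer=sgd)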
def load_data(path, train=True):
df = pd.read_csv(path)
X = df.values.copy()
if train:
np.random.shuffle(X) # https://youtu.be/uyUXoap67N8
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
return X, labels
else:
X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
return X, ids
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
def make_submission(y_prob, ids, encoder, fname):
with open(fname, 'w') as f:
f.write('id,')
f.write(','.join([str(i) for i in encoder.classes_]))
f.write('\n')
for i, probs in zip(ids, y_prob):
probas = ','.join([i] + [str(p) for p in probs.tolist()])
f.write(probas)
f.write('\n')
print("Wrote submission to file {}.".format(fname))
print("Loading data...")
X, labels = load_data('train.csv', train=True)
X, scaler = preprocess_data(X)
y, encoder = preprocess_labels(labels)
X_test, ids = load_data('test.csv', train=False)
X_test, _ = preprocess_data(X_test, scaler)
nb_classes = y.shape[1]
print(nb_classes, 'classes')
dims = X.shape[1]
print(dims, 'dims')
print("Building model...")
model = Sequential()
model.add(Dense(dims, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, nb_classes, init='glorot_uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer="adam")
print("Training model...")
model.fit(X, y, nb_epoch=20, batch_size=128, validation_split=0.15)
print("Generating submission...")
proba = model.predict_proba(X_test)
make_submission(proba, ids, encoder, fname='keras-otto.csv')
| mit |
aarshayj/easyML | easyML/models_classification.py | 1 | 59771 | #####################################################################
##### IMPORT STANDARD MODULES
#####################################################################
#Python 3 support:
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# import pydot
import os
from scipy.stats.mstats import chisquare, mode
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn import metrics, model_selection
from sklearn.feature_selection import RFE, RFECV
from abc import ABCMeta, abstractmethod
# from StringIO import StringIO
# import xgboost as xgb
# from xgboost.sklearn import XGBClassifier
from .genericmodelclass import GenericModelClass
from .data import DataBlock
#####################################################################
##### GENERIC MODEL CLASS
#####################################################################
class base_classification(GenericModelClass):
""" A base class which defines the generic classification functions
and variable definitions.
Parameters
----------
alg : object
An sklearn-style estimator
data_block : object
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
#Define as a meta class to disable direct instances
__metaclass__ = ABCMeta
# Map possible inputs to functions in sklean.metrics.
# Each value of the dictionary is a tuple of 3:
# (function, multi-class support, requires-probabilities)
# function: the sklearn metrics function
# multi-class support: if True, function allows multi-class support
# requires-probabilities: if True, the function requires
# probabilities to be passed as arguments
metrics_map = {
'accuracy':(metrics.accuracy_score,True,False),
'auc':(metrics.roc_auc_score,False,True),
'log_loss':(metrics.log_loss,True,True),
'f1':(metrics.f1_score,True,False),
'average_precision':(metrics.average_precision_score,False,True)
}
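# Illustrative sketch (not executed) of how an entry of metrics_map is
# consumed in calc_model_characteristics below, assuming y_true, y_pred
# and y_prob are already available:
#
#   func, supports_multiclass, needs_proba = base_classification.metrics_map['auc']
#   score = func(y_true, y_prob) if needs_proba else func(y_true, y_pred)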
def __init__(
self, alg, data_block, predictors=[],cv_folds=5,
scoring_metric='accuracy',additional_display_metrics=[]
):
GenericModelClass.__init__(
self, alg=alg, data_block=data_block, predictors=predictors,
cv_folds=cv_folds,scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics)
#Run input datatype checks:
self.check_datatype(data_block,'data_block',DataBlock)
self.subset_check(predictors)
self.check_datatype(cv_folds,'cv_folds',int)
self.check_datatype(scoring_metric,'scoring_metric',basestring)
self.check_datatype(
additional_display_metrics,'additional_display_metrics',list)
#Store predicted probabilities in a dictionary with keys as the
# name of the dataset (train/test/predict) and values as the actual
# predictions.
self.predictions_probabilities = {}
#Boolean to store whether the estimator chosen allows probability
# predictions
self.probabilities_available = True
#Define number of classes in target.
self.num_target_class = len(
self.datablock.train[self.datablock.target].unique())
#A Series object to store generic classification model outcomes.
self.classification_output=pd.Series(
index = ['ModelID','CVScore_mean','CVScore_std','AUC',
'ActualScore (manual entry)','CVMethod','Predictors']
)
#Get the dictionary of available dataframes
self.dp = self.datablock.data_present()
#Check all the entered metrics. Note that this check has to be
#placed after declaration of num_target_class attribute
for metric in [scoring_metric]+additional_display_metrics:
self.check_metric(metric,self.num_target_class)
@classmethod
def check_metric(cls,metric,num_target_class):
if metric not in cls.metrics_map:
raise cls.InvalidInput("The input '%s' is not a valid scoring metric for this module"%metric)
if num_target_class>2:
if not cls.metrics_map[metric][1]:
raise cls.InvalidInput("The %s metric does not support the multi-class classification case"%metric)
def fit_model(
self, performCV=True, printResults=True,
printTopN=None, printConfusionMatrix=True,
printModelParameters=True):
"""An advanced model fit function which fits the model on the
training data and performs cross-validation. It prints a model
report containing the following:
- The parameters being used to fit the model
- Confusion matrix for the train and test data
- Scoring metrics for the train and test data
- CV mean and std scores for scoring metric
- Additional scoring metrics on train and test data, if specified
Note that you can decide which details are to be printed using method
arguments.
Parameters
----------
performCV : bool, default True
if True, the model performs cross-validation using the number of
folds as the cv_folds parameter of the model
printResults : bool, default True
if True, prints the report of the model. This should be kept as
True unless the module is being used in a background script.
printTopN : int, default None
The number of top scored features to be displayed in the feature
importance or coefficient plot of the model. If None, all the
features will be displayed by default. Note:
- For algorithms supporting real coefficients, the features will
be sorted by their magnitudes (absolute values).
- For algorithms supporting positive feature importance scores,
features are sorted on the score itself.
This will be ignored if printResults is False.
printConfusionMatrix : bool, default True
if True, the confusion matrix for the train and test dataframes
are printed, otherwise they are omitted.
This will be ignored if printResults is False.
printModelParameters : bool, default True
if True, the parameters being used to run the model are
printed. It helps in validating the parameters and also makes
jupyter notebooks more informative.
"""
self.check_datatype(performCV,'performCV',bool)
self.check_datatype(printResults,'printResults',bool)
self.check_datatype(printConfusionMatrix,'printConfusionMatrix',bool)
self.check_datatype(printModelParameters,'printModelParameters',bool)
if printTopN:
self.check_datatype(printTopN,'printTopN',int)
self.alg.fit(
self.datablock.train[self.predictors],
self.datablock.train[self.datablock.target])
#Get algo_specific_values
self.algo_specific_fit(printTopN)
#Get predictions:
for key,data in self.dp.items():
self.predictions_class[key] = self.alg.predict(
data[self.predictors])
if self.probabilities_available:
for key,data in self.dp.items():
self.predictions_probabilities[key] = self.alg.predict_proba(
data[self.predictors])
self.calc_model_characteristics(performCV)
if printResults:
self.printReport(printConfusionMatrix, printModelParameters)
def calc_model_characteristics(self, performCV=True):
# Determine key metrics to analyze the classification model. These
# are stored in the classification_output series object belonging to
# this class.
for metric in [self.scoring_metric]+self.additional_display_metrics:
#Determine for both test and train, except predict:
for key,data in self.dp.items():
if key!='predict':
name = '%s_%s'%(metric,key)
#Case where probabilities to be passed as arguments
if base_classification.metrics_map[metric][2]:
self.classification_output[name] = \
base_classification.metrics_map[metric][0](
data[self.datablock.target],
self.predictions_probabilities[key])
#case where class predictions to be passed as arguments
else:
self.classification_output[name] = \
base_classification.metrics_map[metric][0](
data[self.datablock.target],
self.predictions_class[key])
#Determine confusion matrix:
name = 'ConfusionMatrix_%s'%key
self.classification_output[name] = pd.crosstab(
data[self.datablock.target],
self.predictions_class[key]
).to_string()
if performCV:
cv_score = self.KFold_CrossValidation(
scoring_metric=self.scoring_metric)
else:
cv_score = {
'mean_error': 0.0,
'std_error': 0.0
}
self.classification_output['CVMethod'] = \
'KFold - ' + str(self.cv_folds)
self.classification_output['CVScore_mean'] = cv_score['mean_error']
self.classification_output['CVScore_std'] = cv_score['std_error']
self.classification_output['Predictors'] = str(self.predictors)
def printReport(self, printConfusionMatrix, printModelParameters):
# Print the metric determined in the previous function.
print("\nModel Report")
#Output the parameters used for modeling
if printModelParameters:
print('\nModel being built with the following parameters:')
print(self.alg.get_params())
if printConfusionMatrix:
for key,data in self.dp.items():
if key!='predict':
print("\nConfusion Matrix for %s data:"%key)
print(pd.crosstab(
data[self.datablock.target],
self.predictions_class[key])
)
print('Note: rows - actual; col - predicted')
print("\nScoring Metric:")
for key,data in self.dp.items():
if key!='predict':
name = '%s_%s'%(self.scoring_metric,key)
print("\t%s (%s): %s" %
(
self.scoring_metric,
key,
"{0:.3%}".format(self.classification_output[name])
)
)
print("\nCV Score for Scoring Metric (%s):"%self.scoring_metric)
print("\tMean - %f | Std - %f" % (
self.classification_output['CVScore_mean'],
self.classification_output['CVScore_std'])
)
if self.additional_display_metrics:
print("\nAdditional Scoring Metrics:")
for metric in self.additional_display_metrics:
for key,data in self.dp.items():
if key!='predict':
name = '%s_%s'%(metric,key)
print("\t%s (%s): %s" % (
metric,
key,
"{0:.3%}".format(
self.classification_output[name])
)
)
def plot_feature_importance(self, printTopN):
num_print = len(self.feature_imp)
if printTopN is not None:
num_print = min(printTopN,len(self.feature_imp))
self.feature_imp.iloc[:num_print].plot(
kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
plt.show(block=False)
def plot_abs_coefficients(self,coeff,printTopN):
num_print = len(coeff)
if printTopN is not None:
num_print = min(printTopN,num_print)
coeff_abs_sorted = sorted(
coeff.index,
key=lambda x: abs(coeff[x]),
reverse=True
)
coeff[coeff_abs_sorted].iloc[:num_print,].plot(
kind='bar',
title='Feature Coefficients (Sorted by Magnitude)'
)
plt.ylabel('Magnitude of Coefficients')
plt.show(block=False)
def submission_proba(
self, IDcol, proba_colnames,filename="Submission.csv"):
"""
"""
submission = pd.DataFrame({
x: self.datablock.predict[x] for x in list(IDcol)
})
if len(list(proba_colnames))>1:
for i in range(len(proba_colnames)):
submission[proba_colnames[i]] = self.predictions_probabilities['predict'][:,i]
else:
submission[list(proba_colnames)[0]] = self.predictions_probabilities['predict'][:,1]
submission.to_csv(filename, index=False)
def set_parameters(self, param=None, cv_folds=None, set_default=False):
""" Set the parameters of the model. Only the parameters to be
updated are required to be passed.
Parameters
----------
param : dict, default None
A dictionary of key,value pairs where the keys are the parameters
to be updated and values as the new value of those parameters.
If None, no update performed
Ignored if set_default is True.
cv_folds : int, default None
Pass the number of CV folds to be used in the model.
If None, no update performed.
set_default : bool, default False
if True, the model will be set to default parameters as defined
in model definition by scikit-learn. Note that this will not
affect the cv_folds parameter.
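Example (illustrative; the keys passed must belong to the specific
model's default_parameters):

>>> model.set_parameters({'n_estimators': 200}, cv_folds=10)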
"""
#Check input
self.check_datatype(param,'param',dict)
self.check_datatype(set_default,'set_default',bool)
if param:
if not set(param.keys()).issubset(
set(self.default_parameters.keys())
):
raise self.InvalidInput("""The parameters passed should be a
subset of the model parameters""")
if set_default:
param = self.default_parameters
self.alg.set_params(**param)
self.model_output.update(pd.Series(param))
if cv_folds:
self.cv_folds = cv_folds
def export_model_base(self, IDcol, mstr):
self.create_ensemble_dir()
filename = os.path.join(os.getcwd(),'ensemble/%s_models.csv'%mstr)
comb_series = self.classification_output.append(
self.model_output,
verify_integrity=True)
if os.path.exists(filename):
models = pd.read_csv(filename)
mID = int(max(models['ModelID'])+1)
else:
mID = 1
models = pd.DataFrame(columns=comb_series.index)
comb_series['ModelID'] = mID
models = models.append(comb_series, ignore_index=True)
models.to_csv(filename, index=False, float_format="%.5f")
model_filename = os.path.join(
os.getcwd(),
'ensemble/%s_%s.csv'%(mstr,str(mID))
)
self.submission(IDcol, model_filename)
@abstractmethod
def algo_specific_fit(self,printTopN):
#Run algo-specific commands
pass
@abstractmethod
def export_model(self,IDcol):
#Export models
pass
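# Illustrative sketch (class and column names below are placeholders, not
# part of the library) of the contract a concrete subclass fulfils: wrap a
# scikit-learn estimator via base_classification.__init__ and implement the
# two abstract methods above.
#
#   class my_classifier(base_classification):
#       def __init__(self, data_block, predictors=[]):
#           base_classification.__init__(
#               self, alg=LogisticRegression(), data_block=data_block,
#               predictors=predictors)
#       def algo_specific_fit(self, printTopN):
#           pass  # e.g. plot coefficients or feature importances
#       def export_model(self, IDcol):
#           self.export_model_base(IDcol, 'my_classifier')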
#####################################################################
##### LOGISTIC REGRESSION
#####################################################################
class logistic_regression(base_classification):
""" Create a Logistic Regression model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 10
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
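Examples
--------
A minimal usage sketch (illustrative only; assumes `dblock` is an
already-constructed DataBlock and 'feat1', 'feat2' are predictor columns
present in its dataframes):

>>> model = logistic_regression(dblock, predictors=['feat1', 'feat2'])
>>> model.fit_model()
>>> model.set_parameters({'C': 0.5})
>>> model.fit_model()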
"""
default_parameters = {
'C':1.0,
'tol':0.0001,
'solver':'liblinear',
'multi_class':'ovr',
'class_weight':'balanced'
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=LogisticRegression(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output=pd.Series(self.default_parameters)
self.model_output['Coefficients'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
if self.num_target_class==2:
coeff = pd.Series(
np.concatenate(
(self.alg.intercept_,
self.alg.coef_[0])),
index=["Intercept"]+self.predictors
)
self.plot_abs_coefficients(coeff,printTopN)
else:
cols=['coef_class_%d'%i for i in range(0,self.num_target_class)]
coeff = pd.DataFrame(
self.alg.coef_.T,
columns=cols,
index=self.predictors
)
print('\nCoefficients:')
print(coeff)
self.model_output['Coefficients'] = coeff.to_string()
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'logistic_reg')
#####################################################################
##### DECISION TREE
#####################################################################
class decision_tree(base_classification):
""" Create a Decision Tree model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 10
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'criterion':'gini',
'max_depth':None,
'min_samples_split':2,
'min_samples_leaf':1,
'max_features':None,
'random_state':None,
'max_leaf_nodes':None,
'class_weight':'balanced'
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=DecisionTreeClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'decision_tree')
## UNDER DEVELOPMENT CODE FOR PRINTING TREES
# def get_tree(self):
# return self.alg.tree_
# Print the tree in visual format
# Inputs:
# export_pdf - if True, a pdf will be exported with the
# filename as specified in pdf_name argument
# pdf_name - name of the pdf file if export_pdf is True
# def printTree(self, export_pdf=True, file_name="Decision_Tree.pdf"):
# dot_data = StringIO()
# export_graphviz(
# self.alg, out_file=dot_data, feature_names=self.predictors,
# filled=True, rounded=True, special_characters=True)
# export_graphviz(
# self.alg, out_file='data.dot', feature_names=self.predictors,
# filled=True, rounded=True, special_characters=True
# )
# graph = pydot.graph_from_dot_data(dot_data.getvalue())
# if export_pdf:
# graph.write_pdf(file_name)
# return graph
#####################################################################
##### RANDOM FOREST
#####################################################################
class random_forest(base_classification):
""" Create a Random Forest model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 10
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'n_estimators':10,
'criterion':'gini',
'max_depth':None,
'min_samples_split':2,
'min_samples_leaf':1,
'max_features':'auto',
'max_leaf_nodes':None,
'oob_score':False,
'random_state':None,
'class_weight':'balanced',
'n_jobs':1
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=RandomForestClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
self.model_output['OOB_Score'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
if self.model_output['oob_score']:
print('OOB Score : %f' % self.alg.oob_score_)
self.model_output['OOB_Score'] = self.alg.oob_score_
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'random_forest')
#####################################################################
##### EXTRA TREES FOREST
#####################################################################
class extra_trees(base_classification):
""" Create an Extra Trees Forest model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 10
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'n_estimators':10,
'criterion':'gini',
'max_depth':None,
'min_samples_split':2,
'min_samples_leaf':1,
'max_features':'auto',
'max_leaf_nodes':None,
'oob_score':False,
'random_state':None,
'class_weight':'balanced',
'n_jobs':1
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=ExtraTreesClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
self.model_output['OOB_Score'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
if self.model_output['oob_score']:
print('OOB Score : %f' % self.alg.oob_score_)
self.model_output['OOB_Score'] = self.alg.oob_score_
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'extra_trees')
#####################################################################
##### ADABOOST CLASSIFICATION
#####################################################################
class adaboost(base_classification):
""" Create an AdaBoost model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 10
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'n_estimators':50,
'learning_rate':1.0
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=AdaBoostClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
plt.xlabel("AdaBoost Estimator")
plt.ylabel("Estimator Error")
plt.plot(
range(1, int(self.model_output['n_estimators'])+1),
self.alg.estimator_errors_
)
plt.plot(
range(1, int(self.model_output['n_estimators'])+1),
self.alg.estimator_weights_
)
plt.legend(
['estimator_errors','estimator_weights'],
loc='upper left'
)
plt.show(block=False)
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'adaboost')
#####################################################################
##### GRADIENT BOOSTING MACHINE
#####################################################################
class gradient_boosting_machine(base_classification):
""" Create a GBM (Gradient Boosting Machine) model using implementation
from scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 10
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
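Examples
--------
A hypothetical tuning sketch (assumes `dblock` is an existing DataBlock
and `feats` is a list of predictor column names; the values shown are
illustrative, not recommendations):

>>> gbm = gradient_boosting_machine(dblock, predictors=feats)
>>> gbm.set_parameters({'learning_rate': 0.05, 'n_estimators': 300, 'subsample': 0.8})
>>> gbm.fit_model(printTopN=10)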
"""
default_parameters = {
'loss':'deviance',
'learning_rate':0.1,
'n_estimators':100,
'subsample':1.0,
'min_samples_split':2,
'min_samples_leaf':1,
'max_depth':3, 'init':None,
'random_state':None,
'max_features':None,
'verbose':0,
'max_leaf_nodes':None,
'warm_start':False,
'presort':'auto'
}
def __init__(
self, data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=GradientBoostingClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
#Plot OOB estimates if subsample <1:
if self.model_output['subsample']<1:
plt.xlabel("GBM Iteration")
plt.ylabel("Score")
plt.plot(
range(1, self.model_output['n_estimators']+1),
self.alg.oob_improvement_
)
plt.legend(['oob_improvement_','train_score_'], loc='upper left')
plt.show(block=False)
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'gbm')
#####################################################################
##### Support Vector Classifier
#####################################################################
class linear_svm(base_classification):
""" Create a Linear Support Vector Machine model using implementation
from scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 10
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'C':1.0,
'kernel':'linear', #modified not default
'degree':3,
'gamma':'auto',
'coef0':0.0,
'shrinking':True,
'probability':False,
'tol':0.001,
'cache_size':200,
'class_weight':None,
'verbose':False,
'max_iter':-1,
'decision_function_shape':None,
'random_state':None
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=SVC(), data_block=data_block, predictors=predictors,
cv_folds=cv_folds,scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output=pd.Series(self.default_parameters)
self.model_output['Coefficients'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
#Check if probabilities enables:
if not self.alg.get_params()['probability']:
self.probabilities_available = False
def algo_specific_fit(self, printTopN):
if self.num_target_class==2:
coeff = pd.Series(
np.concatenate((self.alg.intercept_,self.alg.coef_[0])),
index=["Intercept"]+self.predictors
)
#print the chart of importances
self.plot_abs_coefficients(coeff, printTopN)
else:
cols=['coef_class_%d'%i for i in range(0,self.num_target_class)]
coeff = pd.DataFrame(
self.alg.coef_.T,
columns=cols,
index=self.predictors
)
print('\nCoefficients:')
print(coeff)
self.model_output['Coefficients'] = coeff.to_string()
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'linear_svm')
#####################################################################
##### XGBOOST ALGORITHM (UNDER DEVELOPMENT)
#####################################################################
"""
#Define the class similar to the overall classification class
class XGBoost(base_classification):
def __init__(self,data_block, predictors, cv_folds=5,scoring_metric_skl='accuracy', scoring_metric_xgb='error'):
base_classification.__init__(self, alg=XGBClassifier(), data_block=data_block, predictors=predictors,cv_folds=cv_folds,scoring_metric=scoring_metric_skl)
#Define default parameters on your own:
self.default_parameters = {
'max_depth':3, 'learning_rate':0.1,
'n_estimators':100, 'silent':True,
'objective':"binary:logistic",
'nthread':1, 'gamma':0, 'min_child_weight':1,
'max_delta_step':0, 'subsample':1, 'colsample_bytree':1, 'colsample_bylevel':1,
'reg_alpha':0, 'reg_lambda':1, 'scale_pos_weight':1,
'base_score':0.5, 'seed':0, 'missing':None
}
self.model_output = pd.Series(self.default_parameters)
#create DMatrix with nan as missing by default. If later this is changed then the matrix are re-calculated. If not set,will give error is nan present in data
self.xgtrain = xgb.DMatrix(self.datablock.train[self.predictors].values, label=self.datablock.train[self.datablock.target].values, missing=np.nan)
self.xgtest = xgb.DMatrix(self.datablock.predict[self.predictors].values, missing=np.nan)
self.num_class = 2
self.n_estimators = 10
self.eval_metric = 'error'
self.train_predictions = []
self.train_pred_prob = []
self.test_predictions = []
self.test_pred_prob = []
self.num_target_class = len(data_train[target].unique())
#define scoring metric:
self.scoring_metric_skl = scoring_metric_skl
# if scoring_metric_xgb=='f1':
# self.scoring_metric_xgb = self.xg_f1
# else:
self.scoring_metric_xgb = scoring_metric_xgb
#Define a Series object to store generic classification model outcomes;
self.classification_output=pd.Series(index=['ModelID','Accuracy','CVScore_mean','CVScore_std','SpecifiedMetric',
'ActualScore (manual entry)','CVMethod','ConfusionMatrix','Predictors'])
#feature importance (g_scores)
self.feature_imp = None
self.model_output['Feature_Importance'] = "-"
#Set parameters to default values:
# self.set_parameters(set_default=True)
#Define custom f1 score metric:
def xg_f1(self,y,t):
t = t.get_label()
y_bin = [1. if y_cont > 0.5 else 0. for y_cont in y] # binaryzing your output
return 'f1',metrics.f1_score(t,y_bin)
# Set the parameters of the model.
# Note:
# > only the parameters to be updated are required to be passed
# > if set_default is True, the passed parameters are ignored and default parameters are set which are defined in scikit learn module
def set_parameters(self, param=None, set_default=False):
if set_default:
param = self.default_parameters
self.alg.set_params(**param)
self.model_output.update(pd.Series(param))
if 'missing' in param:
#update DMatrix with missing:
self.xgtrain = xgb.DMatrix(self.datablock.train[self.predictors].values, label=self.datablock.train[self.datablock.target].values, missing=param['missing'])
self.xgtest = xgb.DMatrix(self.datablock.predict[self.predictors].values, missing=param['missing'])
if 'num_class' in param:
self.num_class = param['num_class']
if 'cv_folds' in param:
self.cv_folds = param['cv_folds']
# def set_feature_importance(self):
# fs = self.alg.booster().get_fscore()
# ftimp = pd.DataFrame({
# 'feature': fs.keys(),
# 'importance_Score': fs.values()
# })
# ftimp['predictor'] = ftimp['feature'].apply(lambda x: self.predictors[int(x[1:])])
# self.feature_imp = pd.Series(ftimp['importance_Score'].values, index=ftimp['predictor'].values)
#Fit the model using predictors and parameters specified before.
# Inputs:
# printCV - if True, CV is performed
def modelfit(self, performCV=True, useTrainCV=False, TrainCVFolds=5, early_stopping_rounds=20, show_progress=True, printTopN='all'):
if useTrainCV:
xgb_param = self.alg.get_xgb_params()
if self.num_class>2:
xgb_param['num_class']=self.num_class
if self.scoring_metric_xgb=='f1':
cvresult = xgb.cv(xgb_param,self.xgtrain, num_boost_round=self.alg.get_params()['n_estimators'], nfold=self.cv_folds,
metrics=['auc'],feval=self.xg_f1,early_stopping_rounds=early_stopping_rounds, show_progress=show_progress)
else:
cvresult = xgb.cv(xgb_param,self.xgtrain, num_boost_round=self.alg.get_params()['n_estimators'], nfold=self.cv_folds,
metrics=self.scoring_metric_xgb, early_stopping_rounds=early_stopping_rounds, show_progress=show_progress)
self.alg.set_params(n_estimators=cvresult.shape[0])
print(self.alg.get_params())
obj = self.alg.fit(self.datablock.train[self.predictors], self.datablock.train[self.datablock.target], eval_metric=self.eval_metric)
#Print feature importance
# self.set_feature_importance()
self.feature_imp = pd.Series(self.alg.booster().get_fscore()).sort_values(ascending=False)
num_print = len(self.feature_imp)
if printTopN is not None:
if printTopN != 'all':
num_print = min(printTopN,len(self.feature_imp))
self.feature_imp.iloc[:num_print].plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
plt.show(block=False)
self.model_output['Feature_Importance'] = self.feature_imp.to_string()
#Get train predictions:
self.train_predictions = self.alg.predict(self.datablock.train[self.predictors])
self.train_pred_prob = self.alg.predict_proba(self.datablock.train[self.predictors])
#Get test predictions:
self.test_predictions = self.alg.predict(self.datablock.predict[self.predictors])
self.test_pred_prob = self.alg.predict_proba(self.datablock.predict[self.predictors])
self.calc_model_characteristics(performCV)
self.printReport()
#Export the model into the model file as well as create a submission with model index. This will be used for creating an ensemble.
def export_model(self, IDcol):
self.create_ensemble_dir()
filename = os.path.join(os.getcwd(),'ensemble/xgboost_models.csv')
comb_series = self.classification_output.append(self.model_output, verify_integrity=True)
if os.path.exists(filename):
models = pd.read_csv(filename)
mID = int(max(models['ModelID'])+1)
else:
mID = 1
models = pd.DataFrame(columns=comb_series.index)
comb_series['ModelID'] = mID
models = models.append(comb_series, ignore_index=True)
models.to_csv(filename, index=False, float_format="%.5f")
model_filename = os.path.join(os.getcwd(),'ensemble/xgboost_'+str(mID)+'.csv')
self.submission(IDcol, model_filename)
"""
#####################################################################
##### ENSEMBLE (UNDER DEVELOPMENT)
#####################################################################
"""
#Class for creating an ensemble model using the exported files from previous classes
class Ensemble_Classification(object):
#initialize the object with target variable
def __init__(self, target, IDcol):
self.datablock.target = target
self.data = None
self.relationMatrix_chi2 = None
self.relationMatrix_diff = None
self.IDcol = IDcol
#create the ensemble data
# Inputs:
# models - dictionary with key as the model name and values as list containing the model numbers to be ensembled
# Note: all the models in the list specified should be present in the ensemble folder. Please cross-check once
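# e.g. (illustrative): ens.create_ensemble_data({'logistic_reg': [1, 2], 'gbm': [3]})
# would read ensemble/logistic_reg_1.csv, ensemble/logistic_reg_2.csv and
# ensemble/gbm_3.csv from the current working directory.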
def create_ensemble_data(self, models):
self.data = None
for key, value in models.items():
# print key,value
for i in value:
fname = key + '_' + str(i)
fpath = os.path.join(os.getcwd(), 'ensemble', fname+'.csv')
tempdata = pd.read_csv(fpath)
tempdata = tempdata.rename(columns = {self.datablock.target: fname})
if self.data is None:
self.data = tempdata
else:
self.data = self.data.merge(tempdata,on=self.data.columns[0])
#get the data being used for ensemble
def get_ensemble_data(self):
return self.data
#Check chisq test between different model outputs to check which combination of ensemble will generate better results. Note: Models with high correlation should not be combined together.
def chisq_independence(self, col1, col2, verbose = False):
contingencyTable = pd.crosstab(col1,col2,margins=True)
if len(col1)/((contingencyTable.shape[0] - 1) * (contingencyTable.shape[1] - 1)) <= 5:
return "TMC"
expected = contingencyTable.copy()
total = contingencyTable.loc["All","All"]
# print contingencyTable.index
# print contingencyTable.columns
for m in contingencyTable.index:
for n in contingencyTable.columns:
expected.loc[m,n] = contingencyTable.loc[m,"All"]*contingencyTable.loc["All",n]/float(total)
if verbose:
print('\n\nAnalysis of models: %s and %s' % (col1.name, col2.name))
print('Contingency Table:')
print(contingencyTable)
# print '\nExpected Frequency Table:'
# print expected
observed_frq = contingencyTable.iloc[:-1,:-1].values.ravel()
expected_frq = expected.iloc[:-1,:-1].values.ravel()
numless1 = len(expected_frq[expected_frq<1])
perless5 = len(expected_frq[expected_frq<5])/len(expected_frq)
#Adjustment in DOF so use the 1D chisquare to matrix shaped data; -1 in row n col because of All row and column
matrixadj = (contingencyTable.shape[0] - 1) + (contingencyTable.shape[1] - 1) - 2
# print matrixadj
pval = np.round(chisquare(observed_frq, expected_frq,ddof=matrixadj)[1],3)
if numless1>0 or perless5>=0.2:
return str(pval)+"*"
else:
return pval
#Create the relational matrix between models
def check_ch2(self, verbose=False):
col = self.data.columns[1:]
self.relationMatrix_chi2 = pd.DataFrame(index=col,columns=col)
for i in range(len(col)):
for j in range(i, len(col)):
if i==j:
self.relationMatrix_chi2.loc[col[i],col[j]] = 1
else:
pval = self.chisq_independence(self.data.iloc[:,i+1],self.data.iloc[:,j+1], verbose=verbose)
self.relationMatrix_chi2.loc[col[j],col[i]] = pval
self.relationMatrix_chi2.loc[col[i],col[j]] = pval
print('\n\n Relational Matrix (based on Chi-square test):')
print(self.relationMatrix_chi2)
def check_diff(self):
col = self.data.columns[1:]
self.relationMatrix_diff = pd.DataFrame(index=col,columns=col)
nrow = self.data.shape[0]
for i in range(len(col)):
for j in range(i, len(col)):
if i==j:
self.relationMatrix_diff.loc[col[i],col[j]] = '-'
else:
# print col[i],col[j]
pval = "{0:.2%}".format(sum( np.abs(self.data.iloc[:,i+1]-self.data.iloc[:,j+1]) )/float(nrow))
self.relationMatrix_diff.loc[col[j],col[i]] = pval
self.relationMatrix_diff.loc[col[i],col[j]] = pval
print('\n\n Relational Matrix (based on perc difference):')
print(self.relationMatrix_diff)
#Generate submission for the ensembled model by combining the mentioned models.
# Inputs:
# models_to_use - list with model names to use; if None- all models will be used
# filename - the filename of the final submission
# Note: the models should be odd in number to allow a clear winner in terms of mode, otherwise the first element will be chosen
def submission(self, models_to_use=None, filename="Submission_ensemble.csv"):
#if models_to_use is None then use all, else filter:
if models_to_use is None:
data_ens = self.data
else:
data_ens = self.data[models_to_use]
def mode_ens(x):
return int(mode(x).mode[0])
ensemble_output = data_ens.apply(mode_ens,axis=1)
submission = pd.DataFrame({
self.IDcol: self.data.iloc[:,0],
self.datablock.target: ensemble_output
})
submission.to_csv(filename, index=False)
""" | bsd-3-clause |
sumanthjamadagni/OZ_Examples | IsothermCalculation.py | 1 | 1267 | import numpy as np
import sys
import matplotlib.pyplot as plt
import matplotlib
#from itertools import cycle
import OZ.Potentials as Potentials
import OZ.OZ_Functions as OZF
import OZ.PP_Functions as PP_Functions
#import FigFuncs
#import HNC
import OZ.RHNC as RHNC
from OZ.SinglePointCalculation import *
dr = 0.02
nr = 2048
sig=1.0
r,k = OZF.Create_r_k(dr, nr)
Ur_ref = Potentials.WCAPotential(r,sig=1.0, eps=1.0, m=12, n=6)
Ur = Potentials.LJPotential(r,sig=1.0, eps=1.0, m=12, n=6)
T = float(sys.argv[1])
OutFile = sys.argv[2]
OutFileH = open(OutFile, 'w')
branch = sys.argv[3]
if branch == 'vap':
rho_array = np.arange(0.01,0.25,0.005)
elif branch == 'liq':
rho_array = np.arange(0.80,0.50,-0.01)
else:
print "Unknown branch. Has to be 'vap' or 'liq'"
sys.exit()
cr_guess = None
i = 0
for rho in rho_array:
i = i + 1
try:
ListHeader, ListValues , gr, cr, Sk = SinglePointCalculation(r, k, Ur, Ur_ref, T, rho, OutFile=None, cr_guess = cr_guess)
cr_guess = cr
print ListHeader
print ListValues
if i == 1:
OutFileH.write(OZF.ListToTabbedStr(ListHeader))
OutFileH.write(OZF.ListToTabbedStr(ListValues))
except TypeError:
break
| gpl-3.0 |
igsr/igsr_analysis | scripts/VCF/QC/BENCHMARKING_TRUESET/calc_gtconcordance.py | 1 | 1544 | #!/usr/bin/env python3
'''
author: ernesto lowy ([email protected])
This script will parse a .tsv file in the following format:
POS REF ALT GT
18948096 G T 0|1
18948246 G A 0|1
18948886 T A 0|1
18949107 A G 0|1
...
And will generate some contingency tables in order to give a feeling
for the genotype concordance of the 2 call sets
USAGE: python calc_gtconcordance <igsr.tsv> <giab.tsv>
'''
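# One possible way (not part of this script; assumes bcftools is available)
# to produce such a .tsv from a single-sample VCF is the bcftools query
# sub-command, e.g.:
#
#   bcftools query -f '%POS\t%REF\t%ALT[\t%GT]\n' calls.vcf.gz > calls.tsv
#
# The exact command depends on how each call set is stored.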
import pandas as pd
import sys
#read-in the igsr tsv file
DF_igsr=pd.read_csv(sys.argv[1], sep='\t',index_col=0,names=['POS','REF','ALT','GT'])
#read-in the GIAB tsv file
DF_giab=pd.read_csv(sys.argv[2], sep='\t',index_col=0,names=['POS','REF','ALT','GT'])
#combine 2 dataframes by index
final_DF=pd.merge(DF_igsr,DF_giab,left_index=True, right_index=True, suffixes=('_igsr', '_giab'))
# contingency tables for ref and alt alleles
ref_table=pd.crosstab(final_DF.REF_igsr, final_DF.REF_giab)
print("\n###Correspondence between REF alleles:")
print(ref_table)
alt_table=pd.crosstab(final_DF.ALT_igsr, final_DF.ALT_giab)
print("\n###Correspondence between ALT alleles:")
print(alt_table)
# contingency tables for genotype concordance
gt_tables=pd.crosstab(final_DF.GT_igsr, final_DF.GT_giab,margins=True, margins_name="Total")
print("\n### GT concordance (counts):")
print(gt_tables)
gt_tables=pd.crosstab(final_DF.GT_igsr, final_DF.GT_giab,margins=True, margins_name="Total",normalize=True).round(4)*100
print("\n### GT concordance (%):")
print(gt_tables)
| apache-2.0 |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/tests/indexes/timedeltas/test_scalar_compat.py | 3 | 4487 | """
Tests for TimedeltaIndex methods behaving like their Timedelta counterparts
"""
import numpy as np
import pytest
from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG
from pandas import Index, Series, Timedelta, TimedeltaIndex, timedelta_range
import pandas._testing as tm
class TestVectorizedTimedelta:
def test_tdi_total_seconds(self):
# GH#10939
# test index
rng = timedelta_range("1 days, 10:11:12.100123456", periods=2, freq="s")
expt = [
1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456.0 / 1e9,
]
tm.assert_almost_equal(rng.total_seconds(), Index(expt))
# test Series
ser = Series(rng)
s_expt = Series(expt, index=[0, 1])
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with nat
ser[1] = np.nan
s_expt = Series(
[1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9, np.nan],
index=[0, 1],
)
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with both nat
ser = Series([np.nan, np.nan], dtype="timedelta64[ns]")
tm.assert_series_equal(
ser.dt.total_seconds(), Series([np.nan, np.nan], index=[0, 1])
)
def test_tdi_round(self):
td = timedelta_range(start="16801 days", periods=5, freq="30Min")
elt = td[1]
expected_rng = TimedeltaIndex(
[
Timedelta("16801 days 00:00:00"),
Timedelta("16801 days 00:00:00"),
Timedelta("16801 days 01:00:00"),
Timedelta("16801 days 02:00:00"),
Timedelta("16801 days 02:00:00"),
]
)
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq="H"), expected_rng)
assert elt.round(freq="H") == expected_elt
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
td.round(freq="foo")
with pytest.raises(ValueError, match=msg):
elt.round(freq="foo")
msg = "<MonthEnd> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
td.round(freq="M")
with pytest.raises(ValueError, match=msg):
elt.round(freq="M")
@pytest.mark.parametrize(
"freq,msg",
[
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
],
)
def test_tdi_round_invalid(self, freq, msg):
t1 = timedelta_range("1 days", periods=3, freq="1 min 2 s 3 us")
with pytest.raises(ValueError, match=msg):
t1.round(freq)
with pytest.raises(ValueError, match=msg):
# Same test for TimedeltaArray
t1._data.round(freq)
# TODO: de-duplicate with test_tdi_round
def test_round(self):
t1 = timedelta_range("1 days", periods=3, freq="1 min 2 s 3 us")
t2 = -1 * t1
t1a = timedelta_range("1 days", periods=3, freq="1 min 2 s")
t1c = TimedeltaIndex([1, 1, 1], unit="D")
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [
("N", t1, t2),
("U", t1, t2),
(
"L",
t1a,
TimedeltaIndex(
["-1 days +00:00:00", "-2 days +23:58:58", "-2 days +23:57:56"]
),
),
(
"S",
t1a,
TimedeltaIndex(
["-1 days +00:00:00", "-2 days +23:58:58", "-2 days +23:57:56"]
),
),
("12T", t1c, TimedeltaIndex(["-1 days", "-1 days", "-1 days"])),
("H", t1c, TimedeltaIndex(["-1 days", "-1 days", "-1 days"])),
("d", t1c, TimedeltaIndex([-1, -1, -1], unit="D")),
]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
def test_components(self):
rng = timedelta_range("1 days, 10:11:12", periods=2, freq="s")
rng.components
# with nat
s = Series(rng)
s[1] = np.nan
result = s.dt.components
assert not result.iloc[0].isna().all()
assert result.iloc[1].isna().all()
| gpl-2.0 |
mahak/spark | python/pyspark/pandas/tests/data_type_ops/test_binary_ops.py | 7 | 6774 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class BinaryOpsTest(PandasOnSparkTestCase, TestCasesUtils):
@property
def pser(self):
return pd.Series([b"1", b"2", b"3"])
@property
def psser(self):
return ps.from_pandas(self.pser)
def test_add(self):
psser = self.psser
pser = self.pser
self.assert_eq(psser + b"1", pser + b"1")
self.assert_eq(psser + psser, pser + pser)
self.assert_eq(psser + psser.astype("bytes"), pser + pser.astype("bytes"))
self.assertRaises(TypeError, lambda: psser + "x")
self.assertRaises(TypeError, lambda: psser + 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser + psser)
self.assert_eq(self.psser + self.psser, self.pser + self.pser)
def test_sub(self):
self.assertRaises(TypeError, lambda: self.psser - "x")
self.assertRaises(TypeError, lambda: self.psser - 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser - psser)
def test_mul(self):
self.assertRaises(TypeError, lambda: self.psser * "x")
self.assertRaises(TypeError, lambda: self.psser * 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser * psser)
def test_truediv(self):
self.assertRaises(TypeError, lambda: self.psser / "x")
self.assertRaises(TypeError, lambda: self.psser / 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser / psser)
def test_floordiv(self):
self.assertRaises(TypeError, lambda: self.psser // "x")
self.assertRaises(TypeError, lambda: self.psser // 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser // psser)
def test_mod(self):
self.assertRaises(TypeError, lambda: self.psser % "x")
self.assertRaises(TypeError, lambda: self.psser % 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser % psser)
def test_pow(self):
self.assertRaises(TypeError, lambda: self.psser ** "x")
self.assertRaises(TypeError, lambda: self.psser ** 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser ** psser)
def test_radd(self):
self.assert_eq(b"1" + self.psser, b"1" + self.pser)
self.assertRaises(TypeError, lambda: "x" + self.psser)
self.assertRaises(TypeError, lambda: 1 + self.psser)
def test_rsub(self):
self.assertRaises(TypeError, lambda: "x" - self.psser)
self.assertRaises(TypeError, lambda: 1 - self.psser)
def test_rmul(self):
self.assertRaises(TypeError, lambda: "x" * self.psser)
self.assertRaises(TypeError, lambda: 2 * self.psser)
def test_rtruediv(self):
self.assertRaises(TypeError, lambda: "x" / self.psser)
self.assertRaises(TypeError, lambda: 1 / self.psser)
def test_rfloordiv(self):
self.assertRaises(TypeError, lambda: "x" // self.psser)
self.assertRaises(TypeError, lambda: 1 // self.psser)
def test_rmod(self):
self.assertRaises(TypeError, lambda: 1 % self.psser)
def test_rpow(self):
self.assertRaises(TypeError, lambda: "x" ** self.psser)
self.assertRaises(TypeError, lambda: 1 ** self.psser)
def test_and(self):
self.assertRaises(TypeError, lambda: self.psser & True)
self.assertRaises(TypeError, lambda: self.psser & False)
self.assertRaises(TypeError, lambda: self.psser & self.psser)
def test_rand(self):
self.assertRaises(TypeError, lambda: True & self.psser)
self.assertRaises(TypeError, lambda: False & self.psser)
def test_or(self):
self.assertRaises(TypeError, lambda: self.psser | True)
self.assertRaises(TypeError, lambda: self.psser | False)
self.assertRaises(TypeError, lambda: self.psser | self.psser)
def test_ror(self):
self.assertRaises(TypeError, lambda: True | self.psser)
self.assertRaises(TypeError, lambda: False | self.psser)
def test_from_to_pandas(self):
data = [b"1", b"2", b"3"]
pser = pd.Series(data)
psser = ps.Series(data)
self.assert_eq(pser, psser.to_pandas())
self.assert_eq(ps.from_pandas(pser), psser)
def test_isnull(self):
self.assert_eq(self.pser.isnull(), self.psser.isnull())
def test_astype(self):
pser = self.pser
psser = self.psser
self.assert_eq(pd.Series(["1", "2", "3"]), psser.astype(str))
self.assert_eq(pser.astype("category"), psser.astype("category"))
cat_type = CategoricalDtype(categories=[b"2", b"3", b"1"])
self.assert_eq(pser.astype(cat_type), psser.astype(cat_type))
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.data_type_ops.test_binary_ops import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
bmazin/ARCONS-pipeline | examples/Pal2012-sdss/sdssview_frameBlue.py | 1 | 1595 | from sdssgaussfitter import gaussfit
import numpy as np
from util import utils
import matplotlib.pyplot as plt
import sys
### This is used to view a single frame from a display-stack or psffit .npz file. On the command line, type "python sdssview_frameBlue.py 3" to view image number 3.
def aperture(startpx,startpy,radius=3):
r = radius
length = 2*r
height = length
allx = xrange(startpx-int(np.ceil(length/2.0)),startpx+int(np.floor(length/2.0))+1)
ally = xrange(startpy-int(np.ceil(height/2.0)),startpy+int(np.floor(height/2.0))+1)
pixx = []
pixy = []
mask=np.ones((46,44))
for x in allx:
for y in ally:
if (np.abs(x-startpx))**2+(np.abs(y-startpy))**2 <= (r)**2 and 0 <= y and y < 46 and 0 <= x and x < 44:
mask[y,x]=0.
return mask
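# Illustrative note (comment only, not in the original script): aperture(22, 23, radius=3)
# returns a 46x44 array of ones with zeros inside a circle of radius 3 pixels
# centred on column 22, row 23, i.e. a mask that keeps only that aperture.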
def gaussian(height, center_x, center_y, width_x, width_y,offset):
"""Returns a gaussian function with the given parameters"""
width_x = float(width_x)
width_y = float(width_y)
return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)+offset
stackDict = np.load('/Scratch/dataProcessing/SDSS_J0926/Dec8ImageStackAllInt3Aperture510.npz')
stack = stackDict['stack']
if len(sys.argv) == 1:
print 'Usage: ',sys.argv[0],' iFrame'
print """
set0 Frames 0-179
"""
exit(1)
iFrame = int(sys.argv[1])
frame = stack[:,:,iFrame]
# plt.hist(np.ravel(frame),bins=100,range=(0,5000))
# plt.show()
nanMask = np.isnan(frame)
frame[nanMask] = 0
frame = np.ma.masked_array(frame,mask=nanMask)
utils.plotArray(frame,cbar=True)
| gpl-2.0 |
annahs/atmos_research | WHI_add_gc_co_to_db.py | 1 | 13375 | import matplotlib.pyplot as plt
import numpy as np
import os
import sys
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import pickle
import copy
from pyhdf.SD import SD, SDC, SDS
import collections
import calendar
import mysql.connector
#database tables
###
filter_by_RH = True
high_RH_limit = 90
if filter_by_RH == False:
high_RH_limit = 101
####
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
calib_stability_uncertainty = 0.1
#fire times
timezone = timedelta(hours = -8)
fire_time1 = [datetime.strptime('2009/07/27 00:00', '%Y/%m/%d %H:%M'), datetime.strptime('2009/08/08 00:00', '%Y/%m/%d %H:%M')] #row_datetimes following Takahama et al (2011) doi:10.5194/acp-11-6367-2011 #PST
fire_time1_UNIX_UTC_start = float(calendar.timegm((fire_time1[0]-timezone).utctimetuple()))
fire_time1_UNIX_UTC_end = float(calendar.timegm((fire_time1[1]-timezone).utctimetuple()))
fire_time2 = [datetime.strptime('2010/07/26 09:00', '%Y/%m/%d %H:%M'), datetime.strptime('2010/07/28 09:30', '%Y/%m/%d %H:%M')] #jason's BC clear report #PST
fire_time2_UNIX_UTC_start = float(calendar.timegm((fire_time2[0]-timezone).utctimetuple()))
fire_time2_UNIX_UTC_end = float(calendar.timegm((fire_time2[1]-timezone).utctimetuple()))
############Measurements
#get full rBC record (in PST and 10 min binned intervals) and put in dictionaries keyed by date
rBC_24h_data = {} #does not include BB data
rBC_BB_24h_data = {}
rBC_FT_data_cluster_NPac = {}
rBC_FT_data_cluster_SPac = {}
rBC_FT_data_cluster_Cont = {}
rBC_FT_data_cluster_LRT = {}
rBC_FT_data_cluster_GBPS = {}
rBC_FT_data_cluster_BB = {}
##
#select data (spikes already removed) and exclude fire times
SP2_data_query = ('SELECT * FROM whi_sp2_rbc_record_2009to2012_spikes_removed WHERE UNIX_GMT_ts NOT BETWEEN %(fire_time1_start)s AND %(fire_time1_end)s AND UNIX_GMT_ts NOT BETWEEN %(fire_time2_start)s AND %(fire_time2_end)s')
query_terms ={
'fire_time1_start':fire_time1_UNIX_UTC_start,
'fire_time1_end':fire_time1_UNIX_UTC_end,
'fire_time2_start':fire_time2_UNIX_UTC_start,
'fire_time2_end':fire_time2_UNIX_UTC_end
}
cursor.execute(SP2_data_query,query_terms)
SP2_data = cursor.fetchall()
start_hour = 4 #PST 2000
end_hour = 16 #PST 0800
for row in SP2_data:
UNIX_UTC_ts = row[8]
date_time_UTC = datetime.utcfromtimestamp(UNIX_UTC_ts)
BC_mass_conc = row[3]
BC_mass_conc_LL = row[4]
BC_mass_conc_UL = row[5]
#avoid high RH times
if filter_by_RH == True:
cursor.execute(('SELECT RH from whi_high_rh_times_2009to2012 where high_RH_start_time <= %s and high_RH_end_time > %s'),(UNIX_UTC_ts,UNIX_UTC_ts))
RH_data = cursor.fetchall()
if len(RH_data):
if RH_data[0][0] > high_RH_limit:
continue
#use night only data
if start_hour <= date_time_UTC.hour < end_hour:
cursor.execute(('SELECT * from whi_ft_cluster_times_2009to2012 where cluster_start_time <= %s and cluster_end_time >= %s'),(UNIX_UTC_ts,UNIX_UTC_ts))
cluster_data = cursor.fetchall()
#we have a few samples from the first day of 2009 and 2012 that are before our first cluster, so this ignores those.
if len(cluster_data) == 0:
continue
cluster_midtime = datetime.strptime(cluster_data[0][4], '%Y-%m-%d %H:%M:%S')
cluster_number = cluster_data[0][3]
#####get abs err
if np.isnan(BC_mass_conc_LL):
abs_err = np.nan
else:
abs_err = (BC_mass_conc-BC_mass_conc_LL)
#add data to list in cluster dictionaries (1 list per cluster time early night/late night)
if cluster_number == 9:
correction_factor_for_massdistr = 1./0.5411
mass_distr_correction_error = 0.015 #this is the uncertainty in the fit of the mass distribution for this period. from WHI_long_term_v2_size_distr_fitting_and_plotting.py
corrected_mass_conc = BC_mass_conc*correction_factor_for_massdistr
row_data = [corrected_mass_conc, abs_err+(corrected_mass_conc*(mass_distr_correction_error+calib_stability_uncertainty)) ]
if cluster_midtime in rBC_FT_data_cluster_SPac:
rBC_FT_data_cluster_SPac[cluster_midtime].append(row_data)
else:
rBC_FT_data_cluster_SPac[cluster_midtime] = [row_data]
if cluster_number == 4:
correction_factor_for_massdistr = 1./0.4028
mass_distr_correction_error = 0.028 #this is the uncertainty in the fit of the mass distribution for this period. from WHI_long_term_v2_size_distr_fitting_and_plotting.py
corrected_mass_conc = BC_mass_conc*correction_factor_for_massdistr
row_data = [corrected_mass_conc, abs_err+(corrected_mass_conc*(mass_distr_correction_error+calib_stability_uncertainty)) ]
if cluster_midtime in rBC_FT_data_cluster_Cont:
rBC_FT_data_cluster_Cont[cluster_midtime].append(row_data)
else:
rBC_FT_data_cluster_Cont[cluster_midtime] = [row_data]
if cluster_number in [6,8]:
correction_factor_for_massdistr = 1./0.4626
mass_distr_correction_error = 0.032 #this is the uncertainty in the fit of the mass distribution for this period. from WHI_long_term_v2_size_distr_fitting_and_plotting.py
corrected_mass_conc = BC_mass_conc*correction_factor_for_massdistr
row_data = [corrected_mass_conc, abs_err+(corrected_mass_conc*(mass_distr_correction_error+calib_stability_uncertainty)) ]
if cluster_midtime in rBC_FT_data_cluster_SPac:
rBC_FT_data_cluster_SPac[cluster_midtime].append(row_data)
else:
rBC_FT_data_cluster_SPac[cluster_midtime] = [row_data]
if cluster_number in [2,7]:
correction_factor_for_massdistr = 1./0.5280
mass_distr_correction_error = 0.019 #this is the uncertainty in the fit of the mass distribution for this period. from WHI_long_term_v2_size_distr_fitting_and_plotting.py
corrected_mass_conc = BC_mass_conc*correction_factor_for_massdistr
row_data = [corrected_mass_conc, abs_err+(corrected_mass_conc*(mass_distr_correction_error+calib_stability_uncertainty)) ]
if cluster_midtime in rBC_FT_data_cluster_LRT:
rBC_FT_data_cluster_LRT[cluster_midtime].append(row_data)
else:
rBC_FT_data_cluster_LRT[cluster_midtime] = [row_data]
if cluster_number in [1,3,5,10]:
correction_factor_for_massdistr = 1./0.3525
mass_distr_correction_error = 0.015 #this is the uncertainty in the fit of the mass distribution for this period. from WHI_long_term_v2_size_distr_fitting_and_plotting.py
corrected_mass_conc = BC_mass_conc*correction_factor_for_massdistr
row_data = [corrected_mass_conc, abs_err+(corrected_mass_conc*(mass_distr_correction_error+calib_stability_uncertainty)) ]
if cluster_midtime in rBC_FT_data_cluster_NPac:
rBC_FT_data_cluster_NPac[cluster_midtime].append(row_data)
else:
rBC_FT_data_cluster_NPac[cluster_midtime] = [row_data]
##print data set lengths
#for cluster in [rBC_FT_data_cluster_NPac,rBC_FT_data_cluster_SPac,rBC_FT_data_cluster_Cont,rBC_FT_data_cluster_GBPS,rBC_FT_data_cluster_LRT]:
# print len(cluster)
# data_pts = 0
# for midtime in cluster:
# data_pts = data_pts + len(cluster[midtime])
# print data_pts
# print '\n'
#sys.exit()
#6h rBC-meas avgs (FT data)
SP2_6h_NPac = []
SP2_6h_SPac = []
SP2_6h_Cont = []
SP2_6h_LRT = []
SP2_6h_BB = []
SP2_6h_all_non_BB = []
all_dict = {}
#6h means
for date, mass_data in rBC_FT_data_cluster_NPac.iteritems():
mass_concs = [row[0] for row in mass_data]
mass_concs_abs_err = [row[1] for row in mass_data]
date_mean = np.mean(mass_concs)
date_mean_err = np.mean(mass_concs_abs_err)/date_mean
SP2_6h_NPac.append([date_mean,date_mean_err])
SP2_6h_all_non_BB.append([date_mean,date_mean_err])
if date in all_dict:
print 'alert!',date
else:
all_dict[date] = [date_mean,date_mean_err]
for date, mass_data in rBC_FT_data_cluster_SPac.iteritems():
mass_concs = [row[0] for row in mass_data]
mass_concs_abs_err = [row[1] for row in mass_data]
date_mean = np.mean(mass_concs)
date_mean_err = np.mean(mass_concs_abs_err)/date_mean
SP2_6h_SPac.append([date_mean,date_mean_err])
SP2_6h_all_non_BB.append([date_mean,date_mean_err])
if date in all_dict:
print 'alert!',date
else:
all_dict[date] = [date_mean,date_mean_err]
for date, mass_data in rBC_FT_data_cluster_Cont.iteritems():
mass_concs = [row[0] for row in mass_data]
mass_concs_abs_err = [row[1] for row in mass_data]
date_mean = np.mean(mass_concs)
date_mean_err = np.mean(mass_concs_abs_err)/date_mean
SP2_6h_Cont.append([date_mean,date_mean_err])
SP2_6h_all_non_BB.append([date_mean,date_mean_err])
if date in all_dict:
print 'alert!',date
else:
all_dict[date] = [date_mean,date_mean_err]
for date, mass_data in rBC_FT_data_cluster_LRT.iteritems():
mass_concs = [row[0] for row in mass_data]
mass_concs_abs_err = [row[1] for row in mass_data]
date_mean = np.mean(mass_concs)
date_mean_err = np.mean(mass_concs_abs_err)/date_mean
SP2_6h_LRT.append([date_mean,date_mean_err])
SP2_6h_all_non_BB.append([date_mean,date_mean_err])
if date in all_dict:
print 'alert!',date
else:
all_dict[date] = [date_mean,date_mean_err]
for date, mass_data in rBC_FT_data_cluster_BB.iteritems():
mass_concs = [row[0] for row in mass_data]
mass_concs_abs_err = [row[1] for row in mass_data]
date_mean = np.mean(mass_concs)
date_mean_err = np.mean(mass_concs_abs_err)/date_mean
SP2_6h_BB.append([date_mean,date_mean_err])
###################GEOS-Chem
##sampling times
sampling_times_file = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/GOES-Chem/Junwei_runs/WHI_SP2_6h_rBC_mass_concs.txt'
sampling_times = []
with open(sampling_times_file,'r') as f:
f.readline()
for line in f:
newline = line.split()
sampling_date = newline[0]
sampling_time = newline[1]
sampling_datetime = datetime(int(sampling_date[0:4]),int(sampling_date[5:7]),int(sampling_date[8:10]),int(sampling_time[0:2]))
sampling_times.append(sampling_datetime+timedelta(hours=8)) #get into UTC
GC_data = {}
GC_runs = ['default','Vancouver_emission','wet_scavenging','no_biomass','All_together']
lat = 20 #20 corresponds to 50deg
lon = 7 #7 corresponds to -122.5deg
level = 9 #1-47 #9 is closest to WHI avg P (WHI 95% CI = 770-793)
molar_mass_BC = 12.0107 #in g/mol
ng_per_g = 10**9
R = 8.3144621 # in m3*Pa/(K*mol)
GEOS_Chem_factor = 10**-9
#start_hour = 3
#end_hour = 15
GC_run = 'default' #the runs are 'default','Vancouver_emission','wet_scavenging','no_biomass','All_together'
print GC_run
data_dir = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/GOES-Chem/Junwei_runs/' + GC_run +'/'
os.chdir(data_dir)
for file in os.listdir(data_dir):
if file.endswith('.hdf'):
file_year = int(file[2:6])
file_month = int(file[6:8])
file_day = int(file[8:10])
file_hour = int(file[11:13])
GC_datetime = datetime(file_year,file_month,file_day,file_hour)
GC_UNIX_UTC_ts = calendar.timegm(GC_datetime.utctimetuple())
#avoid high RH times
if filter_by_RH == True:
cursor.execute(('SELECT RH from whi_high_rh_times_2009to2012 where high_RH_start_time <= %s and high_RH_end_time > %s'),(GC_UNIX_UTC_ts,GC_UNIX_UTC_ts))
RH_data = cursor.fetchone()
if RH_data != None:
if RH_data[0] > high_RH_limit:
continue
############
if start_hour <= file_hour < end_hour: #ignore any times not in the 2000-0800 PST window (0400-1600 UTC)
hdf_file = SD(file, SDC.READ)
if start_hour <= file_hour < (start_hour+6):
period_midtime = datetime(file_year,file_month,file_day,7)
period_starttime = datetime(file_year,file_month,file_day,4)
period_endtime = datetime(file_year,file_month,file_day,10)
if (start_hour+6) <= file_hour < end_hour:
period_midtime = datetime(file_year,file_month,file_day,13)
period_starttime = datetime(file_year,file_month,file_day,10)
period_endtime = datetime(file_year,file_month,file_day,16)
GC_CO = hdf_file.select('IJ-AVG-$::CO') #3d CO data in ppbv (molCO/molAIR)
CO_ppbv = GC_CO[level,lat,lon]
GC_CO_lvl = CO_ppbv#*GEOS_Chem_factor*(101325/(R*273)) #101325/(R*273) corrects to STP
if period_midtime in all_dict: #this excludes BB times already
if period_midtime not in GC_data:
GC_data[period_midtime] = []
GC_data[period_midtime].append(CO_ppbv)
hdf_file.end()
#query to add 6h CO conc data
add_6h_data = ('INSERT INTO whi_gc_co_data'
'(UNIX_UTC_start_time,UNIX_UTC_end_time,CO_ppbv,RH_threshold,GC_scenario)'
'VALUES (%(UNIX_UTC_start_time)s,%(UNIX_UTC_end_time)s,%(CO_ppbv)s,%(RH_threshold)s,%(GC_scenario)s)'
)
#get the means for each 6-h period
for period_midtime in GC_data:
period_starttime = period_midtime - timedelta(hours = 3)
period_endtime = period_midtime + timedelta(hours = 3)
CO_mean = np.mean(GC_data[period_midtime])
CO_data = {
'UNIX_UTC_start_time':float(calendar.timegm(period_starttime.utctimetuple())),
'UNIX_UTC_end_time':float(calendar.timegm(period_endtime.utctimetuple())),
'CO_ppbv':float(CO_mean),
'RH_threshold':high_RH_limit,
'GC_scenario': GC_run,
}
cursor.execute(('''UPDATE whi_gc_co_data SET CO_ppbv = %(CO_ppbv)s
WHERE UNIX_UTC_start_time = %(UNIX_UTC_start_time)s AND UNIX_UTC_end_time = %(UNIX_UTC_end_time)s and GC_scenario = %(GC_scenario)s and RH_threshold = %(RH_threshold)s'''),(CO_data))
#cursor.execute(add_6h_data, CO_data)
cnx.commit()
cnx.close()
| mit |
cngo-github/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/polar.py | 69 | 20981 | import math
import numpy as npy
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.artist import kwdocd
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class PolarAxes(Axes):
"""
A polar graph projection, where the input dimensions are *theta*, *r*.
Theta starts pointing east and goes anti-clockwise.
"""
name = 'polar'
class PolarTransform(Transform):
"""
The base polar transform. This handles projection *theta* and
*r* into Cartesian coordinate space *x* and *y*, but does not
perform the ultimate affine transformation into the correct
position.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new polar transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved polar space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, tr):
xy = npy.zeros(tr.shape, npy.float_)
t = tr[:, 0:1]
r = tr[:, 1:2]
x = xy[:, 0:1]
y = xy[:, 1:2]
x[:] = r * npy.cos(t)
y[:] = r * npy.sin(t)
return xy
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
t = vertices[:, 0:1]
t[t != (npy.pi * 2.0)] %= (npy.pi * 2.0)
if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]:
return Path(self.transform(vertices), path.codes)
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return PolarAxes.InvertedPolarTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class PolarAffine(Affine2DBase):
"""
The affine part of the polar projection. Scales the output so
that maximum radius rests on the edge of the axes circle.
"""
def __init__(self, scale_transform, limits):
u"""
*limits* is the view limit of the data. The only part of
its bounds that is used is ymax (for the radius maximum).
The theta range is always fixed to (0, 2\u03c0).
"""
Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None
def get_matrix(self):
if self._invalid:
limits_scaled = self._limits.transformed(self._scale_transform)
ymax = limits_scaled.ymax
affine = Affine2D() \
.scale(0.5 / ymax) \
.translate(0.5, 0.5)
self._mtx = affine.get_matrix()
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class InvertedPolarTransform(Transform):
"""
The inverse of the polar transform, mapping Cartesian
coordinate space *x* and *y* back to *theta* and *r*.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:]
r = npy.sqrt(x*x + y*y)
theta = npy.arccos(x / r)
theta = npy.where(y < 0, 2 * npy.pi - theta, theta)
return npy.concatenate((theta, r), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return PolarAxes.PolarTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class ThetaFormatter(Formatter):
u"""
Used to format the *theta* tick labels. Converts the
native unit of radians into degrees and adds a degree symbol
(\u00b0).
"""
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % ((x / npy.pi) * 180.0)
else:
# we use unicode, rather than mathtext with \circ, so
# that it will work correctly with any arbitrary font
# (assuming it has a degree sign), whereas $5\circ$
# will only work correctly with one of the supported
# math fonts (Computer Modern and STIX)
return u"%0.0f\u00b0" % ((x / npy.pi) * 180.0)
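# Illustrative example (comment only, not in the original source): with the
# default rcParams, ThetaFormatter()(npy.pi / 2) returns u"90\u00b0", i.e. the
# tick at pi/2 radians is labelled 90 degrees.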
class RadialLocator(Locator):
"""
Used to locate radius ticks.
Ensures that all ticks are strictly positive. For all other
tasks, it delegates to the base
:class:`~matplotlib.ticker.Locator` (which may be different
depending on the scale of the *r*-axis).
"""
def __init__(self, base):
self.base = base
def __call__(self):
ticks = self.base()
return [x for x in ticks if x > 0]
def autoscale(self):
return self.base.autoscale()
def pan(self, numsteps):
return self.base.pan(numsteps)
def zoom(self, direction):
return self.base.zoom(direction)
def refresh(self):
return self.base.refresh()
RESOLUTION = 75
def __init__(self, *args, **kwargs):
"""
Create a new Polar Axes for a polar plot.
"""
self._rpad = 0.05
self.resolution = kwargs.pop('resolution', self.RESOLUTION)
Axes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
__init__.__doc__ = Axes.__init__.__doc__
def cla(self):
Axes.cla(self)
self.title.set_y(1.05)
self.xaxis.set_major_formatter(self.ThetaFormatter())
angles = npy.arange(0.0, 360.0, 45.0)
self.set_thetagrids(angles)
self.yaxis.set_major_locator(self.RadialLocator(self.yaxis.get_major_locator()))
self.grid(rcParams['polaraxes.grid'])
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
def _set_lim_and_transforms(self):
self.transAxes = BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = TransformWrapper(IdentityTransform())
# A (possibly non-linear) projection on the (already scaled) data
self.transProjection = self.PolarTransform(self.resolution)
# An affine transformation on the data, generally to limit the
# range of the axes
self.transProjectionAffine = self.PolarAffine(self.transScale, self.viewLim)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = self.transScale + self.transProjection + \
(self.transProjectionAffine + self.transAxes)
# This is the transform for theta-axis ticks. It is
# equivalent to transData, except it always puts r == 1.0 at
# the edge of the axis circle.
self._xaxis_transform = (
self.transProjection +
self.PolarAffine(IdentityTransform(), Bbox.unit()) +
self.transAxes)
# The theta labels are moved from radius == 0.0 to radius == 1.1
self._theta_label1_position = Affine2D().translate(0.0, 1.1)
self._xaxis_text1_transform = (
self._theta_label1_position +
self._xaxis_transform)
self._theta_label2_position = Affine2D().translate(0.0, 1.0 / 1.1)
self._xaxis_text2_transform = (
self._theta_label2_position +
self._xaxis_transform)
# This is the transform for r-axis ticks. It scales the theta
# axis so the gridlines from 0.0 to 1.0, now go from 0.0 to
# 2pi.
self._yaxis_transform = (
Affine2D().scale(npy.pi * 2.0, 1.0) +
self.transData)
# The r-axis labels are put at an angle and padded in the r-direction
self._r_label1_position = Affine2D().translate(22.5, self._rpad)
self._yaxis_text1_transform = (
self._r_label1_position +
Affine2D().scale(1.0 / 360.0, 1.0) +
self._yaxis_transform
)
self._r_label2_position = Affine2D().translate(22.5, self._rpad)
self._yaxis_text2_transform = (
self._r_label2_position +
Affine2D().scale(1.0 / 360.0, 1.0) +
self._yaxis_transform
)
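# Illustrative summary (comment only, not in the original source): a data point
# (theta, r) therefore flows through transScale, then transProjection
# (r*cos(theta), r*sin(theta)), then transProjectionAffine (scaled so the
# maximum radius lands on the axes circle), and finally transAxes into
# display coordinates.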
def get_xaxis_transform(self):
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'center', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'center', 'center'
def get_yaxis_transform(self):
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
return self._yaxis_text1_transform, 'center', 'center'
def get_yaxis_text2_transform(self, pad):
return self._yaxis_text2_transform, 'center', 'center'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def set_rmax(self, rmax):
self.viewLim.y1 = rmax
angle = self._r_label1_position.to_values()[4]
self._r_label1_position.clear().translate(
angle, rmax * self._rpad)
self._r_label2_position.clear().translate(
angle, -rmax * self._rpad)
def get_rmax(self):
return self.viewLim.ymax
def set_yscale(self, *args, **kwargs):
Axes.set_yscale(self, *args, **kwargs)
self.yaxis.set_major_locator(
self.RadialLocator(self.yaxis.get_major_locator()))
set_rscale = Axes.set_yscale
set_rticks = Axes.set_yticks
def set_thetagrids(self, angles, labels=None, frac=None,
**kwargs):
"""
Set the angles at which to place the theta grids (these
gridlines are equal along the theta dimension). *angles* is in
degrees.
*labels*, if not None, is a ``len(angles)`` list of strings of
the labels to use at each angle.
If *labels* is None, the labels will be ``fmt %% angle``
*frac* is the fraction of the polar axes radius at which to
place the label (1 is the edge). Eg. 1.05 is outside the axes
and 0.95 is inside the axes.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
angles = npy.asarray(angles, npy.float_)
self.set_xticks(angles * (npy.pi / 180.0))
if labels is not None:
self.set_xticklabels(labels)
if frac is not None:
self._theta_label1_position.clear().translate(0.0, frac)
self._theta_label2_position.clear().translate(0.0, 1.0 / frac)
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()
set_thetagrids.__doc__ = cbook.dedent(set_thetagrids.__doc__) % kwdocd
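# Illustrative call (comment only; assumes `ax` is a PolarAxes instance):
#   lines, labels = ax.set_thetagrids(range(0, 360, 45), frac=1.1)
# places labelled theta gridlines every 45 degrees just outside the axes circle.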
def set_rgrids(self, radii, labels=None, angle=None, rpad=None, **kwargs):
"""
Set the radial locations and labels of the *r* grids.
The labels will appear at radial distances *radii* at the
given *angle* in degrees.
*labels*, if not None, is a ``len(radii)`` list of strings of the
labels to use at each radius.
If *labels* is None, the built-in formatter will be used.
*rpad* is a fraction of the max of *radii* which will pad each of
the radial labels in the radial direction.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
radii = npy.asarray(radii)
rmin = radii.min()
if rmin <= 0:
raise ValueError('radial grids must be strictly positive')
self.set_yticks(radii)
if labels is not None:
self.set_yticklabels(labels)
if angle is None:
angle = self._r_label1_position.to_values()[4]
if rpad is not None:
self._rpad = rpad
rmax = self.get_rmax()
self._r_label1_position.clear().translate(angle, self._rpad * rmax)
self._r_label2_position.clear().translate(angle, -self._rpad * rmax)
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return self.yaxis.get_ticklines(), self.yaxis.get_ticklabels()
set_rgrids.__doc__ = cbook.dedent(set_rgrids.__doc__) % kwdocd
def set_xscale(self, scale, *args, **kwargs):
if scale != 'linear':
raise NotImplementedError("You can not set the xscale on a polar plot.")
def set_xlim(self, *args, **kargs):
# The xlim is fixed, no matter what you do
self.viewLim.intervalx = (0.0, npy.pi * 2.0)
def format_coord(self, theta, r):
"""
Return a format string formatting the coordinate using Unicode
characters.
"""
theta /= math.pi
# \u03b8: lower-case theta
# \u03c0: lower-case pi
# \u00b0: degree symbol
return u'\u03b8=%0.3f\u03c0 (%0.3f\u00b0), r=%0.3f' % (theta, theta * 180.0, r)
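# Illustrative example (comment only, not in the original source):
# format_coord(npy.pi / 2, 1.0) returns u'\u03b8=0.500\u03c0 (90.000\u00b0), r=1.000'.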
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself. For a polar plot,
this should always be 1.0
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return True if this axes support the zoom box
"""
return False
def start_pan(self, x, y, button):
angle = self._r_label1_position.to_values()[4] / 180.0 * npy.pi
mode = ''
if button == 1:
epsilon = npy.pi / 45.0
t, r = self.transData.inverted().transform_point((x, y))
if t >= angle - epsilon and t <= angle + epsilon:
mode = 'drag_r_labels'
elif button == 3:
mode = 'zoom'
self._pan_start = cbook.Bunch(
rmax = self.get_rmax(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
r_label_angle = self._r_label1_position.to_values()[4],
x = x,
y = y,
mode = mode
)
def end_pan(self):
del self._pan_start
def drag_pan(self, button, key, x, y):
p = self._pan_start
if p.mode == 'drag_r_labels':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
# Deal with theta
dt0 = t - startt
dt1 = startt - t
if abs(dt1) < abs(dt0):
dt = abs(dt1) * npy.sign(dt0) * -1.0
else:
dt = dt0 * -1.0
dt = (dt / npy.pi) * 180.0
rpad = self._r_label1_position.to_values()[5]
self._r_label1_position.clear().translate(
p.r_label_angle - dt, rpad)
self._r_label2_position.clear().translate(
p.r_label_angle - dt, -rpad)
elif p.mode == 'zoom':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
dr = r - startr
# Deal with r
scale = r / startr
self.set_rmax(p.rmax / scale)
# These are a couple of aborted attempts to project a polar plot using
# cubic bezier curves.
# def transform_path(self, path):
# twopi = 2.0 * npy.pi
# halfpi = 0.5 * npy.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = npy.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = npy.ceil(maxtd / halfpi)
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# vertices = self.transform(vertices)
# result = npy.zeros((len(vertices) * 3 - 2, 2), npy.float_)
# codes = mpath.Path.CURVE4 * npy.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((npy.sqrt(2.0) - 1.0) / 3.0)
# kappa = 0.5
# p0 = vertices[0:-1]
# p1 = vertices[1: ]
# x0 = p0[:, 0:1]
# y0 = p0[:, 1: ]
# b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0)
# a0 = y0 - b0*x0
# x1 = p1[:, 0:1]
# y1 = p1[:, 1: ]
# b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1)
# a1 = y1 - b1*x1
# x = -(a0-a1) / (b0-b1)
# y = a0 + b0*x
# xk = (x - x0) * kappa + x0
# yk = (y - y0) * kappa + y0
# result[1::3, 0:1] = xk
# result[1::3, 1: ] = yk
# xk = (x - x1) * kappa + x1
# yk = (y - y1) * kappa + y1
# result[2::3, 0:1] = xk
# result[2::3, 1: ] = yk
# result[3::3] = p1
# print vertices[-2:]
# print result[-2:]
# return mpath.Path(result, codes)
# twopi = 2.0 * npy.pi
# halfpi = 0.5 * npy.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = npy.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = npy.ceil(maxtd / halfpi)
# print "interpolate", interpolate
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# result = npy.zeros((len(vertices) * 3 - 2, 2), npy.float_)
# codes = mpath.Path.CURVE4 * npy.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((npy.sqrt(2.0) - 1.0) / 3.0)
# tkappa = npy.arctan(kappa)
# hyp_kappa = npy.sqrt(kappa*kappa + 1.0)
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# r0 = vertices[0:-1, 1]
# r1 = vertices[1: , 1]
# td = npy.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# td_scaled = td / (npy.pi * 0.5)
# rd = r1 - r0
# r0kappa = r0 * kappa * td_scaled
# r1kappa = r1 * kappa * td_scaled
# ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled
# result[1::3, 0] = t0 + (tkappa * td_scaled)
# result[1::3, 1] = r0*hyp_kappa
# # result[1::3, 1] = r0 / npy.cos(tkappa * td_scaled) # npy.sqrt(r0*r0 + ravg_kappa*ravg_kappa)
# result[2::3, 0] = t1 - (tkappa * td_scaled)
# result[2::3, 1] = r1*hyp_kappa
# # result[2::3, 1] = r1 / npy.cos(tkappa * td_scaled) # npy.sqrt(r1*r1 + ravg_kappa*ravg_kappa)
# result[3::3, 0] = t1
# result[3::3, 1] = r1
# print vertices[:6], result[:6], t0[:6], t1[:6], td[:6], td_scaled[:6], tkappa
# result = self.transform(result)
# return mpath.Path(result, codes)
# transform_path_non_affine = transform_path
| agpl-3.0 |
Insight-book/data-science-from-scratch | scratch/gradient_descent.py | 3 | 5298 | from scratch.linear_algebra import Vector, dot
def sum_of_squares(v: Vector) -> float:
"""Computes the sum of squared elements in v"""
return dot(v, v)
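# Quick illustrative check (comment only, not in the original):
# sum_of_squares([1, 2, 3]) is 1 + 4 + 9 == 14.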
from typing import Callable
def difference_quotient(f: Callable[[float], float],
x: float,
h: float) -> float:
return (f(x + h) - f(x)) / h
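# Illustrative check (comment only, not in the original):
# difference_quotient(square, 3, h=0.001) evaluates to about 6.001,
# close to the exact derivative(3) == 6.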
def square(x: float) -> float:
return x * x
def derivative(x: float) -> float:
return 2 * x
def estimate_gradient(f: Callable[[Vector], float],
v: Vector,
h: float = 0.0001):
return [partial_difference_quotient(f, v, i, h)
for i in range(len(v))]
import random
from scratch.linear_algebra import distance, add, scalar_multiply
def gradient_step(v: Vector, gradient: Vector, step_size: float) -> Vector:
"""Moves `step_size` in the `gradient` direction from `v`"""
assert len(v) == len(gradient)
step = scalar_multiply(step_size, gradient)
return add(v, step)
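# Illustrative check (comment only, not in the original):
#   gradient_step([1.0, 2.0], [2.0, 4.0], -0.1) == [0.8, 1.6]
# i.e. each coordinate moves by -0.1 times the corresponding gradient component.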
def sum_of_squares_gradient(v: Vector) -> Vector:
return [2 * v_i for v_i in v]
# x ranges from -50 to 49, y is always 20 * x + 5
inputs = [(x, 20 * x + 5) for x in range(-50, 50)]
def linear_gradient(x: float, y: float, theta: Vector) -> Vector:
slope, intercept = theta
predicted = slope * x + intercept # The prediction of the model.
error = (predicted - y) # error is (predicted - actual)
squared_error = error ** 2 # We'll minimize squared error
grad = [2 * error * x, 2 * error] # using its gradient.
return grad
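# Worked example (comment only, not in the original): for the data point
# (x, y) = (2, 45) and theta = [20, 5], predicted = 20*2 + 5 = 45, so error = 0
# and the gradient is [0, 0]; theta already fits this point exactly.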
from typing import TypeVar, List, Iterator
T = TypeVar('T') # this allows us to type "generic" functions
def minibatches(dataset: List[T],
batch_size: int,
shuffle: bool = True) -> Iterator[List[T]]:
"""Generates `batch_size`-sized minibatches from the dataset"""
# Start indexes 0, batch_size, 2 * batch_size, ...
batch_starts = [start for start in range(0, len(dataset), batch_size)]
if shuffle: random.shuffle(batch_starts) # shuffle the batches
for start in batch_starts:
end = start + batch_size
yield dataset[start:end]
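# Illustrative use (comment only, not in the original): with shuffle=False,
#   list(minibatches(list(range(7)), batch_size=3, shuffle=False))
# yields [[0, 1, 2], [3, 4, 5], [6]].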
def main():
xs = range(-10, 11)
actuals = [derivative(x) for x in xs]
estimates = [difference_quotient(square, x, h=0.001) for x in xs]
# plot to show they're basically the same
import matplotlib.pyplot as plt
plt.title("Actual Derivatives vs. Estimates")
plt.plot(xs, actuals, 'rx', label='Actual') # red x
plt.plot(xs, estimates, 'b+', label='Estimate') # blue +
plt.legend(loc=9)
# plt.show()
plt.close()
def partial_difference_quotient(f: Callable[[Vector], float],
v: Vector,
i: int,
h: float) -> float:
"""Returns the i-th partial difference quotient of f at v"""
w = [v_j + (h if j == i else 0) # add h to just the ith element of v
for j, v_j in enumerate(v)]
return (f(w) - f(v)) / h
# "Using the Gradient" example
# pick a random starting point
v = [random.uniform(-10, 10) for i in range(3)]
for epoch in range(1000):
grad = sum_of_squares_gradient(v) # compute the gradient at v
v = gradient_step(v, grad, -0.01) # take a negative gradient step
print(epoch, v)
assert distance(v, [0, 0, 0]) < 0.001 # v should be close to 0
# First "Using Gradient Descent to Fit Models" example
from scratch.linear_algebra import vector_mean
# Start with random values for slope and intercept.
theta = [random.uniform(-1, 1), random.uniform(-1, 1)]
learning_rate = 0.001
for epoch in range(5000):
# Compute the mean of the gradients
grad = vector_mean([linear_gradient(x, y, theta) for x, y in inputs])
# Take a step in that direction
theta = gradient_step(theta, grad, -learning_rate)
print(epoch, theta)
slope, intercept = theta
assert 19.9 < slope < 20.1, "slope should be about 20"
assert 4.9 < intercept < 5.1, "intercept should be about 5"
# Minibatch gradient descent example
theta = [random.uniform(-1, 1), random.uniform(-1, 1)]
for epoch in range(1000):
for batch in minibatches(inputs, batch_size=20):
grad = vector_mean([linear_gradient(x, y, theta) for x, y in batch])
theta = gradient_step(theta, grad, -learning_rate)
print(epoch, theta)
slope, intercept = theta
assert 19.9 < slope < 20.1, "slope should be about 20"
assert 4.9 < intercept < 5.1, "intercept should be about 5"
# Stochastic gradient descent example
theta = [random.uniform(-1, 1), random.uniform(-1, 1)]
for epoch in range(100):
for x, y in inputs:
grad = linear_gradient(x, y, theta)
theta = gradient_step(theta, grad, -learning_rate)
print(epoch, theta)
slope, intercept = theta
assert 19.9 < slope < 20.1, "slope should be about 20"
assert 4.9 < intercept < 5.1, "intercept should be about 5"
if __name__ == "__main__": main()
| unlicense |
fzalkow/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
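# Illustrative usage (comment only, not part of the original module):
#   from sklearn.datasets import load_iris
#   iris = load_iris()   # Bunch whose .data has shape (150, 4) and .target length 150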
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
bhargav/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 55 | 19053 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
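# Illustrative usage of the estimator under test (comment only, not part of the
# original test module):
#   lshf = LSHForest(n_estimators=10, n_candidates=50).fit(X_train)
#   distances, indices = lshf.kneighbors(X_query, n_neighbors=5)
# where X_train and X_query are dense or sparse 2-D arrays of matching width.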
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
bikong2/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
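    # the distance from the separating hyperplane to each margin boundary is 1 / ||w||,
    # so the full margin width is 2 / ||w||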
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
roaminsight/roamresearch | Papers/Mittens/bootstrap.py | 2 | 13431 | from numpy.random import randint
from scipy.stats import norm
import numpy as np
import warnings
class InstabilityWarning(UserWarning):
"""Issued when results may be unstable."""
pass
# On import, make sure that InstabilityWarnings are not filtered out.
warnings.simplefilter('always',InstabilityWarning)
def ci(data, statfunction=np.average, alpha=0.05, n_samples=10000, method='bca', output='lowhigh', epsilon=0.001, multi=None):
"""
Given a set of data ``data``, and a statistics function ``statfunction`` that
applies to that data, computes the bootstrap confidence interval for
``statfunction`` on that data. Data points are assumed to be delineated by
axis 0.
Parameters
----------
data: array_like, shape (N, ...) OR tuple of array_like all with shape (N, ...)
Input data. Data points are assumed to be delineated by axis 0. Beyond this,
the shape doesn't matter, so long as ``statfunction`` can be applied to the
array. If a tuple of array_likes is passed, then samples from each array (along
axis 0) are passed in order as separate parameters to the statfunction. The
type of data (single array or tuple of arrays) can be explicitly specified
by the multi parameter.
statfunction: function (data, weights=(weights, optional)) -> value
This function should accept samples of data from ``data``. It is applied
to these samples individually.
If using the ABC method, the function _must_ accept a named ``weights``
parameter which will be an array_like with weights for each sample, and
must return a _weighted_ result. Otherwise this parameter is not used
or required. Note that numpy's np.average accepts this. (default=np.average)
alpha: float or iterable, optional
The percentiles to use for the confidence interval (default=0.05). If this
is a float, the returned values are (alpha/2, 1-alpha/2) percentile confidence
intervals. If it is an iterable, alpha is assumed to be an iterable of
each desired percentile.
    n_samples: int, optional
The number of bootstrap samples to use (default=10000)
method: string, optional
The method to use: one of 'pi', 'bca', or 'abc' (default='bca')
output: string, optional
The format of the output. 'lowhigh' gives low and high confidence interval
values. 'errorbar' gives transposed abs(value-confidence interval value) values
that are suitable for use with matplotlib's errorbar function. (default='lowhigh')
epsilon: float, optional (only for ABC method)
The step size for finite difference calculations in the ABC method. Ignored for
all other methods. (default=0.001)
multi: boolean, optional
If False, assume data is a single array. If True, assume data is a tuple/other
iterable of arrays of the same length that should be sampled together. If None,
decide based on whether the data is an actual tuple. (default=None)
Returns
-------
confidences: tuple of floats
The confidence percentiles specified by alpha
Calculation Methods
-------------------
'pi': Percentile Interval (Efron 13.3)
The percentile interval method simply returns the 100*alphath bootstrap
sample's values for the statistic. This is an extremely simple method of
confidence interval calculation. However, it has several disadvantages
compared to the bias-corrected accelerated method, which is the default.
'bca': Bias-Corrected Accelerated Non-Parametric (Efron 14.3) (default)
This method is much more complex to explain. However, it gives considerably
better results, and is generally recommended for normal situations. Note
that in cases where the statistic is smooth, and can be expressed with
weights, the ABC method will give approximated results much, much faster.
'abc': Approximate Bootstrap Confidence (Efron 14.4, 22.6)
This method provides approximated bootstrap confidence intervals without
actually taking bootstrap samples. This requires that the statistic be
smooth, and allow for weighting of individual points with a weights=
parameter (note that np.average allows this). This is _much_ faster
than all other methods for situations where it can be used.
Examples
--------
To calculate the confidence intervals for the mean of some numbers:
    >>> boot.ci( np.random.randn(100), np.average )
Given some data points in arrays x and y calculate the confidence intervals
for all linear regression coefficients simultaneously:
    >>> boot.ci( (x,y), scipy.stats.linregress )
References
----------
Efron, An Introduction to the Bootstrap. Chapman & Hall 1993
"""
# Deal with the alpha values
if np.iterable(alpha):
alphas = np.array(alpha)
else:
alphas = np.array([alpha/2,1-alpha/2])
    if multi is None:
if isinstance(data, tuple):
multi = True
else:
multi = False
# Ensure that the data is actually an array. This isn't nice to pandas,
# but pandas seems much much slower and the indexes become a problem.
if multi == False:
data = np.array(data)
tdata = (data,)
else:
tdata = tuple( np.array(x) for x in data )
# Deal with ABC *now*, as it doesn't need samples.
if method == 'abc':
n = tdata[0].shape[0]*1.0
nn = tdata[0].shape[0]
I = np.identity(nn)
ep = epsilon / n*1.0
p0 = np.repeat(1.0/n,nn)
t1 = np.zeros(nn); t2 = np.zeros(nn)
try:
t0 = statfunction(*tdata,weights=p0)
except TypeError as e:
raise TypeError("statfunction does not accept correct arguments for ABC ({0})".format(e.message))
# There MUST be a better way to do this!
for i in range(0,nn):
di = I[i] - p0
tp = statfunction(*tdata,weights=p0+ep*di)
tm = statfunction(*tdata,weights=p0-ep*di)
t1[i] = (tp-tm)/(2*ep)
t2[i] = (tp-2*t0+tm)/ep**2
sighat = np.sqrt(np.sum(t1**2))/n
a = (np.sum(t1**3))/(6*n**3*sighat**3)
delta = t1/(n**2*sighat)
cq = (statfunction(*tdata,weights=p0+ep*delta)-2*t0+statfunction(*tdata,weights=p0-ep*delta))/(2*sighat*ep**2)
bhat = np.sum(t2)/(2*n**2)
curv = bhat/sighat-cq
z0 = norm.ppf(2*norm.cdf(a)*norm.cdf(-curv))
Z = z0+norm.ppf(alphas)
za = Z/(1-a*Z)**2
# stan = t0 + sighat * norm.ppf(alphas)
abc = np.zeros_like(alphas)
for i in range(0,len(alphas)):
abc[i] = statfunction(*tdata,weights=p0+za[i]*delta)
if output == 'lowhigh':
return abc
elif output == 'errorbar':
return abs(abc-statfunction(tdata))[np.newaxis].T
else:
raise ValueError("Output option {0} is not supported.".format(output))
# We don't need to generate actual samples; that would take more memory.
# Instead, we can generate just the indexes, and then apply the statfun
# to those indexes.
bootindexes = bootstrap_indexes( tdata[0], n_samples )
stat = np.array([statfunction(*(x[indexes] for x in tdata)) for indexes in bootindexes])
stat.sort(axis=0)
# Percentile Interval Method
if method == 'pi':
avals = alphas
# Bias-Corrected Accelerated Method
elif method == 'bca':
# The value of the statistic function applied just to the actual data.
ostat = statfunction(*tdata)
# The bias correction value.
z0 = norm.ppf( ( 1.0*np.sum(stat < ostat, axis=0) ) / n_samples )
# Statistics of the jackknife distribution
jackindexes = jackknife_indexes(tdata[0])
jstat = [statfunction(*(x[indexes] for x in tdata)) for indexes in jackindexes]
jmean = np.mean(jstat,axis=0)
# Acceleration value
a = np.sum( (jmean - jstat)**3, axis=0 ) / ( 6.0 * np.sum( (jmean - jstat)**2, axis=0)**1.5 )
zs = z0 + norm.ppf(alphas).reshape(alphas.shape+(1,)*z0.ndim)
avals = norm.cdf(z0 + zs/(1-a*zs))
else:
raise ValueError("Method {0} is not supported.".format(method))
nvals = np.round((n_samples-1)*avals).astype('int')
if np.any(nvals==0) or np.any(nvals==n_samples-1):
warnings.warn("Some values used extremal samples; results are probably unstable.", InstabilityWarning)
elif np.any(nvals<10) or np.any(nvals>=n_samples-10):
warnings.warn("Some values used top 10 low/high samples; results may be unstable.", InstabilityWarning)
if output == 'lowhigh':
if nvals.ndim == 1:
# All nvals are the same. Simple broadcasting
return stat[nvals]
else:
# Nvals are different for each data point. Not simple broadcasting.
# Each set of nvals along axis 0 corresponds to the data at the same
# point in other axes.
return stat[(nvals, np.indices(nvals.shape)[1:].squeeze())]
elif output == 'errorbar':
if nvals.ndim == 1:
return abs(statfunction(data)-stat[nvals])[np.newaxis].T
else:
return abs(statfunction(data)-stat[(nvals, np.indices(nvals.shape)[1:])])[np.newaxis].T
else:
raise ValueError("Output option {0} is not supported.".format(output))
def ci_abc(data, stat=lambda x,y: np.average(x,weights=y), alpha=0.05, epsilon = 0.001):
"""
.. note:: Deprecated. This functionality is now rolled into ci.
Given a set of data ``data``, and a statistics function ``statfunction`` that
applies to that data, computes the non-parametric approximate bootstrap
confidence (ABC) interval for ``stat`` on that data. Data points are assumed
to be delineated by axis 0.
Parameters
----------
data: array_like, shape (N, ...)
Input data. Data points are assumed to be delineated by axis 0. Beyond this,
the shape doesn't matter, so long as ``statfunction`` can be applied to the
array.
stat: function (data, weights) -> value
The _weighted_ statistic function. This must accept weights, unlike for other
methods.
alpha: float or iterable, optional
The percentiles to use for the confidence interval (default=0.05). If this
is a float, the returned values are (alpha/2, 1-alpha/2) percentile confidence
intervals. If it is an iterable, alpha is assumed to be an iterable of
each desired percentile.
epsilon: float
The step size for finite difference calculations. (default=0.001)
Returns
-------
confidences: tuple of floats
The confidence percentiles specified by alpha
References
----------
Efron, An Introduction to the Bootstrap. Chapman & Hall 1993
bootstrap R package: http://cran.r-project.org/web/packages/bootstrap/
"""
# Deal with the alpha values
if not np.iterable(alpha):
alpha = np.array([alpha/2,1-alpha/2])
else:
alpha = np.array(alpha)
# Ensure that the data is actually an array. This isn't nice to pandas,
# but pandas seems much much slower and the indexes become a problem.
data = np.array(data)
n = data.shape[0]*1.0
nn = data.shape[0]
I = np.identity(nn)
ep = epsilon / n*1.0
p0 = np.repeat(1.0/n,nn)
t1 = np.zeros(nn); t2 = np.zeros(nn)
t0 = stat(data,p0)
# There MUST be a better way to do this!
for i in range(0,nn):
di = I[i] - p0
tp = stat(data,p0+ep*di)
tm = stat(data,p0-ep*di)
t1[i] = (tp-tm)/(2*ep)
t2[i] = (tp-2*t0+tm)/ep**2
sighat = np.sqrt(np.sum(t1**2))/n
a = (np.sum(t1**3))/(6*n**3*sighat**3)
delta = t1/(n**2*sighat)
cq = (stat(data,p0+ep*delta)-2*t0+stat(data,p0-ep*delta))/(2*sighat*ep**2)
bhat = np.sum(t2)/(2*n**2)
curv = bhat/sighat-cq
z0 = norm.ppf(2*norm.cdf(a)*norm.cdf(-curv))
Z = z0+norm.ppf(alpha)
za = Z/(1-a*Z)**2
# stan = t0 + sighat * norm.ppf(alpha)
abc = np.zeros_like(alpha)
for i in range(0,len(alpha)):
abc[i] = stat(data,p0+za[i]*delta)
return abc
def bootstrap_indexes(data, n_samples=10000):
"""
Given data points data, where axis 0 is considered to delineate points, return
an array where each row is a set of bootstrap indexes. This can be used as a list
of bootstrap indexes as well.
"""
return randint(data.shape[0],size=(n_samples,data.shape[0]) )
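# For example, with data of length 3 and n_samples=2 this might return
# array([[0, 2, 1], [2, 2, 0]]); each row indexes one bootstrap resample.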
def jackknife_indexes(data):
"""
Given data points data, where axis 0 is considered to delineate points, return
a list of arrays where each array is a set of jackknife indexes.
For a given set of data Y, the jackknife sample J[i] is defined as the data set
Y with the ith data point deleted.
"""
base = np.arange(0,len(data))
return (np.delete(base,i) for i in base)
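# For example, for data of length 3 this yields the index sets [1, 2], [0, 2] and [0, 1],
# i.e. the full data set with one point left out at a time.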
def subsample_indexes(data, n_samples=1000, size=0.5):
"""
Given data points data, where axis 0 is considered to delineate points, return
    a list of arrays where each array indexes a subsample of the data of size
``size``. If size is >= 1, then it will be taken to be an absolute size. If
size < 1, it will be taken to be a fraction of the data size. If size == -1, it
will be taken to mean subsamples the same size as the sample (ie, permuted
samples)
"""
if size == -1:
size = len(data)
elif (size < 1) and (size > 0):
size = round(size*len(data))
elif size > 1:
pass
else:
raise ValueError("size cannot be {0}".format(size))
base = np.tile(np.arange(len(data)),(n_samples,1))
for sample in base: np.random.shuffle(sample)
return base[:,0:size]
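# Minimal usage sketch (illustrative only, not part of the original module): bootstrap a
# 95% confidence interval for the mean of synthetic data with the default BCa method and
# with the simpler percentile-interval method.
if __name__ == "__main__":
    np.random.seed(0)
    sample = np.random.randn(100)
    print("BCa 95% CI for the mean:", ci(sample, np.average, alpha=0.05, n_samples=2000))
    print("PI  95% CI for the mean:", ci(sample, np.average, alpha=0.05, n_samples=2000, method='pi'))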
| apache-2.0 |
jakejhansen/minesweeper_solver | policy_gradients/full_6x6_CNN/train.py | 1 | 10287 | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.nn import relu, softmax
import gym
import pickle
from sklearn.preprocessing import normalize
import sys
import os
sys.path.append('../../')
from minesweeper_tk import Minesweeper
model = os.path.realpath(__file__).split('/')[-2]
# training settings
epochs = 100000 # number of training batches
batch_size = 200 # number of timesteps in a batch
rollout_limit = 50 # max rollout length
discount_factor = 0 # reward discount factor (gamma), 1.0 = no discount
learning_rate = 0.00005 # you know this by now #0.001,
#5600: 78% win --> LR: 0.0001
#6801: 87% win --> LR: 0.00002
early_stop_loss = 0 # stop training if loss < early_stop_loss, 0 or False to disable
""" condensed
epochs = 100000 # number of training batches
batch_size = 400 # number of timesteps in a batch
rollout_limit = 50 # max rollout length
discount_factor = 0 # reward discount factor (gamma), 1.0 = no discount
learning_rate = 0.00004 # you know this by now #0.0005
early_stop_loss = 0 # stop training if loss < early_stop_loss, 0 or False to disable
"""
""" 261 epocs to learn 2 specific board (overfit)
epochs = 10000 # number of training batches
batch_size = 200 # number of timesteps in a batch
rollout_limit = 130 # max rollout length
discount_factor = 0 # reward discount factor (gamma), 1.0 = no discount
learning_rate = 0.001 # you know this by now
early_stop_loss = 0 # stop training if loss < early_stop_loss, 0 or False to disable
"""
# setup policy network
n = 6
n_inputs = n*n*10
n_hidden = 6*6*8
n_hidden2 = 220
n_hidden3 = 220
n_hidden4 = 220
n_outputs = n*n
tf.reset_default_graph()
states_pl = tf.placeholder(tf.float32, [None, n_inputs], name='states_pl')
actions_pl = tf.placeholder(tf.int32, [None, 2], name='actions_pl')
advantages_pl = tf.placeholder(tf.float32, [None], name='advantages_pl')
learning_rate_pl = tf.placeholder(tf.float32, name='learning_rate_pl')
input_layer = tf.reshape(states_pl, [-1, n, n, 10])
conv1 = tf.layers.conv2d(inputs=input_layer,filters=18,kernel_size=[5, 5],padding="same", activation=tf.nn.relu)
conv2 = tf.layers.conv2d(inputs=conv1,filters=36,kernel_size=[3, 3],padding="same", activation=tf.nn.relu)
conv2_flat = tf.contrib.layers.flatten(conv2)
l_hidden = tf.layers.dense(inputs=conv2_flat, units=n_hidden, activation=relu, name='l_hidden')
l_hidden2 = tf.layers.dense(inputs=l_hidden, units=n_hidden2, activation=relu, name='l_hidden2')
l_hidden3 = tf.layers.dense(inputs=l_hidden2, units=n_hidden3, activation=relu, name='l_hidden3')
l_out = tf.layers.dense(inputs=l_hidden3, units=n_outputs, activation=softmax, name='l_out')
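# Shape summary of the network above: the flattened 6x6x10 one-hot board state is reshaped
# to 6x6x10, passed through two same-padded conv layers (18 filters of 5x5, then 36 filters
# of 3x3), flattened, fed through three fully connected ReLU layers and finally a softmax
# over the 36 board cells (one candidate action per cell).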
# print network
print('states_pl:', states_pl.get_shape())
print('actions_pl:', actions_pl.get_shape())
print('advantages_pl:', advantages_pl.get_shape())
print('l_hidden:', l_hidden.get_shape())
print('l_hidden2:', l_hidden2.get_shape())
print('l_hidden3:', l_hidden3.get_shape())
print('l_out:', l_out.get_shape())
total_parameters = 0
for variable in tf.trainable_variables():
# shape is an array of tf.Dimension
#import IPython
#IPython.embed()
name = variable.name
shape = variable.get_shape()
print(name, shape, len(shape))
variable_parameters = 1
for dim in shape:
#print(dim)
variable_parameters *= dim.value
print(variable_parameters)
total_parameters += variable_parameters
print(total_parameters)
# define loss and optimizer
loss_f = -tf.reduce_mean(tf.multiply(tf.log(tf.gather_nd(l_out, actions_pl)), advantages_pl))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate_pl, beta1=0.8, beta2=0.92)
train_f = optimizer.minimize(loss_f)
saver = tf.train.Saver() # we use this later to save the model
# test forward pass
from minesweeper_tk import Minesweeper
env = Minesweeper(display=False, ROWS = n, COLS = n, MINES = 6, OUT = "FULL", rewards = {"win" : 1, "loss" : -1, "progress" : 0.9, "noprogress" : -0.3, "YOLO" : -0.3})
state = env.stateConverter(env.get_state()).flatten()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
action_probabilities = sess.run(fetches=l_out, feed_dict={states_pl: [state]})
print(action_probabilities)
# helper functions
def get_rollout(sess, env, rollout_limit=None, stochastic=False, seed=None):
"""Generate rollout by iteratively evaluating the current policy on the environment."""
rollout_limit = rollout_limit
env.reset()
s = env.stateConverter(env.get_state()).flatten()
states, actions, rewards = [], [], []
for i in range(rollout_limit):
a = get_action(sess, s, stochastic)
s1, r, done, _ = env.step(a)
states.append(s)
actions.append(a)
rewards.append(r)
s = s1
if done: break
return states, actions, rewards, i+1
def get_action(sess, state, stochastic=False):
"""Choose an action, given a state, with the current policy network."""
a_prob = sess.run(fetches=l_out, feed_dict={states_pl: np.atleast_2d(state)})
if stochastic:
# sample action from distribution
return (np.cumsum(np.asarray(a_prob)) > np.random.rand()).argmax()
else:
# select action with highest probability
return a_prob.argmax()
def get_advantages(rewards, rollout_limit, discount_factor, eps=1e-12):
"""Compute advantages"""
returns = get_returns(rewards, rollout_limit, discount_factor)
# standardize columns of returns to get advantages
advantages = (returns - np.mean(returns, axis=0)) / (np.std(returns, axis=0) + eps)
# restore original rollout lengths
advantages = [adv[:len(rewards[i])] for i, adv in enumerate(advantages)]
return advantages
def get_returns(rewards, rollout_limit, discount_factor):
"""Compute the cumulative discounted rewards, a.k.a. returns."""
returns = np.zeros((len(rewards), rollout_limit))
for i, r in enumerate(rewards):
returns[i, len(r) - 1] = r[-1]
for j in reversed(range(len(r)-1)):
returns[i,j] = r[j] + discount_factor * returns[i,j+1]
return returns
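# Worked example (illustrative): with rewards [1, 0, 2] and discount_factor 0.5 the returns
# are filled back to front: R2 = 2, R1 = 0 + 0.5*2 = 1, R0 = 1 + 0.5*1 = 1.5. With
# discount_factor = 0, as configured above, each return is just the immediate reward.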
def get_winrate(sess, env):
games = 0
moves = 0
stuck = 0
won_games = 0
lost_games = 0
r = 0
while games < 1000:
while True:
s = env.stateConverter(env.get_state()).flatten()
if r < 0:
a = get_action(sess, s, stochastic=True)
else:
a = get_action(sess, s, stochastic=False)
moves += 1
s, r, done, _ = env.step(a)
if r == 1:
won_games += 1
if r == -1:
lost_games += 1
if done:
games += 1
env.reset()
moves = 0
break
elif moves >= 30:
stuck += 1
games += 1
env.lost = env.lost + 1
env.reset()
moves = 0
break
return(won_games/games)
# train policy network
try:
statistics = []
win_rate = 0
win_rate_best = 0
with tf.Session() as sess:
#Load last model
try:
            stats = pickle.load(open("stats.p", "rb"))
saver.restore(sess, "{}.ckpt".format(model))
epoch_start = stats[len(stats)-1][0]
env.nb_actions = stats[len(stats)-1][1]
win_rate = stats[len(stats)-1][4]
win_rate_best = win_rate
except:
print("Couldn't find old model")
sess.run(tf.global_variables_initializer())
epoch_start = 0
for epoch in range(epoch_start, epochs):
# generate rollouts until batch_size total timesteps are collected
states, actions, rewards = [], [], []
timesteps = 0
while timesteps < batch_size:
_rollout_limit = min(rollout_limit, batch_size - timesteps) # limit rollout to match batch_size
                s, a, r, t = get_rollout(sess, env, _rollout_limit, stochastic=True, seed=epoch)
states.append(s)
actions.append(a)
rewards.append(r)
timesteps += t
# compute advantages
advantages = get_advantages(rewards, rollout_limit, discount_factor)
# policy gradient update
loss, _ = sess.run(fetches=[loss_f, train_f], feed_dict={
states_pl: np.concatenate(states),
actions_pl: np.column_stack((np.arange(timesteps), np.concatenate(actions))),
advantages_pl: np.concatenate(advantages),
learning_rate_pl: learning_rate
})
# store and print training statistics
mtr = np.mean([np.sum(r) for r in rewards])
statistics.append([epoch, env.get_nbactions(), mtr, loss, win_rate])
if epoch % 10 == 0:
print('%4d. training reward: %6.2f, loss: %7.4f' % (epoch+1, mtr, loss))
if epoch % 100 == 0:
saver.save(sess, "{}.ckpt".format(model))
if epoch % 400 == 0:
#Get win-rate
win_rate = get_winrate(sess, env)
print(win_rate)
if win_rate > win_rate_best:
saver.save(sess, "{}_best.ckpt".format(model))
print('done')
    # save statistics
try:
stats = pickle.load(open("stats.p", "rb"))
for i in range(len(statistics)):
stats.append(statistics[i])
statistics = stats
except:
print("No old model data found, saving into new file")
pickle.dump(statistics, open("stats.p", "wb"))
except KeyboardInterrupt:
print('Saving Statistics')
try:
stats = pickle.load(open("stats.p", "rb"))
for i in range(len(statistics)):
stats.append(statistics[i])
statistics = stats
except:
print("No old model data found, saving into new file")
pickle.dump(statistics, open("stats.p", "wb"))
| mit |
jp-barron/Susi_Simulation | tools/pyBAR_converter_old.py | 2 | 10125 | """This script converts a CERN ROOT TTree into a hdf5 table.
"""
import tables as tb
import numpy as np
import ctypes
import progressbar
import os
import math
import ROOT as r
from pybar.fei4 import register_utils
from pybar.analysis.RawDataConverter import data_struct
def get_charge_calibration(tdc_calibation_file, plsr_dac_calibation_file):
with tb.openFile(tdc_calibation_file, mode="r") as in_file_calibration_h5:
tdc_calibration, tdc_error = in_file_calibration_h5.root.HitOrCalibration[:, :, :, 1], in_file_calibration_h5.root.HitOrCalibration[:, :, :, 3]
tot_calibration, tot_error = in_file_calibration_h5.root.HitOrCalibration[:, :, :, 0], in_file_calibration_h5.root.HitOrCalibration[:, :, :, 2]
tdc_calibration_values = in_file_calibration_h5.root.HitOrCalibration.attrs.scan_parameter_values[:]
global_config = register_utils.parse_global_config(plsr_dac_calibation_file)
c_high, vcal_c0, vcal_c1 = global_config['C_Inj_High'], global_config['Vcal_Coeff_0'], global_config['Vcal_Coeff_1']
charge_calibration_values = (vcal_c0 + vcal_c1 * tdc_calibration_values) * c_high / 0.16022
return charge_calibration_values, tdc_calibration, tdc_error, tot_calibration, tot_error
def create_hit_table(input_file_name, tdc_calibation_file, plsr_dac_calibation_file, n_sub_files=8): # loops over all root files and merges the data into a hdf5 file aligned at the event number
print 'Converting data from CERN ROOT TTree to hdf5 table'
charge_calibration_values, tdc_calibration, tdc_error, tot_calibration, tot_error = get_charge_calibration(tdc_calibation_file, plsr_dac_calibation_file)
    # add all files that have the input_file_name prefix and load their data
input_file_names = [input_file_name + '_t%d.root' % index for index in range(n_sub_files) if os.path.isfile(input_file_name + '_t%d.root' % index)]
n_files = len(input_file_names)
input_files_root = [r.TFile(file_name, 'read') for file_name in input_file_names]
pixel_digits = [input_file_root.Get('EventData').Get('Pixel Digits') for input_file_root in input_files_root]
n_hits = [pixel_digit.GetEntries() for pixel_digit in pixel_digits] # total pixel hits to analyze
n_total_hits = sum(n_hits)
with tb.open_file(input_file_name + '_interpreted.h5', 'w') as out_file_h5:
hit_table = out_file_h5.create_table(out_file_h5.root, name='Hits', description=data_struct.HitInfoTable, title='hit_data', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        # tmp data structures to be filled by ROOT
data = {}
for index, pixel_digit in enumerate(pixel_digits):
column_data = {}
for branch in pixel_digit.GetListOfBranches(): # loop over the branches
column_data[branch.GetName()] = np.zeros(shape=1, dtype=np.int32)
branch.SetAddress(column_data[branch.GetName()].data)
data[index] = column_data
        # result data structure to be filled in the following loop
hits = np.zeros((n_total_hits,), dtype=tb.dtype_from_descr(data_struct.HitInfoTable))
# get file index with lowest event number
for pixel_digit in pixel_digits:
pixel_digit.GetEntry(0)
min_event_number = min([data[index]['event'][0] for index in range(n_files)])
actual_file_index = np.where(np.array([data[index]['event'][0] for index in range(n_files)]) == min_event_number)[0][0]
indices = [0] * n_files
table_index = 0
actual_data = data[actual_file_index]
actual_event_number = actual_data['event'][0]
last_valid_event_number = 0
last_tdc = 0
expected_event_number = actual_event_number
indices[actual_file_index] = 1
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=n_total_hits, term_width=80)
progress_bar.start()
def add_actual_data(actual_data, table_index):
if actual_data['column'] >= 0 and actual_data['column'] < 80 and actual_data['row'] >= 0 and actual_data['row'] < 336:
tdc_interpolation = interp1d(x=charge_calibration_values, y=tdc_calibration[actual_data['column'], actual_data['row']], kind='slinear', bounds_error=False, fill_value=0)
tdc = tdc_interpolation(actual_data['charge'])
tot_interpolation = interp1d(x=charge_calibration_values, y=tot_calibration[actual_data['column'], actual_data['row']], kind='slinear', bounds_error=False, fill_value=0)
tot = tot_interpolation(actual_data['charge'])
            if math.isnan(tdc): # do not add hits where tdc is nan, these pixels have a very high threshold or do not work
return table_index
if tdc == 0 and actual_data['charge'] > 10000: # no calibration for TDC due to high charge, thus mark as TDC overflow event
hits[table_index]['event_status'] |= 0b0000010000000000
tdc = 4095
if tot == 0 and actual_data['charge'] > 10000: # no calibration for TOT due to high charge, thus set max tot
tot = 13
hits[table_index]['event_status'] |= 0b0000000100000000
hits[table_index]['event_number'] = actual_data['event'][0].astype(np.int64)
hits[table_index]['column'] = (actual_data['column'] + 1).astype(np.uint8)
hits[table_index]['row'] = (actual_data['row'] + 1).astype(np.uint16)
hits[table_index]['TDC'] = int(actual_data['charge'] / 300.)
hits[table_index]['tot'] = int(tot)
table_index += 1
return table_index
while True:
actual_event_number = actual_data['event'][0]
if (actual_event_number == expected_event_number or actual_event_number == expected_event_number - 1): # check if event number increases
actual_index, actual_digits, actual_data = indices[actual_file_index], pixel_digits[actual_file_index], data[actual_file_index]
table_index = add_actual_data(actual_data, table_index)
else: # event number does not increase, thus the events are in another file --> switch file or the event number is missing
file_event_numbers = [data[file_index]['event'][0] for file_index in range(n_files)] # all files actual event number
actual_file_index = np.where(file_event_numbers == min(file_event_numbers))[0][0]
actual_index, actual_digits, actual_data = indices[actual_file_index], pixel_digits[actual_file_index], data[actual_file_index]
actual_event_number = actual_data['event'][0]
table_index = add_actual_data(actual_data, table_index)
progress_bar.update(table_index)
expected_event_number = actual_event_number + 1
actual_digits.GetEntry(actual_index)
if indices[actual_file_index] < n_hits[actual_file_index]: # simply stop when the first file is fully iterated
indices[actual_file_index] += 1
else:
break
# Set missing data and store to file
hits[:table_index]['LVL1ID'] = hits[:table_index]['event_number'] % 255
hits[:table_index]['BCID'] = hits[:table_index]['LVL1ID']
hits[:table_index]['relative_BCID'] = 6
hit_table.append(hits[:table_index])
progress_bar.finish()
for input_file_root in input_files_root:
input_file_root.Close()
if __name__ == "__main__":
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
input_file_name = r'/home/davidlp/geant4/SourceSim-build/SourceSimulation'
plsr_dac_calibation_file = r'/home/davidlp/git/SourceSim/converter/calibration_data/plsr_dac_scan.cfg'
tdc_calibation_file = r'/home/davidlp/git/SourceSim/converter/calibration_data/hit_or_calibration_calibration.h5'
create_hit_table(input_file_name, tdc_calibation_file, plsr_dac_calibation_file)
# with tb.openFile(tdc_calibation_file, mode="r") as in_file_calibration_h5:
# tdc_calibration, tdc_error = in_file_calibration_h5.root.HitOrCalibration[:, :, :, 1], in_file_calibration_h5.root.HitOrCalibration[:, :, :, 3]
# tot_calibration, tot_error = in_file_calibration_h5.root.HitOrCalibration[:, :, :, 0], in_file_calibration_h5.root.HitOrCalibration[:, :, :, 2]
# tdc_calibration_values = in_file_calibration_h5.root.HitOrCalibration.attrs.scan_parameter_values[:]
# global_config = register_utils.parse_global_config(plsr_dac_calibation_file)
# c_high, vcal_c0, vcal_c1 = global_config['C_Inj_High'], global_config['Vcal_Coeff_0'], global_config['Vcal_Coeff_1']
# charge_calibration_values = (vcal_c0 + vcal_c1 * tdc_calibration_values) * c_high / 0.16022
#
# column, row = 20, 30
# print charge_calibration_values, tdc_calibration[column, row]
# tdc_interpolation = interp1d(x=charge_calibration_values, y=tdc_calibration[column, row], kind='slinear', bounds_error=False, fill_value=0)
# tdc_error_interpolation = interp1d(x=charge_calibration_values, y=tdc_error[column, row], kind='slinear', bounds_error=False, fill_value=0)
# tot_interpolation = interp1d(x=charge_calibration_values, y=tot_calibration[column, row], kind='slinear', bounds_error=False, fill_value=0)
# tot_error_interpolation = interp1d(x=charge_calibration_values, y=tot_error[column, row], kind='slinear', bounds_error=False, fill_value=0)
# plt.plot(np.arange(2000, 10000, 1), tdc_interpolation(np.arange(2000, 10000, 1)), '-')
# plt.plot(np.arange(2000, 10000, 1), tdc_error_interpolation(np.arange(2000, 10000, 1)), '-')
# plt.plot(np.arange(2000, 10000, 1), tot_interpolation(np.arange(2000, 10000, 1)), '-')
# plt.plot(np.arange(2000, 10000, 1), tot_error_interpolation(np.arange(2000, 10000, 1)), '-')
# plt.show()
| bsd-2-clause |
5aurabhpathak/masters-thesis | test/visualise_corpus.py | 2 | 1736 | #!/bin/env python3
#Author: Saurabh Pathak
'''graph visualizer for corpus'''
from matplotlib import pyplot as pl, rcParams
from statistics import mean
from os import environ
from sys import argv
def figplot(prefix, output=environ['THESISDIR']+'/data/'):
with open(prefix+'.en', encoding='utf-8') as en_ip, open(prefix+'.hi', encoding='utf-8') as hi_ip:
de, dh, f, k = [], [], [], 0
for hi_line, en_line in zip(hi_ip, en_ip):
i, j = len(en_line.split()), len(hi_line.split())
de += i,
dh += j,
if j != 0: f += i/j,
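            # f collects the per-pair fertility, i.e. English tokens per Hindi token;
            # pairs with an empty Hindi side are skipped to avoid division by zero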
k += 1
pl.figure('Sentence lengths / Fertility -- '+prefix, figsize=(12,7))
pl.suptitle('Mean fertility ratio: {}\nNumber of pairs: {}'.format(round(mean(f), 4), k))
def plotter(t,n,a):
pl.subplot(n)
pl.title(t)
counts, bins, patches = pl.hist(a, bins=[0,11,21,31,41,51,61,71,81,91,101])
pl.xlabel('Sentence lengths in words')
xlabels = [0]+[b-1 for b in bins[1:]]
pl.xticks(bins)
pl.gca().set_xticklabels(xlabels)
pl.ticklabel_format(axis='y', style='sci', scilimits=(0,4))
pl.xlim(xmin=0)
pl.ylim(ymin=0)
for i, v in enumerate(counts): pl.text(bins[i] + 1, v + 3000, round(v/100000, 1))
#for i, v in enumerate(counts): pl.text(bins[i] + 1, v + 3, int(v))
pl.rcParams['patch.force_edgecolor'] = True
plotter('en', 122, de)
plotter('hi', 121, dh)
pl.ylabel('Number of sentences (x10$^5$)')
#pl.ylabel('Number of sentences')
pl.savefig(output+prefix.split('/')[-1]+'.png', format='png', bbox_inches='tight')
pl.show()
if __name__=="__main__":
rcParams.update({'font.size': 16})
figplot(argv[1])
| gpl-3.0 |
GCerar/PythonTecaj | LTFE/staticne_slike.py | 1 | 3336 | from PIL import Image, ImageEnhance, ImageOps
import matplotlib as mpl
import matplotlib.pyplot as plt
def histogram(slika, pokazi=True):
"""Izriše histogram za dano sliko. Ta je lahko sivinska ali barvna.
Args:
slika (object): PIL/Pillow objekt
pokazi (bool): Če želimo takoj videti graf. (Default: True)
Retuns:
object: PIL/Pillow objekt
"""
hst = slika.histogram()
st_barv = len(histogram) // 256 # stevilo bar; 1 = sivinska, 3 = RGB
if st_barv == 1:
bl = hst
plt.plot(bl, color='bl', alpha=0.5)
plt.fill_between(range(255), bl, color='black', alpha=0.5)
elif st_barv == 3:
r, g, b = hst[:256], hst[256:512], hst[512:] # Razkosamo na 3
plt.plot(r, color='r', alpha=0.5)
plt.fill_between(range(256), r, color='red', alpha=0.5)
plt.plot(g, color='g', alpha=0.5)
plt.fill_between(range(256), g, color='green', alpha=0.5)
plt.plot(b, color='b', alpha=0.5)
plt.fill_between(range(256), b, color='blue', alpha=0.5)
else:
        raise TypeError('Can only process 1- and 3-channel histograms!')
plt.autoscale(tight=True)
if pokazi:
plt.show()
def v_sivinsko(slika, autokontrast=False, izravnava=False):
"""Sliko pretvori v sivinski barvni prostor.
Args:
slika: (object): PIL/Pillow objekt slike
autokontrast (bool): Avtomatsko popravi kontrast slike. (Default: False)
izravnava (bool): Skuša izravati histogram slike. (Default: False)
Returns:
object: PIL/Pillow objekt
"""
_slika = slika.copy()
if _slika.mode != 'L':
_slika.convert('L')
if autokontrast:
_slika = ImageOps.autocontrast(_slika)
if izravnava:
_slika = ImageOps.equalize(_slika)
return _slika
def v_crnobelo(slika, autokontrast=False, izravnava=False):
"""Sliko pretvori v dvorbarvni prostor.
Args:
slika: (object): PIL/Pillow objekt slike
autokontrast (bool): Avtomatsko popravi kontrast slike. (Default: False)
izravnava (bool): Skuša izravati histogram slike. (Default: False)
Returns:
object: PIL/Pillow objekt
"""
_slika = v_sivinsko(slika, autokontrast=autokontrast, izravnava=izravnava)
return _slika.point(lambda x: 0 if x < 128 else 255, '1')
def v_sepia(slika, autokontrast=False, izravnava=False):
"""Pretvori slika v sepia (učinek starinske slike).
Args:
slika: (object): PIL/Pillow objekt slike
autokontrast (bool): Avtomatsko popravi kontrast slike. (Default: False)
izravnava (bool): Skuša izravati histogram slike. (Default: False)
Returns:
object: PIL/Pillow objekt
"""
def linearizacija(bela_barva):
"""Linearizira barvni prostor med med črno in poljubno barvo.
Args:
bela_barva (array): Mora biti oblike [int, int, int].
Returns:
array: Linearizirana barvna paleta
"""
ramp = []
r, g, b = bela_barva
for i in range(255):
ramp.extend((round(r*i/255), round(g*i/255), round(b*i/255)))
return ramp
sepia = linearizacija([255, 240, 192])
sivinska = v_sivinsko(slika, autokontrast=autokontrast, izravnava=izravnava)
    sivinska.putpalette(sepia)
    return sivinska
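# Minimal usage sketch (the filename 'example.jpg' is illustrative only):
if __name__ == '__main__':
    slika = Image.open('example.jpg')
    histogram(slika)
    v_crnobelo(slika, autokontrast=True).show()
    v_sepia(slika).show()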
| mit |
yavalvas/yav_com | build/matplotlib/examples/api/sankey_demo_links.py | 9 | 1721 | """Demonstrate/test the Sankey class by producing a long chain of connections.
"""
from itertools import cycle
import matplotlib.pyplot as plt
from matplotlib.sankey import Sankey
links_per_side = 6
def side(sankey, n=1):
"""Generate a side chain."""
prior = len(sankey.diagrams)
colors = cycle(['orange', 'b', 'g', 'r', 'c', 'm', 'y'])
for i in range(0, 2*n, 2):
sankey.add(flows=[1, -1], orientations=[-1, -1],
patchlabel=str(prior+i), facecolor=next(colors),
prior=prior+i-1, connect=(1, 0), alpha=0.5)
sankey.add(flows=[1, -1], orientations=[1, 1],
patchlabel=str(prior+i+1), facecolor=next(colors),
prior=prior+i, connect=(1, 0), alpha=0.5)
def corner(sankey):
"""Generate a corner link."""
prior = len(sankey.diagrams)
sankey.add(flows=[1, -1], orientations=[0, 1],
patchlabel=str(prior), facecolor='k',
prior=prior-1, connect=(1, 0), alpha=0.5)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[],
title="Why would you want to do this?\n(But you could.)")
sankey = Sankey(ax=ax, unit=None)
sankey.add(flows=[1, -1], orientations=[0, 1],
patchlabel="0", facecolor='k',
rotation=45)
side(sankey, n=links_per_side)
corner(sankey)
side(sankey, n=links_per_side)
corner(sankey)
side(sankey, n=links_per_side)
corner(sankey)
side(sankey, n=links_per_side)
sankey.finish()
# Notice:
# 1. The alignment doesn't drift significantly (if at all; with 16007
# subdiagrams there is still closure).
# 2. The first diagram is rotated 45 deg, so all other diagrams are rotated
# accordingly.
plt.show()
| mit |
aymanim/rosdep | test/test_rosdep_dependency_graph.py | 7 | 7886 | # Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author William Woodall/[email protected]
def test_DependencyGraph_Linear():
from rosdep2.dependency_graph import DependencyGraph
# Normal A-B-C
dg = DependencyGraph()
dg['A']['installer_key'] = 'a_installer'
dg['A']['install_keys'] = ['a']
dg['A']['dependencies'] = ['B']
dg['B']['installer_key'] = 'b_installer'
dg['B']['install_keys'] = ['b']
dg['B']['dependencies'] = ['C']
dg['C']['installer_key'] = 'c_installer'
dg['C']['install_keys'] = ['c']
dg['C']['dependencies'] = []
result = dg.get_ordered_dependency_list()
expected = [('c_installer', ['c']), ('b_installer', ['b']), ('a_installer', ['a'])]
assert result == expected, "Results did not match expectations: %s == %s"%(str(result),str(expected))
def test_DependencyGraph_Cycle():
from rosdep2.dependency_graph import DependencyGraph
# Full Loop A-B-C-A-...
dg = DependencyGraph()
dg['A']['installer_key'] = 'a_installer'
dg['A']['install_keys'] = ['a']
dg['A']['dependencies'] = ['B']
dg['B']['installer_key'] = 'b_installer'
dg['B']['install_keys'] = ['b']
dg['B']['dependencies'] = ['C']
dg['C']['installer_key'] = 'c_installer'
dg['C']['install_keys'] = ['c']
dg['C']['dependencies'] = ['A']
try:
result = dg.get_ordered_dependency_list()
assert False, "Doesn't fail, it should fail with an AssertionError because of the cycle."
except AssertionError as e:
if not str(e).startswith("A cycle in the dependency graph occurred with key"):
assert False, "Throws AssertionError, but with the wrong message. Error was: %s: %s"%(type(e),str(e))
except Exception as e:
assert False, "Throws and Exception, but not an AssertionError. Error was: %s: %s"%(type(e),str(e))
def test_DependencyGraph_Short_Cycle():
from rosdep2.dependency_graph import DependencyGraph
# Short cycle A-B-C-D-B-C-D-...
dg = DependencyGraph()
dg['A']['installer_key'] = 'a_installer'
dg['A']['install_keys'] = ['a']
dg['A']['dependencies'] = ['B']
dg['B']['installer_key'] = 'b_installer'
dg['B']['install_keys'] = ['b']
dg['B']['dependencies'] = ['C']
dg['C']['installer_key'] = 'c_installer'
dg['C']['install_keys'] = ['c']
dg['C']['dependencies'] = ['D']
dg['D']['installer_key'] = 'd_installer'
dg['D']['install_keys'] = ['d']
dg['D']['dependencies'] = ['B']
try:
result = dg.get_ordered_dependency_list()
assert False, "Doesn't fail, it should fail with an AssertionError because of the cycle."
except AssertionError as e:
if not str(e).startswith("A cycle in the dependency graph occurred with key"):
assert False, "Throws AssertionError, but with the wrong message. Error was: %s: %s"%(type(e),str(e))
except Exception as e:
assert False, "Throws and Exception, but not an AssertionError. Error was: %s: %s"%(type(e),str(e))
def test_DependencyGraph_Invalid_Key():
from rosdep2.dependency_graph import DependencyGraph
# Invalid graph A-B-C where C doesn't exist
dg = DependencyGraph()
dg['A']['installer_key'] = 'a_installer'
dg['A']['install_keys'] = ['a']
dg['A']['dependencies'] = ['B']
dg['B']['installer_key'] = 'b_installer'
dg['B']['install_keys'] = ['b']
dg['B']['dependencies'] = ['C']
try:
result = dg.get_ordered_dependency_list()
assert False, "Doesn't fail, it should fail with an KeyError because of the invalid rosdep key."
except KeyError as e:
if not str(e).endswith("does not exist in the dictionary of resolutions.'"):
assert False, "Throws KeyError, but with the wrong message. Error was: %s: %s"%(type(e),str(e))
except Exception as e:
assert False, "Throws and Exception, but not an KeyError. Error was: %s: %s"%(type(e),str(e))
def test_DependencyGraph_Invalid_Key2():
from rosdep2.dependency_graph import DependencyGraph
# Invalid graph A-B-C where B doesn't exist
dg = DependencyGraph()
dg['A']['installer_key'] = 'a_installer'
dg['A']['install_keys'] = ['a']
dg['A']['dependencies'] = ['B']
dg['C']['installer_key'] = 'c_installer'
dg['C']['install_keys'] = ['c']
dg['C']['dependencies'] = []
try:
result = dg.get_ordered_dependency_list()
assert False, "Doesn't fail, it should fail with an KeyError because of the invalid rosdep key."
except KeyError as e:
if not str(e).endswith("does not exist in the dictionary of resolutions.'"):
assert False, "Throws KeyError, but with the wrong message. Error was: %s: %s"%(type(e),str(e))
except Exception as e:
assert False, "Throws and Exception, but not an KeyError. Error was: %s: %s"%(type(e),str(e))
def test_DependencyGraph_Multi_Root():
from rosdep2.dependency_graph import DependencyGraph
# Multi root, shared dependency: A-B-C, D-C
dg = DependencyGraph()
dg['A']['installer_key'] = 'a_installer'
dg['A']['install_keys'] = ['a']
dg['A']['dependencies'] = ['B']
dg['B']['installer_key'] = 'b_installer'
dg['B']['install_keys'] = ['b']
dg['B']['dependencies'] = ['C']
dg['C']['installer_key'] = 'c_installer'
dg['C']['install_keys'] = ['c']
dg['C']['dependencies'] = []
dg['D']['installer_key'] = 'd_installer'
dg['D']['install_keys'] = ['d']
dg['D']['dependencies'] = ['C']
result = dg.get_ordered_dependency_list()
# TODO: The expected might also have a different order, for example it might be:
# [('c_installer', ['c']), ('d_installer', ['d']), ('b_installer', ['b']), ('a_installer', ['a'])]
# But that wont invalidate the order from a dependency graph stand point
expected = [
[('c_installer', ['c']), ('b_installer', ['b']), ('a_installer', ['a']), ('d_installer', ['d'])],
[('c_installer', ['c']), ('d_installer', ['d']), ('b_installer', ['b']), ('a_installer', ['a'])],
]
assert result in expected, "Results did not match expectations: %s == %s"%(str(result),str(expected))
def test_DependencyGraph_Realworld():
from rosdep2.dependency_graph import DependencyGraph
# Real world example
dg = DependencyGraph()
dg['python-matplotlib']['installer_key'] = 'pip'
dg['python-matplotlib']['install_keys'] = ['matplotlib']
dg['python-matplotlib']['dependencies'] = ['pkg-config']
dg['pkg-config']['installer_key'] = 'homebrew'
dg['pkg-config']['install_keys'] = ['pkg-config']
dg['pkg-config']['dependencies'] = []
result = dg.get_ordered_dependency_list()
expected = [('homebrew', ['pkg-config']), ('pip', ['matplotlib'])]
assert result == expected, "Results did not match expectations: %s == %s"%(str(result),str(expected))
| bsd-3-clause |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/mpl_toolkits/tests/test_axes_grid1.py | 5 | 4246 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison, cleanup
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1 import AxesGrid
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
from matplotlib.colors import LogNorm
import numpy as np
@image_comparison(baseline_images=['divider_append_axes'])
def test_divider_append_axes():
# the random data
np.random.seed(0)
x = np.random.randn(1000)
y = np.random.randn(1000)
fig, axScatter = plt.subplots()
# the scatter plot:
axScatter.scatter(x, y)
# create new axes on the right and on the top of the current axes
# The first argument of the new_vertical(new_horizontal) method is
# the height (width) of the axes to be created in inches.
divider = make_axes_locatable(axScatter)
axHistbot = divider.append_axes("bottom", 1.2, pad=0.1, sharex=axScatter)
axHistright = divider.append_axes("right", 1.2, pad=0.1, sharey=axScatter)
axHistleft = divider.append_axes("left", 1.2, pad=0.1, sharey=axScatter)
axHisttop = divider.append_axes("top", 1.2, pad=0.1, sharex=axScatter)
# now determine nice limits by hand:
binwidth = 0.25
xymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])
lim = (int(xymax/binwidth) + 1) * binwidth
bins = np.arange(-lim, lim + binwidth, binwidth)
axHisttop.hist(x, bins=bins)
axHistbot.hist(x, bins=bins)
axHistleft.hist(y, bins=bins, orientation='horizontal')
axHistright.hist(y, bins=bins, orientation='horizontal')
axHistbot.invert_yaxis()
axHistleft.invert_xaxis()
axHisttop.xaxis.set_ticklabels(())
axHistbot.xaxis.set_ticklabels(())
axHistleft.yaxis.set_ticklabels(())
axHistright.yaxis.set_ticklabels(())
@cleanup
def test_axesgrid_colorbar_log_smoketest():
fig = plt.figure()
grid = AxesGrid(fig, 111, # modified to be only subplot
nrows_ncols=(1, 1),
label_mode="L",
cbar_location="top",
cbar_mode="single",
)
Z = 10000 * np.random.rand(10, 10)
im = grid[0].imshow(Z, interpolation="nearest", norm=LogNorm())
grid.cbar_axes[0].colorbar(im)
@image_comparison(
baseline_images=['inset_locator'], style='default', extensions=['png'],
remove_text=True)
def test_inset_locator():
def get_demo_image():
from matplotlib.cbook import get_sample_data
import numpy as np
f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False)
z = np.load(f)
# z is a numpy array of 15x15
return z, (-3, 4, -4, 3)
fig, ax = plt.subplots(figsize=[5, 4])
# prepare the demo image
Z, extent = get_demo_image()
Z2 = np.zeros([150, 150], dtype="d")
ny, nx = Z.shape
Z2[30:30 + ny, 30:30 + nx] = Z
# extent = [-3, 4, -4, 3]
ax.imshow(Z2, extent=extent, interpolation="nearest",
origin="lower")
axins = zoomed_inset_axes(ax, 6, loc=1) # zoom = 6
axins.imshow(Z2, extent=extent, interpolation="nearest",
origin="lower")
axins.yaxis.get_major_locator().set_params(nbins=7)
axins.xaxis.get_major_locator().set_params(nbins=7)
# sub region of the original image
x1, x2, y1, y2 = -1.5, -0.9, -2.5, -1.9
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
plt.xticks(visible=False)
plt.yticks(visible=False)
# draw a bbox of the region of the inset axes in the parent axes and
# connecting lines between the bbox and the inset axes area
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
asb = AnchoredSizeBar(ax.transData,
0.5,
'0.5',
loc=8,
pad=0.1, borderpad=0.5, sep=5,
frameon=False)
ax.add_artist(asb)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| bsd-2-clause |
quantum13/mlbootcamp5 | qml_workdir/ensembling/level1_models_extra1.py | 1 | 1967 | import datetime
import numpy as np
from hyperopt import hp, fmin, tpe
import os
import sys
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
sys.path.insert(0, os.getcwd())
import qml_workdir.classes.config
from qml.cv import QCV
from qml.models import QXgb, QXgb2
from qml_workdir.classes.models import qm
if __name__ == "__main__":
CV_SCORE_TO_STOP = 0.544
DATAS = [266, 269]
EVALS_ROUNDS = 4000
rounds = EVALS_ROUNDS
cv = QCV(qm)
counter = 0
def fn(params):
global counter
counter +=1
params['max_features'] = params['max_features'] / 10
params['n_estimators'] = int(1.3 ** params['n_estimators'])
model_id = qm.add_by_params(
ExtraTreesClassifier(
max_depth=int(params['max_depth']),
n_estimators=int(params['n_estimators']),
max_features=float(params['max_features']),
n_jobs=-1
),
'hyperopt rand_forest',
predict_fn='predict_proba'
)
res = cv.cross_val(model_id, params['data_id'], seed=1000, early_stop_cv=lambda x: x>CV_SCORE_TO_STOP)
res = np.float64(res)
res_arr = [res]
# if res < CV_SCORE_TO_STOP:
# for i in range(7):
# res = cv.cross_val(model_id, data_id, seed=1001 + i, force=True)
# res = np.float64(res)
# res_arr.append(res)
print(params['data_id'], model_id, "{}/{}".format(counter, rounds), res_arr, datetime.datetime.now(), params)
return np.mean(res_arr)
space = {
'max_depth': hp.quniform('max_depth', 3, 10, 1),
'n_estimators': hp.quniform('n_estimators', 18, 26, 1),
'max_features': hp.quniform('max_features', 2, 10, 1)
}
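    # fn() maps these integer draws onto the real hyper-parameters: max_depth is used
    # directly, n_estimators = int(1.3**q) spans roughly 112..917 trees for q in 18..26,
    # and max_features/10 gives a fraction between 0.2 and 1.0.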
counter = 0
space['data_id'] = hp.choice('data_id', DATAS)
rounds = EVALS_ROUNDS
fmin(fn, space, algo=tpe.suggest, max_evals=rounds)
| mit |
ky822/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
ProjectQ-Framework/ProjectQ | projectq/backends/_circuits/_plot.py | 1 | 21374 | # -*- coding: utf-8 -*-
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides the basic functionality required to plot a quantum
circuit in a matplotlib figure.
It is mainly used by the CircuitDrawerMatplotlib compiler engine.
Currently, it supports all single-qubit gates, including their controlled
versions to an arbitrary number of control qubits. It also supports
multi-target qubit gates under some restrictions, namely that the target
qubits must be neighbours in the output figure (which cannot be determined
during compilation at this time).
"""
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection, LineCollection
from matplotlib.lines import Line2D
from matplotlib.patches import Circle, Arc, Rectangle
# Important note on units for the plot parameters.
# The following entries are in inches:
# - column_spacing
# - labels_margin
# - wire_height
#
# The following entries are in data units (matplotlib)
# - control_radius
# - gate_offset
# - mgate_width
# - not_radius
# - swap_delta
# - x_offset
#
# The rest have misc. units (as defined by matplotlib)
_DEFAULT_PLOT_PARAMS = dict(
fontsize=14.0,
column_spacing=0.5,
control_radius=0.015,
labels_margin=1,
linewidth=1.0,
not_radius=0.03,
gate_offset=0.05,
mgate_width=0.1,
swap_delta=0.02,
x_offset=0.05,
wire_height=1,
)
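# Any of these defaults can be overridden on a per-call basis through the
# keyword arguments of to_draw(), e.g. to_draw(qubit_lines, fontsize=10.0).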
# ==============================================================================
def to_draw(qubit_lines, qubit_labels=None, drawing_order=None, **kwargs):
"""
Translates a given circuit to a matplotlib figure.
Args:
qubit_lines (dict): list of gates for each qubit axis
qubit_labels (dict): label to print in front of the qubit wire for
each qubit ID
drawing_order (dict): index of the wire for each qubit ID to be drawn.
**kwargs (dict): additional parameters are used to update the default
plot parameters
Returns:
A tuple with (figure, axes)
Note:
Numbering of qubit wires starts at 0 at the bottom and increases
vertically.
Note:
Additional keyword arguments can be passed to this
function in order to further customize the figure output
by matplotlib (default value in parentheses):
- fontsize (14): Font size in pt
- column_spacing (.5): Vertical spacing between two
neighbouring gates (roughly in inches)
- control_radius (.015): Radius of the circle for controls
- labels_margin (1): Margin between labels and begin of
wire (roughly in inches)
- linewidth (1): Width of line
- not_radius (.03): Radius of the circle for X/NOT gates
- gate_offset (.05): Inner margins for gates with a text
representation
- mgate_width (.1): Width of the measurement gate
- swap_delta (.02): Half-size of the SWAP gate
- x_offset (.05): Absolute X-offset for drawing within the axes
- wire_height (1): Vertical spacing between two qubit
wires (roughly in inches)
"""
if qubit_labels is None:
qubit_labels = {qubit_id: r'$|0\rangle$' for qubit_id in qubit_lines}
else:
if list(qubit_labels) != list(qubit_lines):
raise RuntimeError('Qubit IDs in qubit_labels do not match qubit IDs in qubit_lines!')
if drawing_order is None:
n_qubits = len(qubit_lines)
drawing_order = {qubit_id: n_qubits - qubit_id - 1 for qubit_id in list(qubit_lines)}
else:
if list(drawing_order) != list(qubit_lines):
raise RuntimeError('Qubit IDs in drawing_order do not match ' + 'qubit IDs in qubit_lines!')
if list(sorted(drawing_order.values())) != list(range(len(drawing_order))):
raise RuntimeError(
'Indices of qubit wires in drawing_order must be between 0 and {}!'.format(len(drawing_order))
)
plot_params = deepcopy(_DEFAULT_PLOT_PARAMS)
plot_params.update(kwargs)
n_labels = len(list(qubit_lines))
wire_height = plot_params['wire_height']
# Grid in inches
wire_grid = np.arange(wire_height, (n_labels + 1) * wire_height, wire_height, dtype=float)
fig, axes = create_figure(plot_params)
# Grid in inches
gate_grid = calculate_gate_grid(axes, qubit_lines, plot_params)
width = gate_grid[-1] + plot_params['column_spacing']
height = wire_grid[-1] + wire_height
resize_figure(fig, axes, width, height, plot_params)
# Convert grids into data coordinates
units_per_inch = plot_params['units_per_inch']
gate_grid *= units_per_inch
gate_grid = gate_grid + plot_params['x_offset']
wire_grid *= units_per_inch
plot_params['column_spacing'] *= units_per_inch
draw_wires(axes, n_labels, gate_grid, wire_grid, plot_params)
draw_labels(axes, qubit_labels, drawing_order, wire_grid, plot_params)
draw_gates(axes, qubit_lines, drawing_order, gate_grid, wire_grid, plot_params)
return fig, axes
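# Minimal usage sketch. Each per-column entry is either None or a tuple of
# (gate_str, target_qubit_ids, control_qubit_ids), the layout consumed by
# draw_gates() below:
#
#     qubit_lines = {
#         0: [('H', [0], []), None],
#         1: [None, ('X', [1], [0])],
#     }
#     fig, axes = to_draw(qubit_lines)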
# ==============================================================================
# Functions used to calculate the layout
def gate_width(axes, gate_str, plot_params):
"""
Calculate the width of a gate based on its string representation.
Args:
axes (matplotlib.axes.Axes): axes object
gate_str (str): string representation of a gate
plot_params (dict): plot parameters
Returns:
The width of a gate on the figure (in inches)
"""
if gate_str == 'X':
return 2 * plot_params['not_radius'] / plot_params['units_per_inch']
if gate_str == 'Swap':
return 2 * plot_params['swap_delta'] / plot_params['units_per_inch']
if gate_str == 'Measure':
return plot_params['mgate_width']
obj = axes.text(
0,
0,
gate_str,
visible=True,
bbox=dict(edgecolor='k', facecolor='w', fill=True, lw=1.0),
fontsize=14,
)
obj.figure.canvas.draw()
width = obj.get_window_extent(obj.figure.canvas.get_renderer()).width / axes.figure.dpi
obj.remove()
return width + 2 * plot_params['gate_offset']
def calculate_gate_grid(axes, qubit_lines, plot_params):
"""
Calculate an optimal grid spacing for a list of quantum gates.
Args:
axes (matplotlib.axes.Axes): axes object
qubit_lines (dict): list of gates for each qubit axis
plot_params (dict): plot parameters
Returns:
An array (np.ndarray) with the gate x positions.
"""
# NB: column_spacing is still in inch when this function is called
column_spacing = plot_params['column_spacing']
data = list(qubit_lines.values())
depth = len(data[0])
width_list = [
max(gate_width(axes, line[idx][0], plot_params) if line[idx] else 0 for line in data) for idx in range(depth)
]
gate_grid = np.array([0] * (depth + 1), dtype=float)
gate_grid[0] = plot_params['labels_margin']
if depth > 0:
gate_grid[0] += width_list[0] * 0.5
for idx in range(1, depth):
gate_grid[idx] = gate_grid[idx - 1] + column_spacing + (width_list[idx] + width_list[idx - 1]) * 0.5
gate_grid[-1] = gate_grid[-2] + column_spacing + width_list[-1] * 0.5
return gate_grid
# ==============================================================================
# Basic helper functions
def text(axes, gate_pos, wire_pos, textstr, plot_params):
"""
Draws a text box on the figure.
Args:
axes (matplotlib.axes.Axes): axes object
gate_pos (float): x coordinate of the gate [data units]
wire_pos (float): y coordinate of the qubit wire
textstr (str): text of the gate and box
plot_params (dict): plot parameters
"""
return axes.text(
gate_pos,
wire_pos,
textstr,
color='k',
ha='center',
va='center',
clip_on=True,
size=plot_params['fontsize'],
)
# ==============================================================================
def create_figure(plot_params):
"""
Create a new figure as well as a new axes instance
Args:
plot_params (dict): plot parameters
Returns:
A tuple with (figure, axes)
"""
fig = plt.figure(facecolor='w', edgecolor='w')
axes = plt.axes()
axes.set_axis_off()
axes.set_aspect('equal')
plot_params['units_per_inch'] = fig.dpi / axes.get_window_extent().width
return fig, axes
def resize_figure(fig, axes, width, height, plot_params):
"""
Resizes a figure and adjust the limits of the axes instance to make sure
that the distances in data coordinates on the screen stay constant.
Args:
fig (matplotlib.figure.Figure): figure object
axes (matplotlib.axes.Axes): axes object
width (float): new figure width
height (float): new figure height
plot_params (dict): plot parameters
"""
fig.set_size_inches(width, height)
new_limits = plot_params['units_per_inch'] * np.array([width, height])
axes.set_xlim(0, new_limits[0])
axes.set_ylim(0, new_limits[1])
def draw_gates( # pylint: disable=too-many-arguments
axes, qubit_lines, drawing_order, gate_grid, wire_grid, plot_params
):
"""
Draws the gates.
    Args:
        axes (matplotlib.axes.Axes): axes object
        qubit_lines (dict): list of gates for each qubit axis
drawing_order (dict): index of the wire for each qubit ID to be drawn
gate_grid (np.ndarray): x positions of the gates
wire_grid (np.ndarray): y positions of the qubit wires
plot_params (dict): plot parameters
"""
for qubit_line in qubit_lines.values():
for idx, data in enumerate(qubit_line):
if data is not None:
(gate_str, targets, controls) = data
targets_order = [drawing_order[tgt] for tgt in targets]
draw_gate(
axes,
gate_str,
gate_grid[idx],
[wire_grid[tgt] for tgt in targets_order],
targets_order,
[wire_grid[drawing_order[ctrl]] for ctrl in controls],
plot_params,
)
def draw_gate(
axes, gate_str, gate_pos, target_wires, targets_order, control_wires, plot_params
): # pylint: disable=too-many-arguments
"""
Draws a single gate at a given location.
Args:
axes (AxesSubplot): axes object
gate_str (str): string representation of a gate
gate_pos (float): x coordinate of the gate [data units]
target_wires (list): y coordinates of the target qubits
targets_order (list): index of the wires corresponding to the target
qubit IDs
control_wires (list): y coordinates of the control qubits
plot_params (dict): plot parameters
"""
# Special cases
if gate_str == 'Z' and len(control_wires) == 1:
draw_control_z_gate(axes, gate_pos, target_wires[0], control_wires[0], plot_params)
elif gate_str == 'X':
draw_x_gate(axes, gate_pos, target_wires[0], plot_params)
elif gate_str == 'Swap':
draw_swap_gate(axes, gate_pos, target_wires[0], target_wires[1], plot_params)
elif gate_str == 'Measure':
draw_measure_gate(axes, gate_pos, target_wires[0], plot_params)
else:
if len(target_wires) == 1:
draw_generic_gate(axes, gate_pos, target_wires[0], gate_str, plot_params)
else:
if sorted(targets_order) != list(range(min(targets_order), max(targets_order) + 1)):
raise RuntimeError(
'Multi-qubit gate with non-neighbouring qubits!\n'
+ 'Gate: {} on wires {}'.format(gate_str, targets_order)
)
multi_qubit_gate(
axes,
gate_str,
gate_pos,
min(target_wires),
max(target_wires),
plot_params,
)
if not control_wires:
return
for control_wire in control_wires:
axes.add_patch(
Circle(
(gate_pos, control_wire),
plot_params['control_radius'],
ec='k',
fc='k',
fill=True,
lw=plot_params['linewidth'],
)
)
all_wires = target_wires + control_wires
axes.add_line(
Line2D(
(gate_pos, gate_pos),
(min(all_wires), max(all_wires)),
color='k',
lw=plot_params['linewidth'],
)
)
def draw_generic_gate(axes, gate_pos, wire_pos, gate_str, plot_params):
"""
    Draws a generic gate with a boxed text label.
Args:
axes (AxesSubplot): axes object
gate_pos (float): x coordinate of the gate [data units]
wire_pos (float): y coordinate of the qubit wire
gate_str (str) : string representation of a gate
plot_params (dict): plot parameters
"""
obj = text(axes, gate_pos, wire_pos, gate_str, plot_params)
obj.set_zorder(7)
factor = plot_params['units_per_inch'] / obj.figure.dpi
gate_offset = plot_params['gate_offset']
renderer = obj.figure.canvas.get_renderer()
width = obj.get_window_extent(renderer).width * factor + 2 * gate_offset
height = obj.get_window_extent(renderer).height * factor + 2 * gate_offset
axes.add_patch(
Rectangle(
(gate_pos - width / 2, wire_pos - height / 2),
width,
height,
ec='k',
fc='w',
fill=True,
lw=plot_params['linewidth'],
zorder=6,
)
)
def draw_measure_gate(axes, gate_pos, wire_pos, plot_params):
"""
Draws a measurement gate.
Args:
axes (AxesSubplot): axes object
gate_pos (float): x coordinate of the gate [data units]
wire_pos (float): y coordinate of the qubit wire
plot_params (dict): plot parameters
"""
# pylint: disable=invalid-name
width = plot_params['mgate_width']
height = 0.9 * width
y_ref = wire_pos - 0.3 * height
# Cannot use PatchCollection for the arc due to bug in matplotlib code...
arc = Arc(
(gate_pos, y_ref),
width * 0.7,
height * 0.8,
theta1=0,
theta2=180,
ec='k',
fc='w',
zorder=5,
)
axes.add_patch(arc)
patches = [
Rectangle((gate_pos - width / 2, wire_pos - height / 2), width, height, fill=True),
Line2D(
(gate_pos, gate_pos + width * 0.35),
(y_ref, wire_pos + height * 0.35),
color='k',
linewidth=1,
),
]
gate = PatchCollection(
patches,
edgecolors='k',
facecolors='w',
linewidths=plot_params['linewidth'],
zorder=5,
)
gate.set_label('Measure')
axes.add_collection(gate)
def multi_qubit_gate( # pylint: disable=too-many-arguments
axes, gate_str, gate_pos, wire_pos_min, wire_pos_max, plot_params
):
"""
Draws a multi-target qubit gate.
Args:
axes (matplotlib.axes.Axes): axes object
gate_str (str): string representation of a gate
gate_pos (float): x coordinate of the gate [data units]
wire_pos_min (float): y coordinate of the lowest qubit wire
wire_pos_max (float): y coordinate of the highest qubit wire
plot_params (dict): plot parameters
"""
gate_offset = plot_params['gate_offset']
y_center = (wire_pos_max - wire_pos_min) / 2 + wire_pos_min
obj = axes.text(
gate_pos,
y_center,
gate_str,
color='k',
ha='center',
va='center',
size=plot_params['fontsize'],
zorder=7,
)
height = wire_pos_max - wire_pos_min + 2 * gate_offset
inv = axes.transData.inverted()
width = inv.transform_bbox(obj.get_window_extent(obj.figure.canvas.get_renderer())).width
return axes.add_patch(
Rectangle(
(gate_pos - width / 2, wire_pos_min - gate_offset),
width,
height,
edgecolor='k',
facecolor='w',
fill=True,
lw=plot_params['linewidth'],
zorder=6,
)
)
def draw_x_gate(axes, gate_pos, wire_pos, plot_params):
"""
Draws the symbol for a X/NOT gate.
Args:
axes (matplotlib.axes.Axes): axes object
gate_pos (float): x coordinate of the gate [data units]
wire_pos (float): y coordinate of the qubit wire [data units]
plot_params (dict): plot parameters
"""
not_radius = plot_params['not_radius']
gate = PatchCollection(
[
Circle((gate_pos, wire_pos), not_radius, fill=False),
Line2D((gate_pos, gate_pos), (wire_pos - not_radius, wire_pos + not_radius)),
],
edgecolors='k',
facecolors='w',
linewidths=plot_params['linewidth'],
)
gate.set_label('NOT')
axes.add_collection(gate)
def draw_control_z_gate(axes, gate_pos, wire_pos1, wire_pos2, plot_params):
"""
Draws the symbol for a controlled-Z gate.
Args:
axes (matplotlib.axes.Axes): axes object
        gate_pos (float): x coordinate of the gate [data units]
        wire_pos1 (float): y coordinate of the 1st qubit wire
        wire_pos2 (float): y coordinate of the 2nd qubit wire
plot_params (dict): plot parameters
"""
gate = PatchCollection(
[
Circle((gate_pos, wire_pos1), plot_params['control_radius'], fill=True),
Circle((gate_pos, wire_pos2), plot_params['control_radius'], fill=True),
Line2D((gate_pos, gate_pos), (wire_pos1, wire_pos2)),
],
edgecolors='k',
facecolors='k',
linewidths=plot_params['linewidth'],
)
gate.set_label('CZ')
axes.add_collection(gate)
def draw_swap_gate(axes, gate_pos, wire_pos1, wire_pos2, plot_params):
"""
Draws the symbol for a SWAP gate.
Args:
axes (matplotlib.axes.Axes): axes object
        gate_pos (float): x coordinate of the gate [data units]
        wire_pos1 (float): y coordinate of the 1st qubit wire
        wire_pos2 (float): y coordinate of the 2nd qubit wire
plot_params (dict): plot parameters
"""
delta = plot_params['swap_delta']
lines = []
for wire_pos in (wire_pos1, wire_pos2):
lines.append([(gate_pos - delta, wire_pos - delta), (gate_pos + delta, wire_pos + delta)])
lines.append([(gate_pos - delta, wire_pos + delta), (gate_pos + delta, wire_pos - delta)])
lines.append([(gate_pos, wire_pos1), (gate_pos, wire_pos2)])
gate = LineCollection(lines, colors='k', linewidths=plot_params['linewidth'])
gate.set_label('SWAP')
axes.add_collection(gate)
def draw_wires(axes, n_labels, gate_grid, wire_grid, plot_params):
"""
Draws all the circuit qubit wires.
Args:
axes (matplotlib.axes.Axes): axes object
        n_labels (int): number of qubit wires
gate_grid (ndarray): array with the ref. x positions of the gates
wire_grid (ndarray): array with the ref. y positions of the qubit
wires
plot_params (dict): plot parameters
"""
# pylint: disable=invalid-name
lines = []
for i in range(n_labels):
lines.append(
(
(gate_grid[0] - plot_params['column_spacing'], wire_grid[i]),
(gate_grid[-1], wire_grid[i]),
)
)
all_lines = LineCollection(lines, linewidths=plot_params['linewidth'], edgecolor='k')
all_lines.set_label('qubit_wires')
axes.add_collection(all_lines)
def draw_labels(axes, qubit_labels, drawing_order, wire_grid, plot_params):
"""
Draws the labels at the start of each qubit wire
Args:
axes (matplotlib.axes.Axes): axes object
        qubit_labels (dict): label to draw for each qubit ID
        drawing_order (dict): mapping between qubit IDs and wire indices
wire_grid (ndarray): array with the ref. y positions of the qubit
wires
plot_params (dict): plot parameters
"""
for qubit_id in qubit_labels:
wire_idx = drawing_order[qubit_id]
text(
axes,
plot_params['x_offset'],
wire_grid[wire_idx],
qubit_labels[qubit_id],
plot_params,
)
| apache-2.0 |
enigmampc/catalyst | catalyst/errors.py | 1 | 22282 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from textwrap import dedent
from catalyst.utils.memoize import lazyval
class ZiplineError(Exception):
msg = None
def __init__(self, **kwargs):
self.kwargs = kwargs
@lazyval
def message(self):
return str(self)
def __str__(self):
msg = self.msg.format(**self.kwargs)
return msg
__unicode__ = __str__
__repr__ = __str__
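# Usage sketch: subclasses declare a msg format string and are raised with the
# matching keyword arguments, which are interpolated when the exception is
# rendered as a string, e.g.
#
#     raise SymbolNotFound(symbol='AAPL')
#     # str(exc) == "Symbol 'AAPL' was not found."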
class NoTradeDataAvailable(ZiplineError):
pass
class NoTradeDataAvailableTooEarly(NoTradeDataAvailable):
msg = "{sid} does not exist on {dt}. It started trading on {start_dt}."
class NoTradeDataAvailableTooLate(NoTradeDataAvailable):
msg = "{sid} does not exist on {dt}. It stopped trading on {end_dt}."
class BenchmarkAssetNotAvailableTooEarly(NoTradeDataAvailableTooEarly):
pass
class BenchmarkAssetNotAvailableTooLate(NoTradeDataAvailableTooLate):
pass
class InvalidBenchmarkAsset(ZiplineError):
msg = """
{sid} cannot be used as the benchmark because it has a stock \
dividend on {dt}. Choose another asset to use as the benchmark.
""".strip()
class WrongDataForTransform(ZiplineError):
"""
Raised whenever a rolling transform is called on an event that
does not have the necessary properties.
"""
msg = "{transform} requires {fields}. Event cannot be processed."
class UnsupportedSlippageModel(ZiplineError):
"""
Raised if a user script calls the set_slippage magic
    with a slippage object that isn't a VolumeShareSlippage or
    FixedSlippage.
"""
msg = """
You attempted to set slippage with an unsupported class. \
Please use VolumeShareSlippage or FixedSlippage.
""".strip()
class IncompatibleSlippageModel(ZiplineError):
"""
Raised if a user tries to set a futures slippage model for equities or vice
versa.
"""
msg = """
You attempted to set an incompatible slippage model for {asset_type}. \
The slippage model '{given_model}' only supports {supported_asset_types}.
""".strip()
class SetSlippagePostInit(ZiplineError):
# Raised if a users script calls set_slippage magic
# after the initialize method has returned.
msg = """
You attempted to set slippage outside of `initialize`. \
You may only call 'set_slippage' in your initialize method.
""".strip()
class SetCancelPolicyPostInit(ZiplineError):
# Raised if a users script calls set_cancel_policy
# after the initialize method has returned.
msg = """
You attempted to set the cancel policy outside of `initialize`. \
You may only call 'set_cancel_policy' in your initialize method.
""".strip()
class RegisterTradingControlPostInit(ZiplineError):
# Raised if a user's script register's a trading control after initialize
# has been run.
msg = """
You attempted to set a trading control outside of `initialize`. \
Trading controls may only be set in your initialize method.
""".strip()
class RegisterAccountControlPostInit(ZiplineError):
# Raised if a user's script register's a trading control after initialize
# has been run.
msg = """
You attempted to set an account control outside of `initialize`. \
Account controls may only be set in your initialize method.
""".strip()
class UnsupportedCommissionModel(ZiplineError):
"""
Raised if a user script calls the set_commission magic
with a commission object that isn't a PerShare, PerTrade or
PerDollar commission
"""
msg = """
You attempted to set commission with an unsupported class. \
Please use PerShare or PerTrade.
""".strip()
class IncompatibleCommissionModel(ZiplineError):
"""
Raised if a user tries to set a futures commission model for equities or
vice versa.
"""
msg = """
You attempted to set an incompatible commission model for {asset_type}. \
The commission model '{given_model}' only supports {supported_asset_types}.
""".strip()
class UnsupportedCancelPolicy(ZiplineError):
"""
Raised if a user script calls set_cancel_policy with an object that isn't
a CancelPolicy.
"""
msg = """
You attempted to set the cancel policy with an unsupported class. Please use
an instance of CancelPolicy.
""".strip()
class SetCommissionPostInit(ZiplineError):
"""
Raised if a users script calls set_commission magic
after the initialize method has returned.
"""
msg = """
You attempted to override commission outside of `initialize`. \
You may only call 'set_commission' in your initialize method.
""".strip()
class TransactionWithNoVolume(ZiplineError):
"""
Raised if a transact call returns a transaction with zero volume.
"""
msg = """
Transaction {txn} has a volume of zero.
""".strip()
class TransactionWithWrongDirection(ZiplineError):
"""
Raised if a transact call returns a transaction with a direction that
does not match the order.
"""
msg = """
Transaction {txn} not in same direction as corresponding order {order}.
""".strip()
class TransactionWithNoAmount(ZiplineError):
"""
Raised if a transact call returns a transaction with zero amount.
"""
msg = """
Transaction {txn} has an amount of zero.
""".strip()
class TransactionVolumeExceedsOrder(ZiplineError):
"""
Raised if a transact call returns a transaction with a volume greater than
the corresponding order.
"""
msg = """
Transaction volume of {txn} exceeds the order volume of {order}.
""".strip()
class UnsupportedOrderParameters(ZiplineError):
"""
Raised if a set of mutually exclusive parameters are passed to an order
call.
"""
msg = "{msg}"
class CannotOrderDelistedAsset(ZiplineError):
"""
Raised if an order is for a delisted asset.
"""
msg = "{msg}"
class BadOrderParameters(ZiplineError):
"""
Raised if any impossible parameters (nan, negative limit/stop)
are passed to an order call.
"""
msg = "{msg}"
class OrderDuringInitialize(ZiplineError):
"""
Raised if order is called during initialize()
"""
msg = "{msg}"
class SetBenchmarkOutsideInitialize(ZiplineError):
"""
Raised if set_benchmark is called outside initialize()
"""
msg = "'set_benchmark' can only be called within initialize function."
class AccountControlViolation(ZiplineError):
"""
Raised if the account violates a constraint set by a AccountControl.
"""
msg = """
Account violates account constraint {constraint}.
""".strip()
class TradingControlViolation(ZiplineError):
"""
Raised if an order would violate a constraint set by a TradingControl.
"""
msg = """
Order for {amount} shares of {asset} at {datetime} violates trading constraint
{constraint}.
""".strip()
class IncompatibleHistoryFrequency(ZiplineError):
"""
Raised when a frequency is given to history which is not supported.
At least, not yet.
"""
msg = """
Requested history at frequency '{frequency}' cannot be created with data
at frequency '{data_frequency}'.
""".strip()
class HistoryInInitialize(ZiplineError):
"""
Raised when an algorithm calls history() in initialize.
"""
msg = "history() should only be called in handle_data()"
class OrderInBeforeTradingStart(ZiplineError):
"""
Raised when an algorithm calls an order method in before_trading_start.
"""
msg = "Cannot place orders inside before_trading_start."
class MultipleSymbolsFound(ZiplineError):
"""
Raised when a symbol() call contains a symbol that changed over
time and is thus not resolvable without additional information
provided via as_of_date.
"""
msg = """
Multiple symbols with the name '{symbol}' found. Use the
    'as_of_date' argument to specify when the date symbol-lookup
should be valid.
Possible options: {options}
""".strip()
class SymbolNotFound(ZiplineError):
"""
    Raised when a symbol() call contains a non-existent symbol.
"""
msg = """
Symbol '{symbol}' was not found.
""".strip()
class RootSymbolNotFound(ZiplineError):
"""
    Raised when a lookup_future_chain() call contains a non-existent symbol.
"""
msg = """
Root symbol '{root_symbol}' was not found.
""".strip()
class ValueNotFoundForField(ZiplineError):
"""
Raised when a lookup_by_supplementary_mapping() call contains a
    value that does not exist for the specified mapping type.
"""
msg = """
Value '{value}' was not found for field '{field}'.
""".strip()
class MultipleValuesFoundForField(ZiplineError):
"""
Raised when a lookup_by_supplementary_mapping() call contains a
value that changed over time for the specified field and is
thus not resolvable without additional information provided via
as_of_date.
"""
msg = """
Multiple occurrences of the value '{value}' found for field '{field}'.
    Use the 'as_of_date' argument to specify when the lookup should be valid.
Possible options: {options}
""".strip()
class NoValueForSid(ZiplineError):
"""
Raised when a get_supplementary_field() call contains a sid that
does not have a value for the specified mapping type.
"""
msg = """
No '{field}' value found for sid '{sid}'.
""".strip()
class MultipleValuesFoundForSid(ZiplineError):
"""
Raised when a get_supplementary_field() call contains a value that
changed over time for the specified field and is thus not resolvable
without additional information provided via as_of_date.
"""
msg = """
    Multiple '{field}' values found for sid '{sid}'. Use the 'as_of_date' argument
to specify when the lookup should be valid.
Possible options: {options}
""".strip()
class SidsNotFound(ZiplineError):
"""
Raised when a retrieve_asset() or retrieve_all() call contains a
non-existent sid.
"""
@lazyval
def plural(self):
return len(self.sids) > 1
@lazyval
def sids(self):
return self.kwargs['sids']
@lazyval
def msg(self):
if self.plural:
return "No assets found for sids: {sids}."
return "No asset found for sid: {sids[0]}."
class EquitiesNotFound(SidsNotFound):
"""
Raised when a call to `retrieve_equities` fails to find an asset.
"""
@lazyval
def msg(self):
if self.plural:
return "No equities found for sids: {sids}."
return "No equity found for sid: {sids[0]}."
class FutureContractsNotFound(SidsNotFound):
"""
Raised when a call to `retrieve_futures_contracts` fails to find an asset.
"""
@lazyval
def msg(self):
if self.plural:
return "No future contracts found for sids: {sids}."
return "No future contract found for sid: {sids[0]}."
class ConsumeAssetMetaDataError(ZiplineError):
"""
Raised when AssetFinder.consume() is called on an invalid object.
"""
msg = """
AssetFinder can not consume metadata of type {obj}. Metadata must be a dict, a
DataFrame, or a tables.Table. If the provided metadata is a Table, the rows
must contain both or one of 'sid' or 'symbol'.
""".strip()
class MapAssetIdentifierIndexError(ZiplineError):
"""
Raised when AssetMetaData.map_identifier_index_to_sids() is called on an
index of invalid objects.
"""
msg = """
AssetFinder can not map an index with values of type {obj}. Asset indices of
DataFrames or Panels must be integer sids, string symbols, or Asset objects.
""".strip()
class SidAssignmentError(ZiplineError):
"""
Raised when an AssetFinder tries to build an Asset that does not have a sid
and that AssetFinder is not permitted to assign sids.
"""
msg = """
AssetFinder metadata is missing a SID for identifier '{identifier}'.
""".strip()
class NoSourceError(ZiplineError):
"""
Raised when no source is given to the pipeline
"""
msg = """
No data source given.
""".strip()
class PipelineDateError(ZiplineError):
"""
Raised when only one date is passed to the pipeline
"""
msg = """
Only one simulation date given. Please specify both the 'start' and 'end' for
the simulation, or neither. If neither is given, the start and end of the
DataSource will be used. Given start = '{start}', end = '{end}'
""".strip()
class WindowLengthTooLong(ZiplineError):
"""
Raised when a trailing window is instantiated with a lookback greater than
the length of the underlying array.
"""
msg = (
"Can't construct a rolling window of length "
"{window_length} on an array of length {nrows}."
).strip()
class WindowLengthNotPositive(ZiplineError):
"""
Raised when a trailing window would be instantiated with a length less than
1.
"""
msg = (
"Expected a window_length greater than 0, got {window_length}."
).strip()
class NonWindowSafeInput(ZiplineError):
"""
Raised when a Pipeline API term that is not deemed window safe is specified
as an input to another windowed term.
This is an error because it's generally not safe to compose windowed
functions on split/dividend adjusted data.
"""
msg = (
"Can't compute windowed expression {parent} with "
"windowed input {child}."
)
class TermInputsNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying inputs and
that term does not have class-level default inputs.
"""
msg = "{termname} requires inputs, but no inputs list was passed."
class TermOutputsEmpty(ZiplineError):
"""
Raised if a user attempts to construct a term with an empty outputs list.
"""
msg = (
"{termname} requires at least one output when passed an outputs "
"argument."
)
class InvalidOutputName(ZiplineError):
"""
Raised if a term's output names conflict with any of its attributes.
"""
msg = (
"{output_name!r} cannot be used as an output name for {termname}. "
"Output names cannot start with an underscore or be contained in the "
"following list: {disallowed_names}."
)
class WindowLengthNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying window
length and that term does not have a class-level default window length.
"""
msg = (
"{termname} requires a window_length, but no window_length was passed."
)
class InvalidTermParams(ZiplineError):
"""
Raised if a user attempts to construct a Term using ParameterizedTermMixin
without specifying a `params` list in the class body.
"""
msg = (
"Expected a list of strings as a class-level attribute for "
"{termname}.params, but got {value} instead."
)
class DTypeNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying dtype and
that term does not have class-level default dtype.
"""
msg = (
"{termname} requires a dtype, but no dtype was passed."
)
class NotDType(ZiplineError):
"""
Raised when a pipeline Term is constructed with a dtype that isn't a numpy
dtype object.
"""
msg = (
"{termname} expected a numpy dtype "
"object for a dtype, but got {dtype} instead."
)
class UnsupportedDType(ZiplineError):
"""
Raised when a pipeline Term is constructed with a dtype that's not
supported.
"""
msg = (
"Failed to construct {termname}.\n"
"Pipeline terms of dtype {dtype} are not yet supported."
)
class BadPercentileBounds(ZiplineError):
"""
Raised by API functions accepting percentile bounds when the passed bounds
are invalid.
"""
msg = (
"Percentile bounds must fall between 0.0 and {upper_bound}, and min "
"must be less than max."
"\nInputs were min={min_percentile}, max={max_percentile}."
)
class UnknownRankMethod(ZiplineError):
"""
Raised during construction of a Rank factor when supplied a bad Rank
method.
"""
msg = (
"Unknown ranking method: '{method}'. "
"`method` must be one of {choices}"
)
class AttachPipelineAfterInitialize(ZiplineError):
"""
Raised when a user tries to call add_pipeline outside of initialize.
"""
msg = (
"Attempted to attach a pipeline after initialize()."
"attach_pipeline() can only be called during initialize."
)
class PipelineOutputDuringInitialize(ZiplineError):
"""
Raised when a user tries to call `pipeline_output` during initialize.
"""
msg = (
"Attempted to call pipeline_output() during initialize. "
"pipeline_output() can only be called once initialize has completed."
)
class NoSuchPipeline(ZiplineError, KeyError):
"""
Raised when a user tries to access a non-existent pipeline by name.
"""
msg = (
"No pipeline named '{name}' exists. Valid pipeline names are {valid}. "
"Did you forget to call attach_pipeline()?"
)
class UnsupportedDataType(ZiplineError):
"""
Raised by CustomFactors with unsupported dtypes.
"""
msg = "{typename} instances with dtype {dtype} are not supported."
class NoFurtherDataError(ZiplineError):
"""
Raised by calendar operations that would ask for dates beyond the extent of
our known data.
"""
# This accepts an arbitrary message string because it's used in more places
# that can be usefully templated.
msg = '{msg}'
@classmethod
def from_lookback_window(cls,
initial_message,
first_date,
lookback_start,
lookback_length):
return cls(
msg=dedent(
"""
{initial_message}
lookback window started at {lookback_start}
earliest known date was {first_date}
{lookback_length} extra rows of data were required
"""
).format(
initial_message=initial_message,
first_date=first_date,
lookback_start=lookback_start,
lookback_length=lookback_length,
)
)
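# Hypothetical usage sketch of the classmethod above (the argument values are
# illustrative only, not taken from calling code):
#
#     raise NoFurtherDataError.from_lookback_window(
#         initial_message="Insufficient data to run the requested computation",
#         first_date=first_trading_day,
#         lookback_start=start_date,
#         lookback_length=10,
#     )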
class UnsupportedDatetimeFormat(ZiplineError):
"""
Raised when an unsupported datetime is passed to an API method.
"""
msg = ("The input '{input}' passed to '{method}' is not "
"coercible to a pandas.Timestamp object.")
class AssetDBVersionError(ZiplineError):
"""
Raised by an AssetDBWriter or AssetFinder if the version number in the
versions table does not match the ASSET_DB_VERSION in asset_writer.py.
"""
msg = (
"The existing Asset database has an incorrect version: {db_version}. "
"Expected version: {expected_version}. Try rebuilding your asset "
"database or updating your version of Zipline."
)
class AssetDBImpossibleDowngrade(ZiplineError):
msg = (
"The existing Asset database is version: {db_version} which is lower "
"than the desired downgrade version: {desired_version}."
)
class HistoryWindowStartsBeforeData(ZiplineError):
msg = (
"History window extends before {first_trading_day}. To use this "
"history window, start the backtest on or after {suggested_start_day}."
)
class NonExistentAssetInTimeFrame(ZiplineError):
msg = (
"The target asset '{asset}' does not exist for the entire timeframe "
"between {start_date} and {end_date}."
)
class InvalidCalendarName(ZiplineError):
"""
Raised when a calendar with an invalid name is requested.
"""
msg = (
"The requested TradingCalendar, {calendar_name}, does not exist."
)
class CalendarNameCollision(ZiplineError):
"""
Raised when the static calendar registry already has a calendar with a
given name.
"""
msg = (
"A calendar with the name {calendar_name} is already registered."
)
class CyclicCalendarAlias(ZiplineError):
"""
Raised when calendar aliases form a cycle.
"""
msg = "Cycle in calendar aliases: [{cycle}]"
class ScheduleFunctionWithoutCalendar(ZiplineError):
"""
Raised when schedule_function is called but there is not a calendar to be
used in the construction of an event rule.
"""
# TODO update message when new TradingSchedules are built
msg = (
"To use schedule_function, the TradingAlgorithm must be running on an "
"ExchangeTradingSchedule, rather than {schedule}."
)
class ScheduleFunctionInvalidCalendar(ZiplineError):
"""
Raised when schedule_function is called with an invalid calendar argument.
"""
msg = (
"Invalid calendar '{given_calendar}' passed to schedule_function. "
"Allowed options are {allowed_calendars}."
)
class UnsupportedPipelineOutput(ZiplineError):
"""
Raised when a 1D term is added as a column to a pipeline.
"""
msg = (
"Cannot add column {column_name!r} with term {term}. Adding slices or "
"single-column-output terms as pipeline columns is not currently "
"supported."
)
class NonSliceableTerm(ZiplineError):
"""
Raised when attempting to index into a non-sliceable term, e.g. instances
of `catalyst.pipeline.term.LoadableTerm`.
"""
msg = "Taking slices of {term} is not currently supported."
class IncompatibleTerms(ZiplineError):
"""
Raised when trying to compute correlations/regressions between two 2D
factors with different masks.
"""
msg = (
"{term_1} and {term_2} must have the same mask in order to compute "
"correlations and regressions asset-wise."
)
| apache-2.0 |
fengzhyuan/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 127 | 25365 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
    # Test that bootstrapping samples generates non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
    # Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
# Test if warm start'ed second fit with smaller n_estimators raises error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
| bsd-3-clause |
alvason/stochastic-infectious-pulse | code/stochastic_SIR-Copy1.py | 2 | 12878 |
# coding: utf-8
# # Stochastic infectious pulse
# https://github.com/alvason/stochastic-infectious-pulse
#
# ### Stochastic version for evolutionary insights
# In[1]:
'''
author: Alvason Zhenhua Li
date: 07/07/2015
'''
get_ipython().magic(u'matplotlib inline')
import numpy as np
import matplotlib.pyplot as plt
import time
import os
dir_path = '/Users/al/Desktop/GitHub/stochastic-infectious-pulse/figure'
file_name = 'stochastic-sir'
import alva_machinery_probability as alva
AlvaFontSize = 23
AlvaFigSize = (16, 8)
numberingFig = 0
# stochastic evolution
figure_name = '-stochastic-event'
file_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
text_list = [r'$ Stochastic-SIR-evolution(all \ possible \ evolving \ events): $'
, r'$ 1. event(new \ SIR \ in) = \mu(S+I+R) $'
, r'$ 2. event(old \ S \ out) = \mu S $'
, r'$ 3. event(old \ I \ out) = \mu I $'
, r'$ 4. event(old \ R \ out) = \mu R $'
, r'$ 5. event(SI \ infected) = \beta S(t)I(t) $'
, r'$ 6. event(IR \ recovred) = \gamma I(t) $']
total_list = np.size(text_list)
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize=(total_list*2, total_list))
plt.axis('off')
for i in range(total_list):
plt.text(0, (total_list - float(i))/total_list
, text_list[i].replace('\\\n', '')
, fontsize = 1.2*AlvaFontSize)
plt.savefig(save_figure, dpi = 100)
plt.show()
# relating to the deterministic SIR equation
text_list = [r'$ Corresponding \ to \ deterministic-SIR-equation $'
, r'$ \frac{\partial S(t)}{\partial t} = \
-\beta S(t)I(t) +\mu N -\mu S(t) $'
, r'$ \frac{\partial I(t)}{\partial t} = \
+\beta S(t)I(t) - \gamma I(t) -\mu I(t) $'
, r'$ \frac{\partial R(t)}{\partial t} = \
+\gamma I(t) - \mu R(t) $']
total_list = np.size(text_list)
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize=(total_list*2, total_list))
plt.axis('off')
for i in range(total_list):
plt.text(0, (total_list - float(i))/total_list
, text_list[i].replace('\\\n', '')
, fontsize = 1.2*AlvaFontSize)
plt.show()
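# A minimal illustrative sketch (assumption: scipy is available) of the
# deterministic SIR system displayed above, useful for comparing the mean-field
# behaviour with the stochastic runs generated below. The parameter values in
# the commented call are placeholders, not the ones used later in this notebook.
from scipy.integrate import odeint

def deterministic_sir(y, t, beta, gamma, mu):
    S, I, R = y
    N = S + I + R
    dS = -beta*S*I + mu*N - mu*S
    dI = +beta*S*I - gamma*I - mu*I
    dR = +gamma*I - mu*R
    return [dS, dI, dR]

# example call (note the stochastic events below normalize the infection term by N):
# t_grid = np.linspace(0, 90, 901)
# trajectory = odeint(deterministic_sir, [299.0, 1.0, 0.0], t_grid,
#                     args=(0.005, 0.25, 1.0/(30*365)))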
# In[2]:
# algorithm for stochastic evolution
figure_name = '-Gillespie-algorithm'
file_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
text_list = [r'$ Gillespie-algorithm: $'
, r'$ 1. \ initialize \ the \ number \ of \ each \ group: \ S(t=0), I(t=0), R(t=0) $'
, r'$ 2. \ compute \ the \ probability \ of \ each \ possible \ event_i \ at \ the \ moment \ \bf{t} $'
, r'$ 3. \ randomly \ select \ event_{next} \
\ according \ to \ random{[0,1)} < \frac{\sum_{k=1}^{next}event_{k}}{\sum_{i=1}^{all} event_i} $'
, r'$ 4. \ update \ the \ number \ of \ corresponding \ group $'
, r'$ 5. \ compute \ \Delta t = \frac{-log_{e}(event_{next})}{\sum_{i}^{} event_i} $'
, r'$ \ (according \ to \ probability-density-function: \ Pr(t < event_{next} < t+\Delta t) = \
exp(-\Delta t \sum_{i}^{} event_i )) $'
, r'$ 7. \ update \ t = t + \Delta t $'
, r'$ 6. \ go \ to \ step-2 $'
]
total_list = np.size(text_list)
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize=(total_list, total_list*1.5))
plt.axis('off')
for i in range(total_list):
plt.text(0, (total_list - float(i))/total_list
, text_list[i].replace('\\\n', '')
, fontsize = 1.2*AlvaFontSize)
plt.savefig(save_figure, dpi = 100)
plt.show()
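# A minimal illustrative sketch (not part of the original notebook) of the core
# Gillespie step described above: one uniform draw selects the next event from
# the cumulative event rates, and a second draw gives the waiting time.
# The function name and argument below are assumptions for illustration only.
import numpy as np

def gillespie_single_step(event_rate):
    # event_rate: 1-D array of the current rates of all possible events
    total_rate = event_rate.sum()
    # step 3: pick event k such that random[0,1) falls in its cumulative slot
    cumulative_probability = np.cumsum(event_rate) / total_rate
    k = int(np.searchsorted(cumulative_probability, np.random.random()))
    # step 5: exponentially distributed waiting time until that event
    dt = -np.log(np.random.random()) / total_rate
    return k, dt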
# In[3]:
''' define stochasticSIR function '''
def stochasticSIR(total_step, minT, maxT, initial_S, initial_I, initial_R
, reprodNum, recovRate, inOutRate, infecRate):
    # initialize output arrays
gT = np.zeros([total_step])
gS = np.zeros([total_step])
gI = np.zeros([total_step])
gR = np.zeros([total_step])
j = int(0)
gT[j] = minT
gS[j] = initial_S
gI[j] = initial_I
gR[j] = initial_R
# all possible events
event_SIRin = inOutRate*(gS[j] + gI[j] + gR[j])
event_Sout = inOutRate*gS[j]
event_Iout = inOutRate*gI[j]
event_Rout = inOutRate*gR[j]
event_SI = infecRate*gS[j]*gI[j]/(gS[j] + gI[j] + gR[j])
event_IR = recovRate*gI[j]
# configuration table
eventRate_updateSIR = np.array([[event_SIRin, +1, 0, 0]
, [event_Sout, -1, 0, 0]
, [event_Iout, 0, -1, 0]
, [event_Rout, 0, 0, -1]
, [event_SI, -1, +1, 0]
, [event_IR, 0, -1, +1]])
###
while (gT[j] < maxT):
        # randomly choose event: a single uniform draw is compared against the
        # cumulative event rates (drawing a fresh random number per comparison
        # would distort the event probabilities)
        randomDraw = np.random.random()
        totalRate = eventRate_updateSIR[:, 0].sum()
        if randomDraw < (eventRate_updateSIR[0:0 + 1, 0].sum()/totalRate):
            k = 0
        elif randomDraw < (eventRate_updateSIR[0:1 + 1, 0].sum()/totalRate):
            k = 1
        elif randomDraw < (eventRate_updateSIR[0:2 + 1, 0].sum()/totalRate):
            k = 2
        elif randomDraw < (eventRate_updateSIR[0:3 + 1, 0].sum()/totalRate):
            k = 3
        elif randomDraw < (eventRate_updateSIR[0:4 + 1, 0].sum()/totalRate):
            k = 4
        else:
            k = 5
# update number of section
gS[j] = gS[j] + eventRate_updateSIR[k, 1]
gI[j] = gI[j] + eventRate_updateSIR[k, 2]
gR[j] = gR[j] + eventRate_updateSIR[k, 3]
# update event_rate
event_SIRin = inOutRate*(gS[j] + gI[j] + gR[j])
event_Sout = inOutRate*gS[j]
event_Iout = inOutRate*gI[j]
event_Rout = inOutRate*gR[j]
event_SI = infecRate*gS[j]*gI[j]/(gS[j] + gI[j] + gR[j])
event_IR = recovRate*gI[j]
eventRate_updateSIR = np.array([[event_SIRin, 1, 0, 0]
, [event_Sout, -1, 0, 0]
, [event_Iout, 0, -1, 0]
, [event_Rout, 0, 0, -1]
, [event_SI, -1, +1, 0]
, [event_IR, 0, -1, +1]])
# next step is based on current step
dt = -np.log(np.random.random()) / eventRate_updateSIR[:, 0].sum()
gT[j + 1] = gT[j] + dt
gS[j + 1] = gS[j]
gI[j + 1] = gI[j]
gR[j + 1] = gR[j]
j = j + 1
# set the value of remaining steps = value of the last step (for ending)
gT[j:] = gT[j]
gS[j:] = gS[j]
gI[j:] = gI[j]
gR[j:] = gR[j]
###
return(gT, gS, gI, gR)
# In[9]:
''' starting from one infected '''
# setting parameter
timeUnit = 'day'
if timeUnit == 'day':
day = 1
year = 365
elif timeUnit == 'year':
year = 1
day = float(1)/365
total_SIR = 300
initial_I = 1
initial_S = total_SIR - initial_I
initial_R = total_SIR - initial_S - initial_I
# set parameter
reprodNum = float(1.5) # basic reproductive number R0: one infected person will transmit to 1.5 persons
recovRate = float(1)/(4*day) # 4 days per period ==> rate/year = 365/4
inOutRate = float(1)/(30*year) # birth rate per year
infecRate = reprodNum*(recovRate + inOutRate)/1 # per year, per person, per total-population
# initial boundary condition
minT = float(0*day)
maxT = float(90*day)
total_step = int(maxT*total_SIR)
# stochastic evolution way
total_way = int(5)
gTT = np.zeros([total_way, total_step])
gSS = np.zeros([total_way, total_step])
gII = np.zeros([total_way, total_step])
gRR = np.zeros([total_way, total_step])
for i in range(total_way):
aaa = stochasticSIR(total_step, minT, maxT, initial_S, initial_I, initial_R
, reprodNum, recovRate, inOutRate, infecRate)
gTT[i] = aaa[0]
gSS[i] = aaa[1]
gII[i] = aaa[2]
gRR[i] = aaa[3]
# plotting
figure_name = '-sir'
file_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
numberingFig = numberingFig + 1
figure = plt.figure(numberingFig, figsize = AlvaFigSize)
for i in range(total_way):
plt.plot(gTT[i], gSS[i], drawstyle = 'steps', label = r'$ S_{:}(t) $'.format(i), linewidth = (1 + i)
, color = 'blue', alpha = float(0.5 + i/total_way))
plt.plot(gTT[i], gII[i], drawstyle = 'steps', label = r'$ I_{:}(t) $'.format(i), linewidth = (1 + i)
, color = 'red', alpha = float(0.5 + i/total_way))
plt.plot(gTT[i], gRR[i], drawstyle = 'steps', label = r'$ R_{:}(t) $'.format(i), linewidth = (1 + i)
, color = 'green', alpha = float(0.5 + i/total_way))
plt.plot(gTT[i], (gSS[i] + gII[i] + gRR[i]), drawstyle = 'steps', label = r'$ N_{:}(t) $'.format(i)
, linewidth = (1 + i), color = 'black', alpha = float(0.5 + i/total_way))
plt.grid(True)
plt.title(r'$ Stochastic \ SIR \ (Susceptible-Infected-Recovered) $', fontsize = AlvaFontSize)
plt.xlabel(r'$ time \ ({:})$'.format(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Population $', fontsize = AlvaFontSize)
plt.legend(loc = (1,0))
plt.text(maxT, total_SIR*6.0/6, r'$ R_0 = %f $'%(reprodNum), fontsize = AlvaFontSize)
plt.text(maxT, total_SIR*5.0/6, r'$ \gamma = %f $'%(recovRate), fontsize = AlvaFontSize)
plt.text(maxT, total_SIR*4.0/6, r'$ \beta = %f $'%(infecRate), fontsize = AlvaFontSize)
plt.text(maxT, total_SIR*3.0/6, r'$ \mu = %f $'%(inOutRate), fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.7)
plt.yticks(fontsize = AlvaFontSize*0.7)
figure.tight_layout()
plt.savefig(save_figure, dpi = 100, bbox_inches='tight')
plt.show()
# In[5]:
if np.random.random() < 1.0/4:
print 'k = 1'
elif np.random.random() < 2.0/4:
print 'k = 2'
elif np.random.random() < 3.0/4:
print 'k = 3'
else:
print 'k = 4'
print 'end'
# In[6]:
'''uniform randomness --- uniform distribution'''
total_event = int(100)
gInput = np.arange(total_event)
meanP = 0.5
randomSeed = np.random.uniform(0, 1, total_event)
sumP = 0
for i in range(total_event):
sumP = sumP + (meanP - randomSeed[i])**2
deviationP = (sumP/total_event)**(1.0/2)
totalLevel = int(total_event/10)
category = alva.AlvaLevel(randomSeed, totalLevel, False)
gLevel = category[0]
gLevel_int = gLevel.astype(int)
numberLevel = category[1]
#print ('level =', gLevel)
#print ('level_int =', gLevel_int)
# plotting
figure_name = ''
file_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
numberingFig = numberingFig + 1
figure = plt.figure(numberingFig, figsize = AlvaFigSize)
plot1 = figure.add_subplot(1, 2, 1)
plot1.plot(gInput, randomSeed, color = 'gray', marker = 'o', label = 'data')
plot1.plot(gInput, alva.AlvaMinMax(randomSeed), color = 'red', marker = 'o', label = 'minMaxListing')
if total_event < 100:
plot1.set_xticks(gInput, minor = True)
plot1.set_yticks(randomSeed, minor = True)
plot1.grid(True, which = 'minor')
else:
plot1.grid(True, which = 'major')
plt.title(r'$ Uniform \ (mean = {:1.3f},\ deviation = {:1.3f}) $'.format(meanP, deviationP), fontsize = AlvaFontSize)
plt.xlabel(r'$ event-input $', fontsize = AlvaFontSize)
plt.ylabel(r'$ output $', fontsize = AlvaFontSize)
plt.legend(loc = (0, -0.2))
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plot2 = figure.add_subplot(1, 2, 2)
plot2.plot(numberLevel, gLevel, color = 'red', marker = 'o', label = 'category')
if totalLevel < 100:
plot2.set_xticks(numberLevel, minor = True)
plot2.set_yticks(gLevel, minor = True)
plot2.grid(True, which = 'minor')
else:
plot2.grid(True, which = 'major')
plt.title(r'$ (events = {:},\ levels = {:}) $'.format(total_event, totalLevel)
, fontsize = AlvaFontSize)
plt.xlabel(r'$ event/level $', fontsize = AlvaFontSize)
plt.ylabel(r'$ level-range $', fontsize = AlvaFontSize)
plt.legend(loc = (0, -0.2))
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
figure.tight_layout()
plt.savefig(save_figure, dpi = 100)
plt.show()
# In[7]:
randomSeed = np.random.uniform(0, 1, 100)
event_all_rate = randomSeed[0:10]
print event_all_rate
event_all_PD = event_all_rate/event_all_rate.sum()
dt = 0.01
total_step = 10
gT = np.arange(total_step)*dt
event_n = np.zeros([total_step, event_all_rate.size])
event_n[0] = event_all_PD
for i in range(total_step):
for j in range(event_all_rate.size):
event_n[i, j] = np.exp(-dt * np.sum(event_all_rate))
event_all_rate[j] = event_n[i, j]
# event_all_PD = event_all_PD/event_all_PD.sum()
numberingFig = numberingFig + 1
figure = plt.figure(numberingFig, figsize = AlvaFigSize)
for j in range(event_all_rate.size):
plt.plot(gT, event_n[:, j], marker = 'o')
plt.show()
| gpl-2.0 |
saltastro/pysalt | lib/saltimagetools.py | 2 | 4865 | ################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
"""Module for working with image data."""
import numpy as np
def find_centroid(image):
"""Find the centroid *(cx,cy)* of *image* where:
.. math::
cx=\sum f\cdot x/\sum f
cy=\sum f\cdot y/\sum f
"""
# Ensure that input is a numpy array
image=np.asarray(image)
# Setup indices
x,y=np.indices(image.shape)
# Calculate total flux
tot=image.sum()
cx=(image*x).sum()/tot
cy=(image*y).sum()/tot
return cx,cy
def find_object(image,x,y,distance=5):
"""Returns image pixel coordinates of centroid in box of size::
2*distance+1
around coordinates *(x,y)* in *image*.
"""
# Ensure input image is numpy array
image=np.asarray(image).transpose()
# Round to nearest integer for search box
x=int(round(x))
y=int(round(y))
distance=int(round(distance))
#print image
# Set range and check for regions outside boundary
xstart=x-distance
if xstart<0:
xstart=0
ystart=y-distance
if ystart<0:
ystart=0
xend=x+distance+1
yend=y+distance+1
#print xstart,xend,ystart,yend
#print image.shape
section=image[xstart:xend,ystart:yend]
#print section
cx,cy=find_centroid(section)
return cx+xstart,cy+ystart
def zscale(image, contrast=1.0):
"""Implementation of the IRAF zscale algorithm to find vmin and vmax parameters for the dynamic range of a display. It finds the image values near the median image value without the time consuming process of computing a full image histogram."""
from scipy import optimize
#import matplotlib.pyplot as plt
# Get ordered list of points
I=np.sort(image.flatten())
# Get number of points
npoints=len(I)
# Find the midpoint (median)
midpoint=(npoints-1)/2
# Fit a linear function
# I(i) = intercept + slope * (i - midpoint)
fitfunc = lambda p, x: p[0]*x+p[1]
errfunc = lambda p, x, y: fitfunc(p, x) - y
# Initial guess for the parameters
p0 = [(I[-1]-I[0])/npoints,I[midpoint]]
# Fit
i=np.arange(len(I))
p1, success = optimize.leastsq(errfunc, p0[:], args=(i, I))
# plt.plot(i,I,'r+')
# plt.plot(i,fitfunc(p1,i))
# plt.show()
if success in [1,2,3,4]:
slope=p1[0]
z1=I[midpoint]+(slope/contrast)*(1-midpoint)
z2=I[midpoint]+(slope/contrast)*(npoints-midpoint)
else:
z1=np.min(image)
z2=np.max(image)
return z1, z2
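# Illustrative usage sketch (not part of the original module).  It builds a
# synthetic image with a single bright blob, refines its position with
# find_object and scales the display range with zscale; the blob position and
# sizes are arbitrary assumptions.
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    yy, xx = np.indices((100, 100))
    fake_image = (np.exp(-((xx - 40.0)**2 + (yy - 60.0)**2) / 25.0)
                  + 0.05 * np.random.standard_normal((100, 100)))
    # refine a rough guess of the object position (x is column, y is row)
    cx, cy = find_object(fake_image, 42, 58, distance=5)
    # display limits from the zscale algorithm
    vmin, vmax = zscale(fake_image, contrast=1.0)
    plt.imshow(fake_image, vmin=vmin, vmax=vmax, origin='lower', cmap='gray')
    plt.plot([cx], [cy], 'r+')
    plt.show()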
| bsd-3-clause |
nikste/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py | 75 | 29377 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import batch
from tensorflow.contrib.learn.python.learn.dataframe.transforms import csv_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import example_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import reader_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import sparsify
from tensorflow.contrib.learn.python.learn.dataframe.transforms import split_mask
from tensorflow.python.client import session as sess
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner as qr
def _expand_file_names(filepatterns):
"""Takes a list of file patterns and returns a list of resolved file names."""
if not isinstance(filepatterns, (list, tuple, set)):
filepatterns = [filepatterns]
filenames = set()
for filepattern in filepatterns:
names = set(gfile.Glob(filepattern))
filenames |= names
return list(filenames)
def _dtype_to_nan(dtype):
if dtype is dtypes.string:
return b""
elif dtype.is_integer:
return np.nan
elif dtype.is_floating:
return np.nan
elif dtype is dtypes.bool:
return np.nan
else:
raise ValueError("Can't parse type without NaN into sparse tensor: %s" %
dtype)
def _get_default_value(feature_spec):
if isinstance(feature_spec, parsing_ops.FixedLenFeature):
return feature_spec.default_value
else:
return _dtype_to_nan(feature_spec.dtype)
class TensorFlowDataFrame(df.DataFrame):
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
def run(self,
num_batches=None,
graph=None,
session=None,
start_queues=True,
initialize_variables=True,
**kwargs):
"""Builds and runs the columns of the `DataFrame` and yields batches.
This is a generator that yields a dictionary mapping column names to
evaluated columns.
Args:
num_batches: the maximum number of batches to produce. If none specified,
the returned value will iterate through infinite batches.
graph: the `Graph` in which the `DataFrame` should be built.
session: the `Session` in which to run the columns of the `DataFrame`.
start_queues: if true, queues will be started before running and halted
        after producing `n` batches.
initialize_variables: if true, variables will be initialized.
**kwargs: Additional keyword arguments e.g. `num_epochs`.
Yields:
A dictionary, mapping column names to the values resulting from running
each column for a single batch.
"""
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
if session is None:
session = sess.Session()
self_built = self.build(**kwargs)
keys = list(self_built.keys())
cols = list(self_built.values())
if initialize_variables:
if variables.local_variables():
session.run(variables.local_variables_initializer())
if variables.global_variables():
session.run(variables.global_variables_initializer())
if start_queues:
coord = coordinator.Coordinator()
threads = qr.start_queue_runners(sess=session, coord=coord)
i = 0
while num_batches is None or i < num_batches:
i += 1
try:
values = session.run(cols)
yield collections.OrderedDict(zip(keys, values))
except errors.OutOfRangeError:
break
if start_queues:
coord.request_stop()
coord.join(threads)
def select_rows(self, boolean_series):
"""Returns a `DataFrame` with only the rows indicated by `boolean_series`.
Note that batches may no longer have consistent size after calling
`select_rows`, so the new `DataFrame` may need to be rebatched.
For example:
'''
filtered_df = df.select_rows(df["country"] == "jp").batch(64)
'''
Args:
boolean_series: a `Series` that evaluates to a boolean `Tensor`.
Returns:
A new `DataFrame` with the same columns as `self`, but selecting only the
rows where `boolean_series` evaluated to `True`.
"""
result = type(self)()
for key, col in self._columns.items():
try:
result[key] = col.select_rows(boolean_series)
except AttributeError as e:
raise NotImplementedError((
"The select_rows method is not implemented for Series type {}. "
"Original error: {}").format(type(col), e))
return result
def split(self, index_series, proportion, batch_size=None):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
left_rows = self.select_rows(left_mask)
right_rows = self.select_rows(right_mask)
if batch_size:
left_rows = left_rows.batch(batch_size=batch_size, shuffle=False)
right_rows = right_rows.batch(batch_size=batch_size, shuffle=False)
return left_rows, right_rows
def split_fast(self, index_series, proportion, batch_size,
base_batch_size=1000):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
base_batch_size: the batch size to use for materialized data, prior to the
split.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
self["left_mask__"] = left_mask
self["right_mask__"] = right_mask
# TODO(soergel): instead of base_batch_size can we just do one big batch?
# avoid computing the hashes twice
m = self.materialize_to_memory(batch_size=base_batch_size)
left_rows_df = m.select_rows(m["left_mask__"])
right_rows_df = m.select_rows(m["right_mask__"])
del left_rows_df[["left_mask__", "right_mask__"]]
del right_rows_df[["left_mask__", "right_mask__"]]
# avoid recomputing the split repeatedly
left_rows_df = left_rows_df.materialize_to_memory(batch_size=batch_size)
right_rows_df = right_rows_df.materialize_to_memory(batch_size=batch_size)
return left_rows_df, right_rows_df
def run_one_batch(self):
"""Creates a new 'Graph` and `Session` and runs a single batch.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
batch of the `DataFrame`.
"""
return list(self.run(num_batches=1))[0]
def run_one_epoch(self):
"""Creates a new 'Graph` and `Session` and runs a single epoch.
Naturally this makes sense only for DataFrames that fit in memory.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
epoch of the `DataFrame`.
"""
# batches is a list of dicts of numpy arrays
batches = [b for b in self.run(num_epochs=1)]
# first invert that to make a dict of lists of numpy arrays
pivoted_batches = {}
for k in batches[0].keys():
pivoted_batches[k] = []
for b in batches:
for k, v in b.items():
pivoted_batches[k].append(v)
# then concat the arrays in each column
result = {k: np.concatenate(column_batches)
for k, column_batches in pivoted_batches.items()}
return result
def materialize_to_memory(self, batch_size):
unordered_dict_of_arrays = self.run_one_epoch()
    # there may already be an 'index' column, in which case from_ordereddict()
# below will complain because it wants to generate a new one.
# for now, just remove it.
# TODO(soergel): preserve index history, potentially many levels deep
del unordered_dict_of_arrays["index"]
# the order of the columns in this dict is arbitrary; we just need it to
# remain consistent.
ordered_dict_of_arrays = collections.OrderedDict(unordered_dict_of_arrays)
return TensorFlowDataFrame.from_ordereddict(ordered_dict_of_arrays,
batch_size=batch_size)
def batch(self,
batch_size,
shuffle=False,
num_threads=1,
queue_capacity=None,
min_after_dequeue=None,
seed=None):
"""Resize the batches in the `DataFrame` to the given `batch_size`.
Args:
batch_size: desired batch size.
      shuffle: whether records should be shuffled. Defaults to false.
num_threads: the number of enqueueing threads.
queue_capacity: capacity of the queue that will hold new batches.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` with `batch_size` rows.
"""
column_names = list(self._columns.keys())
if shuffle:
batcher = batch.ShuffleBatch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
seed=seed)
else:
batcher = batch.Batch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity)
batched_series = batcher(list(self._columns.values()))
dataframe = type(self)()
dataframe.assign(**(dict(zip(column_names, batched_series))))
return dataframe
@classmethod
def _from_csv_base(cls, filepatterns, get_default_values, has_header,
column_names, num_threads, enqueue_size,
batch_size, queue_capacity, min_after_dequeue, shuffle,
seed):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
get_default_values: a function that produces a list of default values for
each column, given the column names.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if column_names is None:
if not has_header:
raise ValueError("If column_names is None, has_header must be true.")
with gfile.GFile(filenames[0]) as f:
column_names = csv.DictReader(f).fieldnames
if "index" in column_names:
raise ValueError(
"'index' is reserved and can not be used for a column name.")
default_values = get_default_values(column_names)
reader_kwargs = {"skip_header_lines": (1 if has_header else 0)}
index, value = reader_source.TextFileSource(
filenames,
reader_kwargs=reader_kwargs,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = csv_parser.CSVParser(column_names, default_values)
parsed = parser(value)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_csv(cls,
filepatterns,
default_values,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
default_values: a list of default values for each column.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
# pylint: disable=unused-argument
return default_values
return cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
@classmethod
def from_csv_with_feature_spec(cls,
filepatterns,
feature_spec,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files, given a feature_spec.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
feature_spec: a dict mapping column names to `FixedLenFeature` or
`VarLenFeature`.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
return [_get_default_value(feature_spec[name]) for name in column_names]
dataframe = cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
# replace the dense columns with sparse ones in place in the dataframe
for name in dataframe.columns():
if name != "index" and isinstance(feature_spec[name],
parsing_ops.VarLenFeature):
strip_value = _get_default_value(feature_spec[name])
(dataframe[name],) = sparsify.Sparsify(strip_value)(dataframe[name])
return dataframe
@classmethod
def from_examples(cls,
filepatterns,
features,
reader_cls=io_ops.TFRecordReader,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from `tensorflow.Example`s.
Args:
filepatterns: a list of file patterns containing `tensorflow.Example`s.
features: a dict mapping feature names to `VarLenFeature` or
`FixedLenFeature`.
reader_cls: a subclass of `tensorflow.ReaderBase` that will be used to
read the `Example`s.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with `Example`s from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if "index" in features:
raise ValueError(
"'index' is reserved and can not be used for a feature name.")
index, record = reader_source.ReaderSource(
reader_cls,
filenames,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = example_parser.ExampleParser(features)
parsed = parser(record)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_pandas(cls,
pandas_dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="pandas_data"):
"""Create a `tf.learn.DataFrame` from a `pandas.DataFrame`.
Args:
pandas_dataframe: `pandas.DataFrame` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
`pandas_dataframe`.
"""
pandas_source = in_memory_source.PandasSource(
pandas_dataframe,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(pandas_source()._asdict()))
return dataframe
@classmethod
def from_numpy(cls,
numpy_array,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from a `numpy.ndarray`.
The returned `DataFrame` contains two columns: 'index' and 'value'. The
'value' column contains a row from the array. The 'index' column contains
the corresponding row number.
Args:
numpy_array: `numpy.ndarray` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
array.
"""
numpy_source = in_memory_source.NumpySource(
numpy_array,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
@classmethod
def from_ordereddict(cls,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from an `OrderedDict` of `numpy.ndarray`.
The returned `DataFrame` contains a column for each key of the dict plus an
extra 'index' column. The 'index' column contains the row number. Each of
the other columns contains a row from the corresponding array.
Args:
ordered_dict_of_arrays: `OrderedDict` of `numpy.ndarray` that serves as a
data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given arrays.
Raises:
ValueError: `ordered_dict_of_arrays` contains the reserved name 'index'.
"""
numpy_source = in_memory_source.OrderedDictNumpySource(
ordered_dict_of_arrays,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
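# A minimal usage sketch (illustrative only, not part of the original module):
# build a DataFrame from an in-memory numpy array and pull a couple of batches.
# The array shape and batch size below are arbitrary assumptions.
if __name__ == "__main__":
  example_array = np.random.rand(100, 3).astype(np.float32)
  example_df = TensorFlowDataFrame.from_numpy(example_array,
                                              batch_size=16,
                                              shuffle=False)
  for example_batch in example_df.run(num_batches=2):
    # each batch is an OrderedDict with an 'index' column and a 'value' column
    print(example_batch["index"].shape, example_batch["value"].shape)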
| apache-2.0 |
jpautom/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
liuchenxjtu/Lung_Cancer | data0.py | 1 | 6245 | import logging
import numpy as np
import pandas as pd
import os
import video
import settings
class ChunkLoader():
def __init__(self, set_name, repo_dir, datum_dtype=np.uint8,
nclasses=2, augment=False, test_mode=False):
# assert test_mode is False, 'Test mode not implemented yet'
np.random.seed(0)
self.set_name = set_name
# self.bsz = self.be.bsz
self.augment = augment
self.repo_dir = repo_dir
self.is_training = (set_name == 'train')
self.chunk_size = settings.chunk_size
self.chunk_shape = (self.chunk_size, self.chunk_size, self.chunk_size)
self.chunk_volume = np.prod(self.chunk_shape)
self.metadata = pd.read_csv(os.path.join(self.repo_dir, set_name + '-metadata.csv'))
self.data_size = self.metadata.shape[0]
self.pos_users = self.metadata[self.metadata['flag']==1]['uid']
self.neg_users = self.metadata[self.metadata['flag']==0]['uid']
self.nvids = self.metadata.shape[0]
self.chunks_filled = 0
self.video_idx = 0
if not test_mode:
self.labels = pd.read_csv(os.path.join(self.repo_dir, 'labels.csv'))
self.nega_labels = pd.read_csv(os.path.join(self.repo_dir, 'candidates.csv'))
else:
self.labels = None
self.test_mode = test_mode
self.chunks,self.starts,self.targets = [],[],[]
##positive points in lables.csv
self.pos_labels = self.labels[self.labels['uid'].isin(self.pos_users)].shape[0]
self.pos_neg_ratio = 6.0
self.chunk_from_neg_users = int(self.pos_labels*self.pos_neg_ratio/len(self.neg_users))
self.current_uid = self.current_flag = self.current_meta = None
def reset(self):
self.chunks,self.starts,self.targets = [],[],[]
def next_video(self,video_idx):
self.reset()
self.current_meta = self.metadata.iloc[video_idx]
uid = self.current_meta['uid']
self.current_uid = self.current_meta['uid']
self.current_flag = int(self.current_meta['flag'])
data_filename = os.path.join(self.repo_dir, uid + '.' + settings.file_ext)
vid_shape = (int(self.current_meta['z_len']),
int(self.current_meta['y_len']),
int(self.current_meta['x_len']))
vid_data = video.read_blp(data_filename, vid_shape)
self.video_idx += 1
self.extract_chunks(vid_data)
return self.chunks,self.starts,self.targets
def slice_chunk(self, start, data):
return data[start[0]:start[0] + self.chunk_size,
start[1]:start[1] + self.chunk_size,
start[2]:start[2] + self.chunk_size]#.ravel()
def extract_one(self, data, data_shape, uid_data,idx):
# assert uid_data.shape[0] != 0
if not self.test_mode:
center = np.array((uid_data['z'].iloc[idx],
uid_data['y'].iloc[idx],
uid_data['x'].iloc[idx]), dtype=np.int32)
# radius
rad = 0.5 * uid_data['diam'].iloc[idx]
if rad == 0:
# Assign an arbitrary radius to candidate nodules
rad = 20 / settings.resolution
            # note: low can be negative here; it is clamped to the valid range below
            low = np.int32(center + rad - self.chunk_size)
high = np.int32(center - rad)
for j in range(3):
low[j] = max(0, low[j])
high[j] = max(low[j] + 1, high[j])
high[j] = min(data_shape[j] - self.chunk_size, high[j])
low[j] = min(low[j], high[j] - 1)
start = [np.random.randint(low=low[i], high=high[i]) for i in range(3)]
else:
            start = self.generate_chunk_start(idx, data_shape)
chunk = self.slice_chunk(start, data)
return chunk,start
def generate_chunk_start(self, chunk_idx, data_shape):
chunk_spacing = np.int32((np.array(data_shape) - self.chunk_size) / settings.chunks_per_dim)
z_chunk_idx = chunk_idx / settings.chunks_per_dim ** 2
y_chunk_idx = (chunk_idx - z_chunk_idx * settings.chunks_per_dim ** 2) / settings.chunks_per_dim
x_chunk_idx = chunk_idx - z_chunk_idx * settings.chunks_per_dim ** 2 \
- y_chunk_idx * settings.chunks_per_dim
start = [z_chunk_idx * chunk_spacing[0],
y_chunk_idx * chunk_spacing[1],
x_chunk_idx * chunk_spacing[2]]
return start
def extract_chunks(self, data):
data_shape = np.array(data.shape, dtype=np.int32)
if self.current_flag:
uid_data = self.labels[self.labels['uid'] == self.current_uid]
for idx in range(uid_data.shape[0]):
chunk,start = self.extract_one(data, data_shape, uid_data, idx)
if chunk is None:
continue
self.chunks.append(chunk)
self.starts.append(start)
self.targets.append(1)
else:
uid_data = self.labels[self.labels['uid'] == self.current_uid]
for idx in range(min(self.chunk_from_neg_users,uid_data.shape[0])):
chunk,start = self.extract_one(data, data_shape, uid_data, idx)
if chunk is None:
continue
self.chunks.append(chunk)
self.starts.append(start)
self.targets.append(0)
## not enough negative from labels, then to candidates
if uid_data.shape[0]<self.chunk_from_neg_users:
left_chunk = self.chunk_from_neg_users-uid_data.shape[0]
uid_data = self.nega_labels[self.nega_labels['uid'] == self.current_uid]
for i in range(min(left_chunk,uid_data.shape[0])):
idx = np.random.randint(uid_data.shape[0])
chunk,start = self.extract_one(data, data_shape, uid_data, idx)
if chunk is None:
continue
self.chunks.append(chunk)
self.starts.append(start)
self.targets.append(0)
| apache-2.0 |
fergalbyrne/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_ps.py | 69 | 50262 | """
A PostScript backend, which can produce both PostScript .ps and .eps
"""
from __future__ import division
import glob, math, os, shutil, sys, time
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
from tempfile import gettempdir
from cStringIO import StringIO
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.afm import AFM
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, get_realpath_and_stat, \
is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ttconv import convert_ttf_to_ps
from matplotlib.mathtext import MathTextParser
from matplotlib._mathtext_data import uni2type1
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib.transforms import IdentityTransform
import numpy as npy
import binascii
import re
try:
set
except NameError:
from sets import Set as set
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
backend_version = 'Level II'
debugPS = 0
papersize = {'letter': (8.5,11),
'legal': (8.5,14),
'ledger': (11,17),
'a0': (33.11,46.81),
'a1': (23.39,33.11),
'a2': (16.54,23.39),
'a3': (11.69,16.54),
'a4': (8.27,11.69),
'a5': (5.83,8.27),
'a6': (4.13,5.83),
'a7': (2.91,4.13),
'a8': (2.07,2.91),
'a9': (1.457,2.05),
'a10': (1.02,1.457),
'b0': (40.55,57.32),
'b1': (28.66,40.55),
'b2': (20.27,28.66),
'b3': (14.33,20.27),
'b4': (10.11,14.33),
'b5': (7.16,10.11),
'b6': (5.04,7.16),
'b7': (3.58,5.04),
'b8': (2.51,3.58),
'b9': (1.76,2.51),
'b10': (1.26,1.76)}
def _get_papertype(w, h):
keys = papersize.keys()
keys.sort()
keys.reverse()
for key in keys:
if key.startswith('l'): continue
pw, ph = papersize[key]
if (w < pw) and (h < ph): return key
else:
return 'a0'
def _num_to_str(val):
if is_string_like(val): return val
ival = int(val)
if val==ival: return str(ival)
s = "%1.3f"%val
s = s.rstrip("0")
s = s.rstrip(".")
return s
def _nums_to_str(*args):
return ' '.join(map(_num_to_str,args))
def quote_ps_string(s):
"Quote dangerous characters of S for use in a PostScript string constant."
s=s.replace("\\", "\\\\")
s=s.replace("(", "\\(")
s=s.replace(")", "\\)")
s=s.replace("'", "\\251")
s=s.replace("`", "\\301")
s=re.sub(r"[^ -~\n]", lambda x: r"\%03o"%ord(x.group()), s)
return s
def seq_allequal(seq1, seq2):
"""
seq1 and seq2 are either None or sequences or numerix arrays
Return True if both are None or both are seqs with identical
elements
"""
if seq1 is None:
return seq2 is None
if seq2 is None:
return False
#ok, neither are None:, assuming iterable
if len(seq1) != len(seq2): return False
return npy.alltrue(npy.equal(seq1, seq2))
class RendererPS(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles.
"""
fontd = maxdict(50)
afmfontd = maxdict(50)
def __init__(self, width, height, pswriter, imagedpi=72):
"""
Although postscript itself is dpi independent, we need to
        inform the image code about a requested dpi to generate high
        res images and then scale them before embedding them
"""
RendererBase.__init__(self)
self.width = width
self.height = height
self._pswriter = pswriter
if rcParams['text.usetex']:
self.textcnt = 0
self.psfrag = []
self.imagedpi = imagedpi
if rcParams['path.simplify']:
self.simplify = (width * imagedpi, height * imagedpi)
else:
self.simplify = None
# current renderer state (None=uninitialised)
self.color = None
self.linewidth = None
self.linejoin = None
self.linecap = None
self.linedash = None
self.fontname = None
self.fontsize = None
self.hatch = None
self.image_magnification = imagedpi/72.0
self._clip_paths = {}
self._path_collection_id = 0
self.used_characters = {}
self.mathtext_parser = MathTextParser("PS")
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
realpath, stat_key = get_realpath_and_stat(font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in other.items():
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def set_color(self, r, g, b, store=1):
if (r,g,b) != self.color:
if r==g and r==b:
self._pswriter.write("%1.3f setgray\n"%r)
else:
self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b))
if store: self.color = (r,g,b)
def set_linewidth(self, linewidth, store=1):
if linewidth != self.linewidth:
self._pswriter.write("%1.3f setlinewidth\n"%linewidth)
if store: self.linewidth = linewidth
def set_linejoin(self, linejoin, store=1):
if linejoin != self.linejoin:
self._pswriter.write("%d setlinejoin\n"%linejoin)
if store: self.linejoin = linejoin
def set_linecap(self, linecap, store=1):
if linecap != self.linecap:
self._pswriter.write("%d setlinecap\n"%linecap)
if store: self.linecap = linecap
def set_linedash(self, offset, seq, store=1):
if self.linedash is not None:
oldo, oldseq = self.linedash
if seq_allequal(seq, oldseq): return
if seq is not None and len(seq):
s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset)
self._pswriter.write(s)
else:
self._pswriter.write("[] 0 setdash\n")
if store: self.linedash = (offset,seq)
def set_font(self, fontname, fontsize, store=1):
if rcParams['ps.useafm']: return
if (fontname,fontsize) != (self.fontname,self.fontsize):
out = ("/%s findfont\n"
"%1.3f scalefont\n"
"setfont\n" % (fontname,fontsize))
self._pswriter.write(out)
if store: self.fontname = fontname
if store: self.fontsize = fontsize
def set_hatch(self, hatch):
"""
hatch can be one of:
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
X - crossed diagonal
letters can be combined, in which case all the specified
hatchings are done
if same letter repeats, it increases the density of hatching
in that direction
"""
hatches = {'horiz':0, 'vert':0, 'diag1':0, 'diag2':0}
for letter in hatch:
if (letter == '/'): hatches['diag2'] += 1
elif (letter == '\\'): hatches['diag1'] += 1
elif (letter == '|'): hatches['vert'] += 1
elif (letter == '-'): hatches['horiz'] += 1
elif (letter == '+'):
hatches['horiz'] += 1
hatches['vert'] += 1
elif (letter.lower() == 'x'):
hatches['diag1'] += 1
hatches['diag2'] += 1
def do_hatch(angle, density):
if (density == 0): return ""
return """\
gsave
eoclip %s rotate 0.0 0.0 0.0 0.0 setrgbcolor 0 setlinewidth
/hatchgap %d def
pathbbox /hatchb exch def /hatchr exch def /hatcht exch def /hatchl exch def
hatchl cvi hatchgap idiv hatchgap mul
hatchgap
hatchr cvi hatchgap idiv hatchgap mul
{hatcht m 0 hatchb hatcht sub r }
for
stroke
grestore
""" % (angle, 12/density)
self._pswriter.write("gsave\n")
self._pswriter.write(do_hatch(90, hatches['horiz']))
self._pswriter.write(do_hatch(0, hatches['vert']))
self._pswriter.write(do_hatch(45, hatches['diag1']))
self._pswriter.write(do_hatch(-45, hatches['diag2']))
self._pswriter.write("grestore\n")
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontPropertry prop
"""
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
l,b,r,t = texmanager.get_ps_bbox(s, fontsize)
w = (r-l)
h = (t-b)
# TODO: We need a way to get a good baseline from
# text.usetex
return w, h, 0
if ismath:
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width, height, descent
if rcParams['ps.useafm']:
if ismath: s = s[1:-1]
font = self._get_font_afm(prop)
l,b,w,h,d = font.get_str_bbox_and_descent(s)
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
w *= scale
h *= scale
d *= scale
return w, h, d
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
#print s, w, h
return w, h, d
def flipy(self):
'return true if small y numbers are top for renderer'
return False
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afmfontd.get(key)
if font is None:
fname = findfont(prop, fontext='afm')
font = self.afmfontd.get(fname)
if font is None:
font = AFM(file(findfont(prop, fontext='afm')))
self.afmfontd[fname] = font
self.afmfontd[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _rgba(self, im):
return im.as_rgba_str()
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = npy.fromstring(s, npy.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[:,:,:3]
return h, w, rgb.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = npy.fromstring(rgbat[2], npy.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba_f = rgba.astype(npy.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(npy.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def _hex_lines(self, s, chars_per_line=128):
s = binascii.b2a_hex(s)
nhex = len(s)
lines = []
for i in range(0,nhex,chars_per_line):
limit = min(i+chars_per_line, nhex)
lines.append(s[i:limit])
return lines
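# Minimal sketch of what _hex_lines produces, with a tiny buffer and a narrow
# chars_per_line chosen purely for illustration:
#   import binascii
#   binascii.b2a_hex('\x00\xff\x10\x20')               # -> '00ff1020'
#   self._hex_lines('\x00\xff\x10\x20', chars_per_line=4)
#   # -> ['00ff', '1020']  (hex text split into fixed-width lines for the PS file)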
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to draw_image.
Allows a backend to have images at a different resolution to other
artists.
"""
return self.image_magnification
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas and y
is the distance from bottom
bbox is a matplotlib.transforms.BBox instance for clipping, or
None
"""
im.flipud_out()
if im.is_grayscale:
h, w, bits = self._gray(im)
imagecmd = "image"
else:
h, w, bits = self._rgb(im)
imagecmd = "false 3 colorimage"
hexlines = '\n'.join(self._hex_lines(bits))
xscale, yscale = (
w/self.image_magnification, h/self.image_magnification)
figh = self.height*72
#print 'values', origin, flipud, figh, h, y
clip = []
if bbox is not None:
clipx,clipy,clipw,cliph = bbox.bounds
clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy))
if clippath is not None:
id = self._get_clip_path(clippath, clippath_trans)
clip.append('%s' % id)
clip = '\n'.join(clip)
#y = figh-(y+h)
ps = """gsave
%(clip)s
%(x)s %(y)s translate
%(xscale)s %(yscale)s scale
/DataString %(w)s string def
%(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ]
{
currentfile DataString readhexstring pop
} bind %(imagecmd)s
%(hexlines)s
grestore
""" % locals()
self._pswriter.write(ps)
# unflip
im.flipud_out()
def _convert_path(self, path, transform, simplify=None):
path = transform.transform_path(path)
ps = []
last_points = None
for points, code in path.iter_segments(simplify):
if code == Path.MOVETO:
ps.append("%g %g m" % tuple(points))
elif code == Path.LINETO:
ps.append("%g %g l" % tuple(points))
elif code == Path.CURVE3:
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
ps.append("%g %g %g %g %g %g c" %
tuple(points[2:]))
elif code == Path.CURVE4:
ps.append("%g %g %g %g %g %g c" % tuple(points))
elif code == Path.CLOSEPOLY:
ps.append("cl")
last_points = points
ps = "\n".join(ps)
return ps
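# Hedged example of the PostScript path text _convert_path builds. The Path
# and transform below are assumptions chosen for illustration, not values the
# backend itself uses:
#   from matplotlib.path import Path
#   from matplotlib.transforms import IdentityTransform
#   p = Path([(0, 0), (72, 0), (72, 72)],
#            [Path.MOVETO, Path.LINETO, Path.LINETO])
#   self._convert_path(p, IdentityTransform())
#   # -> "0 0 m\n72 0 l\n72 72 l"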
def _get_clip_path(self, clippath, clippath_transform):
id = self._clip_paths.get((clippath, clippath_transform))
if id is None:
id = 'c%x' % len(self._clip_paths)
ps_cmd = ['/%s {' % id]
ps_cmd.append(self._convert_path(clippath, clippath_transform))
ps_cmd.extend(['clip', 'newpath', '} bind def\n'])
self._pswriter.write('\n'.join(ps_cmd))
self._clip_paths[(clippath, clippath_transform)] = id
return id
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a Path instance using the given affine transform.
"""
ps = self._convert_path(path, transform, self.simplify)
self._draw_ps(ps, gc, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draw the markers defined by path at each of the positions in x
and y. path coordinates are points, x and y coords will be
transformed by the transform
"""
if debugPS: self._pswriter.write('% draw_markers \n')
write = self._pswriter.write
if rgbFace:
if rgbFace[0]==rgbFace[1] and rgbFace[0]==rgbFace[2]:
ps_color = '%1.3f setgray' % rgbFace[0]
else:
ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace
# construct the generic marker command:
ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # don't want the translate to be global
ps_cmd.append(self._convert_path(marker_path, marker_trans))
if rgbFace:
ps_cmd.extend(['gsave', ps_color, 'fill', 'grestore'])
ps_cmd.extend(['stroke', 'grestore', '} bind def'])
tpath = trans.transform_path(path)
for vertices, code in tpath.iter_segments():
if len(vertices):
x, y = vertices[-2:]
ps_cmd.append("%g %g o" % (x, y))
ps = '\n'.join(ps_cmd)
self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
write = self._pswriter.write
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = 'p%x_%x' % (self._path_collection_id, i)
ps_cmd = ['/%s {' % name,
'newpath', 'translate']
ps_cmd.append(self._convert_path(path, transform))
ps_cmd.extend(['} bind def\n'])
write('\n'.join(ps_cmd))
path_codes.append(name)
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_codes, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
ps = "%g %g %s" % (xo, yo, path_id)
self._draw_ps(ps, gc, rgbFace)
self._path_collection_id += 1
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
"""
draw a Text instance
"""
w, h, bl = self.get_text_width_height_descent(s, prop, ismath)
fontsize = prop.get_size_in_points()
corr = 0#w/2*(fontsize-10)/10
pos = _nums_to_str(x-corr, y)
thetext = 'psmarker%d' % self.textcnt
color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3]
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(
rcParams['font.family'], r'{\rmfamily %s}')
s = fontcmd % s
tex = r'\color[rgb]{%s} %s' % (color, s)
self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
ps = """\
gsave
%(pos)s moveto
(%(thetext)s)
show
grestore
""" % locals()
self._pswriter.write(ps)
self.textcnt += 1
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
draw a Text instance
"""
# local to avoid repeated attribute lookups
write = self._pswriter.write
if debugPS:
write("% text\n")
if ismath=='TeX':
return self.draw_tex(gc, x, y, s, prop, angle)
elif ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
elif isinstance(s, unicode):
return self.draw_unicode(gc, x, y, s, prop, angle)
elif rcParams['ps.useafm']:
font = self._get_font_afm(prop)
l,b,w,h = font.get_str_bbox(s)
fontsize = prop.get_size_in_points()
l *= 0.001*fontsize
b *= 0.001*fontsize
w *= 0.001*fontsize
h *= 0.001*fontsize
if angle==90: l,b = -b, l # todo generalize for arb rotations
pos = _nums_to_str(x-l, y-b)
thetext = '(%s)' % s
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
rotate = '%1.1f rotate' % angle
setcolor = '%1.3f %1.3f %1.3f setrgbcolor' % gc.get_rgb()[:3]
#h = 0
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(pos)s moveto
%(rotate)s
%(thetext)s
%(setcolor)s
show
grestore
""" % locals()
self._draw_ps(ps, gc, None)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())
write("%s m\n"%_nums_to_str(x,y))
if angle:
write("gsave\n")
write("%s rotate\n"%_num_to_str(angle))
descent = font.get_descent() / 64.0
if descent:
write("0 %s rmoveto\n"%_num_to_str(descent))
write("(%s) show\n"%quote_ps_string(s))
if angle:
write("grestore\n")
def new_gc(self):
return GraphicsContextPS()
def draw_unicode(self, gc, x, y, s, prop, angle):
"""draw a unicode string. ps doesn't have unicode support, so
we have to do this the hard way
"""
if rcParams['ps.useafm']:
self.set_color(*gc.get_rgb())
font = self._get_font_afm(prop)
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
thisx = 0
thisy = font.get_str_bbox_and_descent(s)[4] * scale
last_name = None
lines = []
for c in s:
name = uni2type1.get(ord(c), 'question')
try:
width = font.get_width_from_char_name(name)
except KeyError:
name = 'question'
width = font.get_width_char('?')
if last_name is not None:
kern = font.get_kern_dist_from_name(last_name, name)
else:
kern = 0
last_name = name
thisx += kern * scale
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += width * scale
thetext = "\n".join(lines)
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())
cmap = font.get_charmap()
lastgind = None
#print 'text', s
lines = []
thisx = 0
thisy = font.get_descent() / 64.0
for c in s:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is None:
ccode = ord('?')
name = '.notdef'
gind = 0
else:
name = font.get_glyph_name(gind)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
lastgind = gind
thisx += kern/64.0
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += glyph.linearHoriAdvance/65536.0
thetext = '\n'.join(lines)
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def draw_mathtext(self, gc,
x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if debugPS:
self._pswriter.write("% mathtext\n")
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
self.set_color(*gc.get_rgb())
thetext = pswriter.getvalue()
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
"""
Emit the PostScript snippet 'ps' with all the attributes from 'gc'
applied. 'ps' must consist of PostScript commands to construct a path.
The fill and/or stroke kwargs can be set to False if the
'ps' string already includes filling and/or stroking, in
which case _draw_ps is just supplying properties and
clipping.
"""
# local variable eliminates all repeated attribute lookups
write = self._pswriter.write
if debugPS and command:
write("% "+command+"\n")
mightstroke = (gc.get_linewidth() > 0.0 and
(len(gc.get_rgb()) <= 3 or gc.get_rgb()[3] != 0.0))
stroke = stroke and mightstroke
fill = (fill and rgbFace is not None and
(len(rgbFace) <= 3 or rgbFace[3] != 0.0))
if mightstroke:
self.set_linewidth(gc.get_linewidth())
jint = gc.get_joinstyle()
self.set_linejoin(jint)
cint = gc.get_capstyle()
self.set_linecap(cint)
self.set_linedash(*gc.get_dashes())
self.set_color(*gc.get_rgb()[:3])
write('gsave\n')
cliprect = gc.get_clip_rectangle()
if cliprect:
x,y,w,h=cliprect.bounds
write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y))
clippath, clippath_trans = gc.get_clip_path()
if clippath:
id = self._get_clip_path(clippath, clippath_trans)
write('%s\n' % id)
# Jochen, is the strip necessary? - this could be a honking big string
write(ps.strip())
write("\n")
if fill:
if stroke:
write("gsave\n")
self.set_color(store=0, *rgbFace[:3])
write("fill\ngrestore\n")
else:
self.set_color(store=0, *rgbFace[:3])
write("fill\n")
hatch = gc.get_hatch()
if hatch:
self.set_hatch(hatch)
if stroke:
write("stroke\n")
write("grestore\n")
class GraphicsContextPS(GraphicsContextBase):
def get_capstyle(self):
return {'butt':0,
'round':1,
'projecting':2}[GraphicsContextBase.get_capstyle(self)]
def get_joinstyle(self):
return {'miter':0,
'round':1,
'bevel':2}[GraphicsContextBase.get_joinstyle(self)]
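# Quick illustration of the integer codes GraphicsContextPS hands to
# setlinecap/setlinejoin; the gc below is constructed only for demonstration:
#   gc = GraphicsContextPS()
#   gc.set_capstyle('round');  gc.get_capstyle()    # -> 1
#   gc.set_joinstyle('miter'); gc.get_joinstyle()   # -> 0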
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasPS(thisFig)
manager = FigureManagerPS(canvas, num)
return manager
class FigureCanvasPS(FigureCanvasBase):
def draw(self):
pass
filetypes = {'ps' : 'Postscript',
'eps' : 'Encapsulated Postscript'}
def get_default_filetype(self):
return 'ps'
def print_ps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'ps', *args, **kwargs)
def print_eps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'eps', *args, **kwargs)
def _print_ps(self, outfile, format, *args, **kwargs):
papertype = kwargs.get("papertype", rcParams['ps.papersize'])
papertype = papertype.lower()
if papertype == 'auto':
pass
elif papertype not in papersize:
raise RuntimeError( '%s is not a valid papertype. Use one \
of %s'% (papertype, ', '.join( papersize.keys() )) )
orientation = kwargs.get("orientation", "portrait").lower()
if orientation == 'landscape': isLandscape = True
elif orientation == 'portrait': isLandscape = False
else: raise RuntimeError('Orientation must be "portrait" or "landscape"')
self.figure.set_dpi(72) # Override the dpi kwarg
imagedpi = kwargs.get("dpi", 72)
facecolor = kwargs.get("facecolor", "w")
edgecolor = kwargs.get("edgecolor", "w")
if rcParams['text.usetex']:
self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype)
else:
self._print_figure(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype)
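# The kwargs consumed above arrive through Figure.savefig; a hedged usage
# sketch (backend selection, file name and figure are illustrative only):
#   import matplotlib
#   matplotlib.use('PS')
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   fig.savefig('sketch.ps', papertype='a4', orientation='landscape', dpi=150)
#   # dpi only affects embedded images here; the vector output is fixed at 72.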
def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w',
orientation='portrait', isLandscape=False, papertype=None):
"""
Render the figure to hardcopy. Set the figure patch face and
edge colors. This is useful because some of the GUIs have a
gray figure face color background and you'll probably want to
override this on hardcopy
If outfile is a string, it is interpreted as a file name.
If the extension matches .ep* write encapsulated postscript,
otherwise write a stand-alone PostScript file.
If outfile is a file object, a stand-alone PostScript file is
written into this file object.
"""
isEPSF = format == 'eps'
passed_in_file_object = False
if is_string_like(outfile):
title = outfile
tmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest())
elif is_writable_file_like(outfile):
title = None
tmpfile = os.path.join(gettempdir(), md5(str(hash(outfile))).hexdigest())
passed_in_file_object = True
else:
raise ValueError("outfile must be a path or a file-like object")
fh = file(tmpfile, 'w')
# find the appropriate papertype
width, height = self.figure.get_size_inches()
if papertype == 'auto':
if isLandscape: papertype = _get_papertype(height, width)
else: papertype = _get_papertype(width, height)
if isLandscape: paperHeight, paperWidth = papersize[papertype]
else: paperWidth, paperHeight = papersize[papertype]
if rcParams['ps.usedistiller'] and not papertype == 'auto':
# distillers will improperly clip eps files if the pagesize is
# too small
if width>paperWidth or height>paperHeight:
if isLandscape:
papertype = _get_papertype(height, width)
paperHeight, paperWidth = papersize[papertype]
else:
papertype = _get_papertype(width, height)
paperWidth, paperHeight = papersize[papertype]
# center the figure on the paper
xo = 72*0.5*(paperWidth - width)
yo = 72*0.5*(paperHeight - height)
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
rotation = 0
if isLandscape:
llx, lly, urx, ury = lly, llx, ury, urx
xo, yo = 72*paperHeight - yo, xo
rotation = 90
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
self._pswriter = StringIO()
renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
self.figure.draw(renderer)
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write the PostScript headers
if isEPSF: print >>fh, "%!PS-Adobe-3.0 EPSF-3.0"
else: print >>fh, "%!PS-Adobe-3.0"
if title: print >>fh, "%%Title: "+title
print >>fh, ("%%Creator: matplotlib version "
+__version__+", http://matplotlib.sourceforge.net/")
print >>fh, "%%CreationDate: "+time.ctime(time.time())
print >>fh, "%%Orientation: " + orientation
if not isEPSF: print >>fh, "%%DocumentPaperSizes: "+papertype
print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox
if not isEPSF: print >>fh, "%%Pages: 1"
print >>fh, "%%EndComments"
Ndict = len(psDefs)
print >>fh, "%%BeginProlog"
if not rcParams['ps.useafm']:
Ndict += len(renderer.used_characters)
print >>fh, "/mpldict %d dict def"%Ndict
print >>fh, "mpldict begin"
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print >>fh, l.strip()
if not rcParams['ps.useafm']:
for font_filename, chars in renderer.used_characters.values():
if len(chars):
font = FT2Font(font_filename)
cmap = font.get_charmap()
glyph_ids = []
for c in chars:
gind = cmap.get(c) or 0
glyph_ids.append(gind)
# The ttf to ps (subsetting) support doesn't work for
# OpenType fonts that are Postscript inside (like the
# STIX fonts). This will simply turn that off to avoid
# errors.
if is_opentype_cff_font(font_filename):
raise RuntimeError("OpenType CFF fonts can not be saved using the internal Postscript backend at this time.\nConsider using the Cairo backend.")
else:
fonttype = rcParams['ps.fonttype']
convert_ttf_to_ps(font_filename, fh, rcParams['ps.fonttype'], glyph_ids)
print >>fh, "end"
print >>fh, "%%EndProlog"
if not isEPSF: print >>fh, "%%Page: 1 1"
print >>fh, "mpldict begin"
#print >>fh, "gsave"
print >>fh, "%s translate"%_nums_to_str(xo, yo)
if rotation: print >>fh, "%d rotate"%rotation
print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0)
# write the figure
print >>fh, self._pswriter.getvalue()
# write the trailer
#print >>fh, "grestore"
print >>fh, "end"
print >>fh, "showpage"
if not isEPSF: print >>fh, "%%EOF"
fh.close()
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if passed_in_file_object:
fh = file(tmpfile)
print >>outfile, fh.read()
else:
shutil.move(tmpfile, outfile)
def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,
orientation, isLandscape, papertype):
"""
If text.usetex is True in rc, a temporary pair of tex/eps files
is created to allow tex to manage the text layout via the psfrag
package. These files are processed to yield the final ps or eps file.
"""
isEPSF = format == 'eps'
title = outfile
# write to a temp file, we'll move it to outfile when done
tmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest())
fh = file(tmpfile, 'w')
self.figure.dpi = 72 # ignore the dpi kwarg
width, height = self.figure.get_size_inches()
xo = 0
yo = 0
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
self._pswriter = StringIO()
renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
self.figure.draw(renderer)
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write the Encapsulated PostScript headers
print >>fh, "%!PS-Adobe-3.0 EPSF-3.0"
if title: print >>fh, "%%Title: "+title
print >>fh, ("%%Creator: matplotlib version "
+__version__+", http://matplotlib.sourceforge.net/")
print >>fh, "%%CreationDate: "+time.ctime(time.time())
print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox
print >>fh, "%%EndComments"
Ndict = len(psDefs)
print >>fh, "%%BeginProlog"
print >>fh, "/mpldict %d dict def"%Ndict
print >>fh, "mpldict begin"
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print >>fh, l.strip()
print >>fh, "end"
print >>fh, "%%EndProlog"
print >>fh, "mpldict begin"
#print >>fh, "gsave"
print >>fh, "%s translate"%_nums_to_str(xo, yo)
print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0)
# write the figure
print >>fh, self._pswriter.getvalue()
# write the trailer
#print >>fh, "grestore"
print >>fh, "end"
print >>fh, "showpage"
fh.close()
if isLandscape: # now we are ready to rotate
isLandscape = True
width, height = height, width
bbox = (lly, llx, ury, urx)
temp_papertype = _get_papertype(width, height)
if papertype=='auto':
papertype = temp_papertype
paperWidth, paperHeight = papersize[temp_papertype]
else:
paperWidth, paperHeight = papersize[papertype]
if (width>paperWidth or height>paperHeight) and isEPSF:
paperWidth, paperHeight = papersize[temp_papertype]
verbose.report('Your figure is too big to fit on %s paper. %s \
paper will be used to prevent clipping.'%(papertype, temp_papertype), 'helpful')
texmanager = renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
convert_psfrags(tmpfile, renderer.psfrag, font_preamble,
custom_preamble, paperWidth, paperHeight,
orientation)
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['text.usetex']:
if False: pass # for debugging
else: gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if isinstance(outfile, file):
fh = file(tmpfile)
print >>outfile, fh.read()
else: shutil.move(tmpfile, outfile)
def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
paperWidth, paperHeight, orientation):
"""
When we want to use the LaTeX backend with postscript, we write PSFrag tags
to a temporary postscript file, each one marking a position for LaTeX to
render some text. convert_psfrags generates a LaTeX document containing the
commands to convert those tags to text. LaTeX/dvips produces the postscript
file that includes the actual text.
"""
tmpdir = os.path.split(tmpfile)[0]
epsfile = tmpfile+'.eps'
shutil.move(tmpfile, epsfile)
latexfile = tmpfile+'.tex'
outfile = tmpfile+'.output'
latexh = file(latexfile, 'w')
dvifile = tmpfile+'.dvi'
psfile = tmpfile+'.ps'
if orientation=='landscape': angle = 90
else: angle = 0
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry}
\usepackage{psfrag}
\usepackage[dvips]{graphicx}
\usepackage{color}
\pagestyle{empty}
\begin{document}
\begin{figure}
\centering
\leavevmode
%s
\includegraphics*[angle=%s]{%s}
\end{figure}
\end{document}
"""% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight,
paperWidth, paperHeight,
'\n'.join(psfrags), angle, os.path.split(epsfile)[-1])
if rcParams['text.latex.unicode']:
latexh.write(s.encode('utf8'))
else:
try:
latexh.write(s)
except UnicodeEncodeError, err:
verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
latexh.close()
# the split drive part of the command is necessary for windows users with
# multiple drives
if sys.platform == 'win32': precmd = '%s &&'% os.path.splitdrive(tmpdir)[0]
else: precmd = ''
command = '%s cd "%s" && latex -interaction=nonstopmode "%s" > "%s"'\
%(precmd, tmpdir, latexfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
raise RuntimeError('LaTeX was not able to process your file:\
\nHere is the full report generated by LaTeX: \n\n%s'% fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
command = '%s cd "%s" && dvips -q -R0 -o "%s" "%s" > "%s"'%(precmd, tmpdir,
os.path.split(psfile)[-1], os.path.split(dvifile)[-1], outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(epsfile)
shutil.move(psfile, tmpfile)
if not debugPS:
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None):
"""
Use ghostscript's pswrite or epswrite device to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. The output is low-level, converting text to outlines.
"""
paper = '-sPAPERSIZE=%s'% ptype
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
dpi = rcParams['ps.distiller.res']
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
command = '%s -dBATCH -dNOPAUSE -r%d -sDEVICE=pswrite %s -sOutputFile="%s" \
"%s" > "%s"'% (gs_exe, dpi, paper, psfile, tmpfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('ghostscript was not able to process \
your image.\nHere is the full report generated by ghostscript:\n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile, bbox)
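# gs_distill is driven by the rc setting rather than called directly; a
# minimal sketch of enabling it:
#   from matplotlib import rcParams
#   rcParams['ps.usedistiller'] = 'ghostscript'   # or 'xpdf'; None disables it
#   rcParams['ps.distiller.res'] = 6000           # resolution handed to -r above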
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None):
"""
Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. This distiller is preferred, generating high-level postscript
output that treats text as text.
"""
pdffile = tmpfile + '.pdf'
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
command = 'ps2pdf -dAutoFilterColorImages=false \
-sColorImageFilter=FlateEncode -sPAPERSIZE=%s "%s" "%s" > "%s"'% \
(ptype, tmpfile, pdffile, outfile)
if sys.platform == 'win32': command = command.replace('=', '#')
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('ps2pdf was not able to process your \
image.\nHere is the report generated by ghostscript:\n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
command = 'pdftops -paper match -level2 "%s" "%s" > "%s"'% \
(pdffile, psfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('pdftops was not able to process your \
image.\nHere is the full report generated by pdftops: \n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile, bbox)
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def get_bbox(tmpfile, bbox):
"""
Use ghostscript's bbox device to find the center of the bounding box. Return
an appropriately sized bbox centered around that point. A bit of a hack.
"""
outfile = tmpfile + '.output'
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
command = '%s -dBATCH -dNOPAUSE -sDEVICE=bbox "%s"' %\
(gs_exe, tmpfile)
verbose.report(command, 'debug')
stdin, stdout, stderr = os.popen3(command)
verbose.report(stdout.read(), 'debug-annoying')
bbox_info = stderr.read()
verbose.report(bbox_info, 'helpful')
bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info)
if bbox_found:
bbox_info = bbox_found.group()
else:
raise RuntimeError('Ghostscript was not able to extract a bounding box.\
Here is the Ghostscript output:\n\n%s'% bbox_info)
l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]
# this is a hack to deal with the fact that ghostscript does not return the
# intended bbox, but a tight bbox. For now, we just center the ink in the
# intended bbox. This is not ideal; users may intend the ink not to be
# centered.
if bbox is None:
l, b, r, t = (l-1, b-1, r+1, t+1)
else:
x = (l+r)/2
y = (b+t)/2
dx = (bbox[2]-bbox[0])/2
dy = (bbox[3]-bbox[1])/2
l,b,r,t = (x-dx, y-dy, x+dx, y+dy)
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, npy.ceil(r), npy.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info])
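# Sketch of the string handling above on a sample line of ghostscript's bbox
# device output (the numbers are made up for illustration):
#   bbox_info = '%%HiResBoundingBox: 12.5 20.0 300.25 240.75'
#   l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]
#   # -> (12.5, 20.0, 300.25, 240.75)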
def pstoeps(tmpfile, bbox):
"""
Convert the postscript to encapsulated postscript.
"""
bbox_info = get_bbox(tmpfile, bbox)
epsfile = tmpfile + '.eps'
epsh = file(epsfile, 'w')
tmph = file(tmpfile)
line = tmph.readline()
# Modify the header:
while line:
if line.startswith('%!PS'):
print >>epsh, "%!PS-Adobe-3.0 EPSF-3.0"
print >>epsh, bbox_info
elif line.startswith('%%EndComments'):
epsh.write(line)
print >>epsh, '%%BeginProlog'
print >>epsh, 'save'
print >>epsh, 'countdictstack'
print >>epsh, 'mark'
print >>epsh, 'newpath'
print >>epsh, '/showpage {} def'
print >>epsh, '/setpagedevice {pop} def'
print >>epsh, '%%EndProlog'
print >>epsh, '%%Page 1 1'
break
elif line.startswith('%%Bound') \
or line.startswith('%%HiResBound') \
or line.startswith('%%Pages'):
pass
else:
epsh.write(line)
line = tmph.readline()
# Now rewrite the rest of the file, and modify the trailer.
# This is done in a second loop such that the header of the embedded
# eps file is not modified.
line = tmph.readline()
while line:
if line.startswith('%%Trailer'):
print >>epsh, '%%Trailer'
print >>epsh, 'cleartomark'
print >>epsh, 'countdictstack'
print >>epsh, 'exch sub { end } repeat'
print >>epsh, 'restore'
if rcParams['ps.usedistiller'] == 'xpdf':
# remove extraneous "end" operator:
line = tmph.readline()
else:
epsh.write(line)
line = tmph.readline()
tmph.close()
epsh.close()
os.remove(tmpfile)
shutil.move(epsfile, tmpfile)
class FigureManagerPS(FigureManagerBase):
pass
FigureManager = FigureManagerPS
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
psDefs = [
# x y *m* -
"/m { moveto } bind def",
# x y *l* -
"/l { lineto } bind def",
# x y *r* -
"/r { rlineto } bind def",
# x1 y1 x2 y2 x y *c* -
"/c { curveto } bind def",
# *closepath* -
"/cl { closepath } bind def",
# w h x y *box* -
"""/box {
m
1 index 0 r
0 exch r
neg 0 r
cl
} bind def""",
# w h x y *clipbox* -
"""/clipbox {
box
clip
newpath
} bind def""",
]
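# For context, a fragment of the kind of stream the renderer emits on top of
# these abbreviations (values are illustrative, not generated output):
#   /mpldict 8 dict def
#   mpldict begin
#   612 792 0 0 clipbox
#   72 72 m
#   144 144 l
#   stroke
#   end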
| agpl-3.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/mathtext.py | 1 | 108907 | r"""
:mod:`~matplotlib.mathtext` is a module for parsing a subset of the
TeX math syntax and drawing it to a matplotlib backend.
For a tutorial of its usage see :ref:`mathtext-tutorial`. This
document is primarily concerned with implementation details.
The module uses pyparsing_ to parse the TeX expression.
.. _pyparsing: http://pyparsing.wikispaces.com/
The Bakoma distribution of the TeX Computer Modern fonts, and STIX
fonts are supported. There is experimental support for using
arbitrary fonts, but results may vary without proper tweaking and
metrics for those fonts.
If you find TeX expressions that don't parse or render properly,
please email [email protected], but please check KNOWN ISSUES below first.
"""
from __future__ import division
import os
from cStringIO import StringIO
from math import ceil
try:
set
except NameError:
from sets import Set as set
import unicodedata
from warnings import warn
from numpy import inf, isinf
import numpy as np
from matplotlib.pyparsing import Combine, Group, Optional, Forward, \
Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \
ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \
FollowedBy, Regex, ParserElement
# Enable packrat parsing
ParserElement.enablePackrat()
from matplotlib.afm import AFM
from matplotlib.cbook import Bunch, get_realpath_and_stat, \
is_string_like, maxdict
from matplotlib.ft2font import FT2Font, FT2Image, KERNING_DEFAULT, LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING
from matplotlib.font_manager import findfont, FontProperties
from matplotlib._mathtext_data import latex_to_bakoma, \
latex_to_standard, tex2uni, latex_to_cmex, stix_virtual_fonts
from matplotlib import get_data_path, rcParams
import matplotlib.colors as mcolors
import matplotlib._png as _png
####################
##############################################################################
# FONTS
def get_unicode_index(symbol):
"""get_unicode_index(symbol) -> integer
Return the integer index (from the Unicode table) of symbol. *symbol*
can be a single unicode character, a TeX command (i.e. r'\pi'), or a
Type1 symbol name (i.e. 'phi').
"""
# From UTF #25: U+2212 minus sign is the preferred
# representation of the unary and binary minus sign rather than
# the ASCII-derived U+002D hyphen-minus, because minus sign is
# unambiguous and because it is rendered with a more desirable
# length, usually longer than a hyphen.
if symbol == '-':
return 0x2212
try:# This will succeed if symbol is a single unicode char
return ord(symbol)
except TypeError:
pass
try:# Is symbol a TeX symbol (i.e. \alpha)
return tex2uni[symbol.strip("\\")]
except KeyError:
message = """'%(symbol)s' is not a valid Unicode character or
TeX/Type1 symbol"""%locals()
raise ValueError, message
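# A few hedged examples of the lookup performed above (code points are from
# the Unicode table; the calls themselves are only illustrative):
#   get_unicode_index(r'\pi')   # -> 0x3c0
#   get_unicode_index('x')      # -> 0x78
#   get_unicode_index('-')      # -> 0x2212  (minus sign, per the note above)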
def unichr_safe(index):
"""Return the Unicode character corresponding to the index,
or the replacement character if this is a narrow build of Python
and the requested character is outside the BMP."""
try:
return unichr(index)
except ValueError:
return unichr(0xFFFD)
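# Sketch of unichr_safe on a narrow Python 2 build (a wide build returns the
# real character for the second call):
#   unichr_safe(0x3c0)      # -> u'\u03c0'
#   unichr_safe(0x1d538)    # -> u'\ufffd'  (replacement character)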
class MathtextBackend(object):
"""
The base class for the mathtext backend-specific code. The
purpose of :class:`MathtextBackend` subclasses is to interface
between mathtext and a specific matplotlib graphics backend.
Subclasses need to override the following:
- :meth:`render_glyph`
- :meth:`render_filled_rect`
- :meth:`get_results`
And optionally, if you need to use a Freetype hinting style:
- :meth:`get_hinting_type`
"""
def __init__(self):
self.fonts_object = None
def set_canvas_size(self, w, h, d):
'Dimension the drawing canvas'
self.width = w
self.height = h
self.depth = d
def render_glyph(self, ox, oy, info):
"""
Draw a glyph described by *info* to the reference point (*ox*,
*oy*).
"""
raise NotImplementedError()
def render_filled_rect(self, x1, y1, x2, y2):
"""
Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
raise NotImplementedError()
def get_results(self, box):
"""
Return a backend-specific tuple to return to the backend after
all processing is done.
"""
raise NotImplementedError()
def get_hinting_type(self):
"""
Get the Freetype hinting type to use with this particular
backend.
"""
return LOAD_NO_HINTING
class MathtextBackendBbox(MathtextBackend):
"""
A backend whose only purpose is to get a precise bounding box.
Only required for the Agg backend.
"""
def __init__(self, real_backend):
MathtextBackend.__init__(self)
self.bbox = [0, 0, 0, 0]
self.real_backend = real_backend
def _update_bbox(self, x1, y1, x2, y2):
self.bbox = [min(self.bbox[0], x1),
min(self.bbox[1], y1),
max(self.bbox[2], x2),
max(self.bbox[3], y2)]
def render_glyph(self, ox, oy, info):
self._update_bbox(ox + info.metrics.xmin,
oy - info.metrics.ymax,
ox + info.metrics.xmax,
oy - info.metrics.ymin)
def render_rect_filled(self, x1, y1, x2, y2):
self._update_bbox(x1, y1, x2, y2)
def get_results(self, box):
orig_height = box.height
orig_depth = box.depth
ship(0, 0, box)
bbox = self.bbox
bbox = [bbox[0] - 1, bbox[1] - 1, bbox[2] + 1, bbox[3] + 1]
self._switch_to_real_backend()
self.fonts_object.set_canvas_size(
bbox[2] - bbox[0],
(bbox[3] - bbox[1]) - orig_depth,
(bbox[3] - bbox[1]) - orig_height)
ship(-bbox[0], -bbox[1], box)
return self.fonts_object.get_results(box)
def get_hinting_type(self):
return self.real_backend.get_hinting_type()
def _switch_to_real_backend(self):
self.fonts_object.mathtext_backend = self.real_backend
self.real_backend.fonts_object = self.fonts_object
self.real_backend.ox = self.bbox[0]
self.real_backend.oy = self.bbox[1]
class MathtextBackendAggRender(MathtextBackend):
"""
Render glyphs and rectangles to an FTImage buffer, which is later
transferred to the Agg image by the Agg backend.
"""
def __init__(self):
self.ox = 0
self.oy = 0
self.image = None
MathtextBackend.__init__(self)
def set_canvas_size(self, w, h, d):
MathtextBackend.set_canvas_size(self, w, h, d)
self.image = FT2Image(ceil(w), ceil(h + d))
def render_glyph(self, ox, oy, info):
info.font.draw_glyph_to_bitmap(
self.image, ox, oy - info.metrics.iceberg, info.glyph)
def render_rect_filled(self, x1, y1, x2, y2):
height = max(int(y2 - y1) - 1, 0)
if height == 0:
center = (y2 + y1) / 2.0
y = int(center - (height + 1) / 2.0)
else:
y = int(y1)
self.image.draw_rect_filled(int(x1), y, ceil(x2), y + height)
def get_results(self, box):
return (self.ox,
self.oy,
self.width,
self.height + self.depth,
self.depth,
self.image,
self.fonts_object.get_used_characters())
def get_hinting_type(self):
if rcParams['text.hinting']:
return LOAD_FORCE_AUTOHINT
else:
return LOAD_NO_HINTING
def MathtextBackendAgg():
return MathtextBackendBbox(MathtextBackendAggRender())
class MathtextBackendBitmapRender(MathtextBackendAggRender):
def get_results(self, box):
return self.image, self.depth
def MathtextBackendBitmap():
"""
A backend to generate standalone mathtext images. No additional
matplotlib backend is required.
"""
return MathtextBackendBbox(MathtextBackendBitmapRender())
class MathtextBackendPs(MathtextBackend):
"""
Store information to write a mathtext rendering to the PostScript
backend.
"""
def __init__(self):
self.pswriter = StringIO()
self.lastfont = None
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
postscript_name = info.postscript_name
fontsize = info.fontsize
symbol_name = info.symbol_name
if (postscript_name, fontsize) != self.lastfont:
ps = """/%(postscript_name)s findfont
%(fontsize)s scalefont
setfont
""" % locals()
self.lastfont = postscript_name, fontsize
self.pswriter.write(ps)
ps = """%(ox)f %(oy)f moveto
/%(symbol_name)s glyphshow\n
""" % locals()
self.pswriter.write(ps)
def render_rect_filled(self, x1, y1, x2, y2):
ps = "%f %f %f %f rectfill\n" % (x1, self.height - y2, x2 - x1, y2 - y1)
self.pswriter.write(ps)
def get_results(self, box):
ship(0, -self.depth, box)
#print self.depth
return (self.width,
self.height + self.depth,
self.depth,
self.pswriter,
self.fonts_object.get_used_characters())
class MathtextBackendPdf(MathtextBackend):
"""
Store information to write a mathtext rendering to the PDF
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
filename = info.font.fname
oy = self.height - oy + info.offset
self.glyphs.append(
(ox, oy, filename, info.fontsize,
info.num, info.symbol_name))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append((x1, self.height - y2, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects,
self.fonts_object.get_used_characters())
class MathtextBackendSvg(MathtextBackend):
"""
Store information to write a mathtext rendering to the SVG
backend.
"""
def __init__(self):
self.svg_glyphs = []
self.svg_rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
self.svg_glyphs.append(
(info.font, info.fontsize, info.num, ox, oy, info.metrics))
def render_rect_filled(self, x1, y1, x2, y2):
self.svg_rects.append(
(x1, self.height - y1 + 1, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
svg_elements = Bunch(svg_glyphs = self.svg_glyphs,
svg_rects = self.svg_rects)
return (self.width,
self.height + self.depth,
self.depth,
svg_elements,
self.fonts_object.get_used_characters())
class MathtextBackendPath(MathtextBackend):
"""
Store information to write a mathtext rendering to the text path
machinery.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
thetext = info.num
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, self.height-y2 , x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class MathtextBackendCairo(MathtextBackend):
"""
Store information to write a mathtext rendering to the Cairo
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = oy - info.offset - self.height
thetext = unichr_safe(info.num)
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, y1 - self.height, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class Fonts(object):
"""
An abstract base class for a system of fonts to use for mathtext.
The class must be able to take symbol keys and font file names and
return the character metrics. It also delegates to a backend class
to do the actual drawing.
"""
def __init__(self, default_font_prop, mathtext_backend):
"""
*default_font_prop*: A
:class:`~matplotlib.font_manager.FontProperties` object to use
for the default non-math font, or the base font for Unicode
(generic) font rendering.
*mathtext_backend*: A subclass of :class:`MathTextBackend`
used to delegate the actual rendering.
"""
self.default_font_prop = default_font_prop
self.mathtext_backend = mathtext_backend
# Make these classes doubly-linked
self.mathtext_backend.fonts_object = self
self.used_characters = {}
def destroy(self):
"""
Fix any cyclical references before the object is about
to be destroyed.
"""
self.used_characters = None
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
"""
Get the kerning distance for font between *sym1* and *sym2*.
*fontX*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default/regular (non-math)
*fontclassX*: TODO
*symX*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
*fontsizeX*: the fontsize in points
*dpi*: the current dots-per-inch
"""
return 0.
def get_metrics(self, font, font_class, sym, fontsize, dpi):
"""
*font*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default/regular (non-math)
*font_class*: TODO
*sym*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
*fontsize*: font size in points
*dpi*: current dots-per-inch
Returns an object with the following attributes:
- *advance*: The advance distance (in points) of the glyph.
- *height*: The height of the glyph in points.
- *width*: The width of the glyph in points.
- *xmin*, *xmax*, *ymin*, *ymax* - the ink rectangle of the glyph
- *iceberg* - the distance from the baseline to the top of
the glyph. This corresponds to TeX's definition of
"height".
"""
info = self._get_info(font, font_class, sym, fontsize, dpi)
return info.metrics
def set_canvas_size(self, w, h, d):
"""
Set the size of the buffer used to render the math expression.
Only really necessary for the bitmap backends.
"""
self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d)
self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth)
def render_glyph(self, ox, oy, facename, font_class, sym, fontsize, dpi):
"""
Draw a glyph at
- *ox*, *oy*: position
- *facename*: One of the TeX face names
- *font_class*:
- *sym*: TeX symbol name or single character
- *fontsize*: fontsize in points
- *dpi*: The dpi to draw at.
"""
info = self._get_info(facename, font_class, sym, fontsize, dpi)
realpath, stat_key = get_realpath_and_stat(info.font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].add(info.num)
self.mathtext_backend.render_glyph(ox, oy, info)
def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
self.mathtext_backend.render_rect_filled(x1, y1, x2, y2)
def get_xheight(self, font, fontsize, dpi):
"""
Get the xheight for the given *font* and *fontsize*.
"""
raise NotImplementedError()
def get_underline_thickness(self, font, fontsize, dpi):
"""
Get the line thickness that matches the given font. Used as a
base unit for drawing lines such as in a fraction or radical.
"""
raise NotImplementedError()
def get_used_characters(self):
"""
Get the set of characters that were used in the math
expression. Used by backends that need to subset fonts so
they know which glyphs to include.
"""
return self.used_characters
def get_results(self, box):
"""
Get the data needed by the backend to render the math
expression. The return value is backend-specific.
"""
return self.mathtext_backend.get_results(box)
def get_sized_alternatives_for_symbol(self, fontname, sym):
"""
Override if your font provides multiple sizes of the same
symbol. Should return a list of symbols matching *sym* in
various sizes. The expression renderer will select the most
appropriate size for a given situation from this list.
"""
return [(fontname, sym)]
class TruetypeFonts(Fonts):
"""
A generic base class for all font setups that use Truetype fonts
(through FT2Font).
"""
class CachedFont:
def __init__(self, font):
self.font = font
self.charmap = font.get_charmap()
self.glyphmap = dict(
[(glyphind, ccode) for ccode, glyphind in self.charmap.iteritems()])
def __repr__(self):
return repr(self.font)
def __init__(self, default_font_prop, mathtext_backend):
Fonts.__init__(self, default_font_prop, mathtext_backend)
self.glyphd = {}
self._fonts = {}
filename = findfont(default_font_prop)
default_font = self.CachedFont(FT2Font(str(filename)))
self._fonts['default'] = default_font
self._fonts['regular'] = default_font
def destroy(self):
self.glyphd = None
Fonts.destroy(self)
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self._fonts.get(basename)
if cached_font is None:
font = FT2Font(str(basename))
cached_font = self.CachedFont(font)
self._fonts[basename] = cached_font
self._fonts[font.postscript_name] = cached_font
self._fonts[font.postscript_name.lower()] = cached_font
return cached_font
def _get_offset(self, cached_font, glyph, fontsize, dpi):
if cached_font.font.postscript_name == 'Cmex10':
return ((glyph.height/64.0/2.0) + (fontsize/3.0 * dpi/72.0))
return 0.
def _get_info(self, fontname, font_class, sym, fontsize, dpi):
key = fontname, font_class, sym, fontsize, dpi
bunch = self.glyphd.get(key)
if bunch is not None:
return bunch
cached_font, num, symbol_name, fontsize, slanted = \
self._get_glyph(fontname, font_class, sym, fontsize)
font = cached_font.font
font.set_size(fontsize, dpi)
glyph = font.load_char(
num,
flags=self.mathtext_backend.get_hinting_type())
xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox]
offset = self._get_offset(cached_font, glyph, fontsize, dpi)
metrics = Bunch(
advance = glyph.linearHoriAdvance/65536.0,
height = glyph.height/64.0,
width = glyph.width/64.0,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = glyph.horiBearingY/64.0 + offset,
slanted = slanted
)
result = self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.postscript_name,
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return result
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
cached_font.font.set_size(fontsize, dpi)
pclt = cached_font.font.get_sfnt_table('pclt')
if pclt is None:
# Some fonts don't store the xHeight, so we do a poor man's xHeight
metrics = self.get_metrics(font, rcParams['mathtext.default'], 'x', fontsize, dpi)
return metrics.iceberg
xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
return xHeight
def get_underline_thickness(self, font, fontsize, dpi):
# This function used to grab underline thickness from the font
# metrics, but that information is just too unreliable, so it
# is now hardcoded.
return ((0.75 / 12.0) * fontsize * dpi) / 72.0
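# The hardcoded rule above amounts to 0.75 points of rule per 12pt of text,
# scaled by dpi; the font argument is ignored. A sketch:
#   self.get_underline_thickness(None, 12, 72)   # -> 0.75
#   self.get_underline_thickness(None, 24, 72)   # -> 1.5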
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64.0
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
class BakomaFonts(TruetypeFonts):
"""
Use the Bakoma TrueType fonts for rendering.
Symbols are strewn about a number of font files, each of which has
its own proprietary 8-bit encoding.
"""
_fontmap = { 'cal' : 'cmsy10',
'rm' : 'cmr10',
'tt' : 'cmtt10',
'it' : 'cmmi10',
'bf' : 'cmb10',
'sf' : 'cmss10',
'ex' : 'cmex10'
}
def __init__(self, *args, **kwargs):
self._stix_fallback = StixFonts(*args, **kwargs)
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for key, val in self._fontmap.iteritems():
fullpath = findfont(val)
self.fontmap[key] = fullpath
self.fontmap[val] = fullpath
_slanted_symbols = set(r"\int \oint".split())
def _get_glyph(self, fontname, font_class, sym, fontsize):
symbol_name = None
if fontname in self.fontmap and sym in latex_to_bakoma:
basename, num = latex_to_bakoma[sym]
slanted = (basename == "cmmi10") or sym in self._slanted_symbols
try:
cached_font = self._get_font(basename)
except RuntimeError:
pass
else:
symbol_name = cached_font.font.get_glyph_name(num)
num = cached_font.glyphmap[num]
elif len(sym) == 1:
slanted = (fontname == "it")
try:
cached_font = self._get_font(fontname)
except RuntimeError:
pass
else:
num = ord(sym)
gid = cached_font.charmap.get(num)
if gid is not None:
symbol_name = cached_font.font.get_glyph_name(
cached_font.charmap[num])
if symbol_name is None:
return self._stix_fallback._get_glyph(
fontname, font_class, sym, fontsize)
return cached_font, num, symbol_name, fontsize, slanted
# The Bakoma fonts contain many pre-sized alternatives for the
# delimiters. The AutoSizedChar class will use these alternatives
# and select the best (closest sized) glyph.
_size_alternatives = {
'(' : [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
('ex', '\xb5'), ('ex', '\xc3')],
')' : [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
('ex', '\xb6'), ('ex', '\x21')],
'{' : [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
('ex', '\xbd'), ('ex', '\x28')],
'}' : [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
('ex', '\xbe'), ('ex', '\x29')],
# The fourth size of '[' is mysteriously missing from the BaKoMa
# font, so I've omitted it for both '[' and ']'
'[' : [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
('ex', '\x22')],
']' : [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
('ex', '\x23')],
r'\lfloor' : [('ex', '\xa5'), ('ex', '\x6a'),
('ex', '\xb9'), ('ex', '\x24')],
r'\rfloor' : [('ex', '\xa6'), ('ex', '\x6b'),
('ex', '\xba'), ('ex', '\x25')],
r'\lceil' : [('ex', '\xa7'), ('ex', '\x6c'),
('ex', '\xbb'), ('ex', '\x26')],
r'\rceil' : [('ex', '\xa8'), ('ex', '\x6d'),
('ex', '\xbc'), ('ex', '\x27')],
r'\langle' : [('ex', '\xad'), ('ex', '\x44'),
('ex', '\xbf'), ('ex', '\x2a')],
r'\rangle' : [('ex', '\xae'), ('ex', '\x45'),
('ex', '\xc0'), ('ex', '\x2b')],
r'\__sqrt__' : [('ex', '\x70'), ('ex', '\x71'),
('ex', '\x72'), ('ex', '\x73')],
r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
('ex', '\xc2'), ('ex', '\x2d')],
r'/' : [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
('ex', '\xcb'), ('ex', '\x2c')],
r'\widehat' : [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
('ex', '\x64')],
r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
('ex', '\x67')],
r'<' : [('cal', 'h'), ('ex', 'D')],
r'>' : [('cal', 'i'), ('ex', 'E')]
}
for alias, target in [('\leftparen', '('),
('\rightparent', ')'),
('\leftbrace', '{'),
('\rightbrace', '}'),
('\leftbracket', '['),
('\rightbracket', ']')]:
_size_alternatives[alias] = _size_alternatives[target]
def get_sized_alternatives_for_symbol(self, fontname, sym):
return self._size_alternatives.get(sym, [(fontname, sym)])
class UnicodeFonts(TruetypeFonts):
"""
An abstract base class for handling Unicode fonts.
While some reasonably complete Unicode fonts (such as DejaVu) may
work in some situations, the only Unicode font I'm aware of with a
complete set of math symbols is STIX.
This class will "fallback" on the Bakoma fonts when a required
symbol can not be found in the font.
"""
use_cmex = True
def __init__(self, *args, **kwargs):
# This must come first so the backend's owner is set correctly
if rcParams['mathtext.fallback_to_cm']:
self.cm_fallback = BakomaFonts(*args, **kwargs)
else:
self.cm_fallback = None
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for texfont in "cal rm tt it bf sf".split():
prop = rcParams['mathtext.' + texfont]
font = findfont(prop)
self.fontmap[texfont] = font
prop = FontProperties('cmex10')
font = findfont(prop)
self.fontmap['ex'] = font
_slanted_symbols = set(r"\int \oint".split())
def _map_virtual_font(self, fontname, font_class, uniindex):
return fontname, uniindex
def _get_glyph(self, fontname, font_class, sym, fontsize):
found_symbol = False
if self.use_cmex:
uniindex = latex_to_cmex.get(sym)
if uniindex is not None:
fontname = 'ex'
found_symbol = True
if not found_symbol:
try:
uniindex = get_unicode_index(sym)
found_symbol = True
except ValueError:
uniindex = ord('?')
warn("No TeX to unicode mapping for '%s'" %
sym.encode('ascii', 'backslashreplace'),
MathTextWarning)
fontname, uniindex = self._map_virtual_font(
fontname, font_class, uniindex)
new_fontname = fontname
# Only characters in the "Letter" class should be italicized in 'it'
# mode. Greek capital letters should be Roman.
if found_symbol:
if fontname == 'it':
if uniindex < 0x10000:
unistring = unichr(uniindex)
if (not unicodedata.category(unistring)[0] == "L"
or unicodedata.name(unistring).startswith("GREEK CAPITAL")):
new_fontname = 'rm'
slanted = (new_fontname == 'it') or sym in self._slanted_symbols
found_symbol = False
try:
cached_font = self._get_font(new_fontname)
except RuntimeError:
pass
else:
try:
glyphindex = cached_font.charmap[uniindex]
found_symbol = True
except KeyError:
pass
if not found_symbol:
if self.cm_fallback:
warn("Substituting with a symbol from Computer Modern.",
MathTextWarning)
return self.cm_fallback._get_glyph(
fontname, 'it', sym, fontsize)
else:
if fontname in ('it', 'regular') and isinstance(self, StixFonts):
return self._get_glyph('rm', font_class, sym, fontsize)
warn("Font '%s' does not have a glyph for '%s' [U%x]" %
(new_fontname, sym.encode('ascii', 'backslashreplace'), uniindex),
MathTextWarning)
warn("Substituting with a dummy symbol.", MathTextWarning)
fontname = 'rm'
new_fontname = fontname
cached_font = self._get_font(fontname)
uniindex = 0xA4 # currency character, for lack of anything better
glyphindex = cached_font.charmap[uniindex]
slanted = False
symbol_name = cached_font.font.get_glyph_name(glyphindex)
return cached_font, uniindex, symbol_name, fontsize, slanted
def get_sized_alternatives_for_symbol(self, fontname, sym):
if self.cm_fallback:
return self.cm_fallback.get_sized_alternatives_for_symbol(
fontname, sym)
return [(fontname, sym)]
class StixFonts(UnicodeFonts):
"""
A font handling class for the STIX fonts.
In addition to what UnicodeFonts provides, this class:
- supports "virtual fonts" which are complete alpha numeric
character sets with different font styles at special Unicode
code points, such as "Blackboard".
- handles sized alternative characters for the STIXSizeX fonts.
"""
_fontmap = { 'rm' : 'STIXGeneral',
'it' : 'STIXGeneral:italic',
'bf' : 'STIXGeneral:weight=bold',
'nonunirm' : 'STIXNonUnicode',
'nonuniit' : 'STIXNonUnicode:italic',
'nonunibf' : 'STIXNonUnicode:weight=bold',
0 : 'STIXGeneral',
1 : 'STIXSizeOneSym',
2 : 'STIXSizeTwoSym',
3 : 'STIXSizeThreeSym',
4 : 'STIXSizeFourSym',
5 : 'STIXSizeFiveSym'
}
use_cmex = False
cm_fallback = False
_sans = False
def __init__(self, *args, **kwargs):
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for key, name in self._fontmap.iteritems():
fullpath = findfont(name)
self.fontmap[key] = fullpath
self.fontmap[name] = fullpath
def _map_virtual_font(self, fontname, font_class, uniindex):
# Handle these "fonts" that are actually embedded in
# other fonts.
mapping = stix_virtual_fonts.get(fontname)
if (self._sans and mapping is None and
fontname not in ('regular', 'default')):
mapping = stix_virtual_fonts['sf']
doing_sans_conversion = True
else:
doing_sans_conversion = False
if mapping is not None:
if isinstance(mapping, dict):
mapping = mapping.get(font_class, 'rm')
# Binary search for the source glyph
lo = 0
hi = len(mapping)
while lo < hi:
mid = (lo+hi)//2
range = mapping[mid]
if uniindex < range[0]:
hi = mid
elif uniindex <= range[1]:
break
else:
lo = mid + 1
if uniindex >= range[0] and uniindex <= range[1]:
uniindex = uniindex - range[0] + range[3]
fontname = range[2]
elif not doing_sans_conversion:
# This will generate a dummy character
uniindex = 0x1
fontname = rcParams['mathtext.default']
# Handle private use area glyphs
if (fontname in ('it', 'rm', 'bf') and
uniindex >= 0xe000 and uniindex <= 0xf8ff):
fontname = 'nonuni' + fontname
return fontname, uniindex
_size_alternatives = {}
def get_sized_alternatives_for_symbol(self, fontname, sym):
alternatives = self._size_alternatives.get(sym)
if alternatives:
return alternatives
alternatives = []
try:
uniindex = get_unicode_index(sym)
except ValueError:
return [(fontname, sym)]
fix_ups = {
ord('<'): 0x27e8,
ord('>'): 0x27e9 }
uniindex = fix_ups.get(uniindex, uniindex)
for i in range(6):
cached_font = self._get_font(i)
glyphindex = cached_font.charmap.get(uniindex)
if glyphindex is not None:
alternatives.append((i, unichr_safe(uniindex)))
# The largest size of the radical symbol in STIX has incorrect
# metrics that cause it to be disconnected from the stem.
if sym == r'\__sqrt__':
alternatives = alternatives[:-1]
self._size_alternatives[sym] = alternatives
return alternatives
class StixSansFonts(StixFonts):
"""
A font handling class for the STIX fonts (that uses sans-serif
characters by default).
"""
_sans = True
class StandardPsFonts(Fonts):
"""
Use the standard postscript fonts for rendering to backend_ps
Unlike the other font classes, BakomaFont and UnicodeFont, this
one requires the Ps backend.
"""
basepath = os.path.join( get_data_path(), 'fonts', 'afm' )
fontmap = { 'cal' : 'pzcmi8a', # Zapf Chancery
'rm' : 'pncr8a', # New Century Schoolbook
'tt' : 'pcrr8a', # Courier
'it' : 'pncri8a', # New Century Schoolbook Italic
'sf' : 'phvr8a', # Helvetica
'bf' : 'pncb8a', # New Century Schoolbook Bold
None : 'psyr' # Symbol
}
def __init__(self, default_font_prop):
Fonts.__init__(self, default_font_prop, MathtextBackendPs())
self.glyphd = {}
self.fonts = {}
filename = findfont(default_font_prop, fontext='afm',
directory=self.basepath)
if filename is None:
filename = findfont('Helvetica', fontext='afm',
directory=self.basepath)
default_font = AFM(file(filename, 'r'))
default_font.fname = filename
self.fonts['default'] = default_font
self.fonts['regular'] = default_font
self.pswriter = StringIO()
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self.fonts.get(basename)
if cached_font is None:
fname = os.path.join(self.basepath, basename + ".afm")
cached_font = AFM(file(fname, 'r'))
cached_font.fname = fname
self.fonts[basename] = cached_font
self.fonts[cached_font.get_fontname()] = cached_font
return cached_font
def _get_info (self, fontname, font_class, sym, fontsize, dpi):
'load the cmfont, metrics and glyph with caching'
key = fontname, sym, fontsize, dpi
tup = self.glyphd.get(key)
if tup is not None:
return tup
# Only characters in the "Letter" class should really be italicized.
# This class includes greek letters, so we're ok
if (fontname == 'it' and
(len(sym) > 1 or
not unicodedata.category(unicode(sym)).startswith("L"))):
fontname = 'rm'
found_symbol = False
if sym in latex_to_standard:
fontname, num = latex_to_standard[sym]
glyph = chr(num)
found_symbol = True
elif len(sym) == 1:
glyph = sym
num = ord(glyph)
found_symbol = True
else:
warn("No TeX to built-in Postscript mapping for '%s'" % sym,
MathTextWarning)
slanted = (fontname == 'it')
font = self._get_font(fontname)
if found_symbol:
try:
symbol_name = font.get_name_char(glyph)
except KeyError:
warn("No glyph in standard Postscript font '%s' for '%s'" %
(font.postscript_name, sym),
MathTextWarning)
found_symbol = False
if not found_symbol:
glyph = sym = '?'
num = ord(glyph)
symbol_name = font.get_name_char(glyph)
offset = 0
scale = 0.001 * fontsize
xmin, ymin, xmax, ymax = [val * scale
for val in font.get_bbox_char(glyph)]
metrics = Bunch(
advance = font.get_width_char(glyph) * scale,
width = font.get_width_char(glyph) * scale,
height = font.get_height_char(glyph) * scale,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = ymax + offset,
slanted = slanted
)
self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.get_fontname(),
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return self.glyphd[key]
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return (font.get_kern_dist(info1.glyph, info2.glyph)
* 0.001 * fontsize1)
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_xheight() * 0.001 * fontsize
def get_underline_thickness(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_underline_thickness() * 0.001 * fontsize
##############################################################################
# TeX-LIKE BOX MODEL
# The following is based directly on the document 'woven' from the
# TeX82 source code. This information is also available in printed
# form:
#
# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B:
# TeX: The Program. Addison-Wesley Professional.
#
# The most relevant "chapters" are:
# Data structures for boxes and their friends
# Shipping pages out (Ship class)
# Packaging (hpack and vpack)
# Data structures for math mode
# Subroutines for math mode
# Typesetting math formulas
#
# Many of the docstrings below refer to a numbered "node" in that
# book, e.g. node123
#
# Note that (as in TeX) y increases downward, unlike many other parts of
# matplotlib.
# How much text shrinks when going to the next-smallest level. GROW_FACTOR
# must be the inverse of SHRINK_FACTOR.
SHRINK_FACTOR = 0.7
GROW_FACTOR = 1.0 / SHRINK_FACTOR
# The number of different sizes of chars to use, beyond which they will not
# get any smaller
NUM_SIZE_LEVELS = 6
# Percentage of x-height of additional horiz. space after sub/superscripts
SCRIPT_SPACE = 0.2
# Percentage of x-height that sub/superscripts drop below the baseline
SUBDROP = 0.3
# Percentage of x-height that superscripts are raised above the baseline
SUP1 = 0.5
# Percentage of x-height that subscripts drop below the baseline
SUB1 = 0.0
# Percentage of x-height that superscripts are offset relative to the subscript
DELTA = 0.18
class MathTextWarning(Warning):
pass
class Node(object):
"""
A node in the TeX box model
"""
def __init__(self):
self.size = 0
def __repr__(self):
return self.__internal_repr__()
def __internal_repr__(self):
return self.__class__.__name__
def get_kerning(self, next):
return 0.0
def shrink(self):
"""
        Shrinks one level smaller.  There are only NUM_SIZE_LEVELS
        levels of sizes, after which things will no longer get smaller.
"""
self.size += 1
def grow(self):
"""
Grows one level larger. There is no limit to how big
something can get.
"""
self.size -= 1
def render(self, x, y):
pass
class Box(Node):
"""
Represents any node with a physical location.
"""
def __init__(self, width, height, depth):
Node.__init__(self)
self.width = width
self.height = height
self.depth = depth
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
def render(self, x1, y1, x2, y2):
pass
class Vbox(Box):
"""
A box with only height (zero width).
"""
def __init__(self, height, depth):
Box.__init__(self, 0., height, depth)
class Hbox(Box):
"""
A box with only width (zero height and depth).
"""
def __init__(self, width):
Box.__init__(self, width, 0., 0.)
class Char(Node):
"""
Represents a single character. Unlike TeX, the font information
and metrics are stored with each :class:`Char` to make it easier
to lookup the font metrics when needed. Note that TeX boxes have
a width, height, and depth, unlike Type1 and Truetype which use a
full bounding box and an advance in the x-direction. The metrics
must be converted to the TeX way, and the advance (if different
from width) must be converted into a :class:`Kern` node when the
:class:`Char` is added to its parent :class:`Hlist`.
"""
def __init__(self, c, state):
Node.__init__(self)
self.c = c
self.font_output = state.font_output
assert isinstance(state.font, (str, unicode, int))
self.font = state.font
self.font_class = state.font_class
self.fontsize = state.fontsize
self.dpi = state.dpi
# The real width, height and depth will be set during the
# pack phase, after we know the real fontsize
self._update_metrics()
def __internal_repr__(self):
return '`%s`' % self.c
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
if self.c == ' ':
self.width = metrics.advance
else:
self.width = metrics.width
self.height = metrics.iceberg
self.depth = -(metrics.iceberg - metrics.height)
def is_slanted(self):
return self._metrics.slanted
def get_kerning(self, next):
"""
Return the amount of kerning between this and the given
character. Called when characters are strung together into
:class:`Hlist` to create :class:`Kern` nodes.
"""
advance = self._metrics.advance - self.width
kern = 0.
if isinstance(next, Char):
kern = self.font_output.get_kern(
self.font, self.font_class, self.c, self.fontsize,
next.font, next.font_class, next.c, next.fontsize,
self.dpi)
return advance + kern
def render(self, x, y):
"""
Render the character to the canvas
"""
self.font_output.render_glyph(
x, y,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.fontsize *= SHRINK_FACTOR
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.fontsize *= GROW_FACTOR
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
class Accent(Char):
"""
The font metrics need to be dealt with differently for accents,
since they are already offset correctly from the baseline in
TrueType fonts.
"""
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
self.width = metrics.xmax - metrics.xmin
self.height = metrics.ymax - metrics.ymin
self.depth = 0
def shrink(self):
Char.shrink(self)
self._update_metrics()
def grow(self):
Char.grow(self)
self._update_metrics()
def render(self, x, y):
"""
Render the character to the canvas.
"""
self.font_output.render_glyph(
x - self._metrics.xmin, y + self._metrics.ymin,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
class List(Box):
"""
A list of nodes (either horizontal or vertical).
"""
def __init__(self, elements):
Box.__init__(self, 0., 0., 0.)
self.shift_amount = 0. # An arbitrary offset
self.children = elements # The child nodes of this list
# The following parameters are set in the vpack and hpack functions
self.glue_set = 0. # The glue setting of this list
self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching
self.glue_order = 0 # The order of infinity (0 - 3) for the glue
def __repr__(self):
return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
self.__internal_repr__(),
self.width, self.height,
self.depth, self.shift_amount,
' '.join([repr(x) for x in self.children]))
def _determine_order(self, totals):
"""
A helper function to determine the highest order of glue
used by the members of this list. Used by vpack and hpack.
"""
o = 0
for i in range(len(totals) - 1, 0, -1):
if totals[i] != 0.0:
o = i
break
return o
def _set_glue(self, x, sign, totals, error_type):
o = self._determine_order(totals)
self.glue_order = o
self.glue_sign = sign
if totals[o] != 0.:
self.glue_set = x / totals[o]
else:
self.glue_sign = 0
self.glue_ratio = 0.
if o == 0:
if len(self.children):
warn("%s %s: %r" % (error_type, self.__class__.__name__, self),
MathTextWarning)
def shrink(self):
for child in self.children:
child.shrink()
Box.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.shift_amount *= SHRINK_FACTOR
self.glue_set *= SHRINK_FACTOR
def grow(self):
for child in self.children:
child.grow()
Box.grow(self)
self.shift_amount *= GROW_FACTOR
self.glue_set *= GROW_FACTOR
class Hlist(List):
"""
A horizontal list of boxes.
"""
def __init__(self, elements, w=0., m='additional', do_kern=True):
List.__init__(self, elements)
if do_kern:
self.kern()
self.hpack()
def kern(self):
"""
Insert :class:`Kern` nodes between :class:`Char` nodes to set
kerning. The :class:`Char` nodes themselves determine the
amount of kerning they need (in :meth:`~Char.get_kerning`),
and this function just creates the linked list in the correct
way.
"""
new_children = []
num_children = len(self.children)
if num_children:
for i in range(num_children):
elem = self.children[i]
if i < num_children - 1:
next = self.children[i + 1]
else:
next = None
new_children.append(elem)
kerning_distance = elem.get_kerning(next)
if kerning_distance != 0.:
kern = Kern(kerning_distance)
new_children.append(kern)
self.children = new_children
# This is a failed experiment to fake cross-font kerning.
# def get_kerning(self, next):
# if len(self.children) >= 2 and isinstance(self.children[-2], Char):
# if isinstance(next, Char):
# print "CASE A"
# return self.children[-2].get_kerning(next)
# elif isinstance(next, Hlist) and len(next.children) and isinstance(next.children[0], Char):
# print "CASE B"
# result = self.children[-2].get_kerning(next.children[0])
# print result
# return result
# return 0.0
def hpack(self, w=0., m='additional'):
"""
The main duty of :meth:`hpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified. The computed sizes normally
enclose all of the material inside the new box; but some items
may stick out if negative glue is used, if the box is
overfull, or if a ``\\vbox`` includes other boxes that have
been shifted left.
- *w*: specifies a width
- *m*: is either 'exactly' or 'additional'.
Thus, ``hpack(w, 'exactly')`` produces a box whose width is
exactly *w*, while ``hpack(w, 'additional')`` yields a box
whose width is the natural width plus *w*. The default values
produce a box with the natural width.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
#self.shift_amount = 0.
h = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Char):
x += p.width
h = max(h, p.height)
d = max(d, p.depth)
elif isinstance(p, Box):
x += p.width
if not isinf(p.height) and not isinf(p.depth):
s = getattr(p, 'shift_amount', 0.)
h = max(h, p.height - s)
d = max(d, p.depth + s)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += p.width
self.height = h
self.depth = d
if m == 'additional':
w += x
self.width = w
x = w - x
if x == 0.:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
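# Illustrative usage sketch (not part of the original module): how `hpack`
# treats its 'additional' and 'exactly' modes.  The helper below is never
# executed at import time, so names defined further down (e.g. Fil) resolve
# normally when it is actually called.
def _hpack_modes_demo():
    # Two rigid boxes separated by infinitely stretchable 'fil' glue.
    row = Hlist([Hbox(10.0), Fil(), Hbox(5.0)], do_kern=False)
    natural_width = row.width        # 15.0: the default 'additional' pack
    row.hpack(100.0, 'exactly')      # force the total width to exactly 100
    stretched_width = row.width      # 100.0: the glue absorbs the extra 85
    return natural_width, stretched_width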
class Vlist(List):
"""
A vertical list of boxes.
"""
def __init__(self, elements, h=0., m='additional'):
List.__init__(self, elements)
self.vpack()
def vpack(self, h=0., m='additional', l=float(inf)):
"""
The main duty of :meth:`vpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified.
- *h*: specifies a height
- *m*: is either 'exactly' or 'additional'.
- *l*: a maximum height
Thus, ``vpack(h, 'exactly')`` produces a box whose height is
exactly *h*, while ``vpack(h, 'additional')`` yields a box
whose height is the natural height plus *h*. The default
        values produce a box with the natural height.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
# self.shift_amount = 0.
w = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Box):
x += d + p.height
d = p.depth
if not isinf(p.width):
s = getattr(p, 'shift_amount', 0.)
w = max(w, p.width + s)
elif isinstance(p, Glue):
x += d
d = 0.
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += d + p.width
d = 0.
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in Vlist.")
self.width = w
if d > l:
x += d - l
self.depth = l
else:
self.depth = d
if m == 'additional':
h += x
self.height = h
x = h - x
if x == 0:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
class Rule(Box):
"""
A :class:`Rule` node stands for a solid black rectangle; it has
*width*, *depth*, and *height* fields just as in an
:class:`Hlist`. However, if any of these dimensions is inf, the
actual value will be determined by running the rule up to the
boundary of the innermost enclosing box. This is called a "running
dimension." The width is never running in an :class:`Hlist`; the
height and depth are never running in a :class:`Vlist`.
"""
def __init__(self, width, height, depth, state):
Box.__init__(self, width, height, depth)
self.font_output = state.font_output
def render(self, x, y, w, h):
self.font_output.render_rect_filled(x, y, x + w, y + h)
class Hrule(Rule):
"""
Convenience class to create a horizontal rule.
"""
def __init__(self, state, thickness=None):
if thickness is None:
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = depth = thickness * 0.5
Rule.__init__(self, inf, height, depth, state)
class Vrule(Rule):
"""
Convenience class to create a vertical rule.
"""
def __init__(self, state):
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
Rule.__init__(self, thickness, inf, inf, state)
class Glue(Node):
"""
Most of the information in this object is stored in the underlying
:class:`GlueSpec` class, which is shared between multiple glue objects. (This
is a memory optimization which probably doesn't matter anymore, but it's
easier to stick to what TeX does.)
"""
def __init__(self, glue_type, copy=False):
Node.__init__(self)
self.glue_subtype = 'normal'
if is_string_like(glue_type):
glue_spec = GlueSpec.factory(glue_type)
elif isinstance(glue_type, GlueSpec):
glue_spec = glue_type
else:
            raise ValueError("glue_type must be a glue spec name or instance.")
if copy:
glue_spec = glue_spec.copy()
self.glue_spec = glue_spec
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= GROW_FACTOR
class GlueSpec(object):
"""
See :class:`Glue`.
"""
def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0):
self.width = width
self.stretch = stretch
self.stretch_order = stretch_order
self.shrink = shrink
self.shrink_order = shrink_order
def copy(self):
return GlueSpec(
self.width,
self.stretch,
self.stretch_order,
self.shrink,
self.shrink_order)
def factory(cls, glue_type):
return cls._types[glue_type]
factory = classmethod(factory)
GlueSpec._types = {
'fil': GlueSpec(0., 1., 1, 0., 0),
'fill': GlueSpec(0., 1., 2, 0., 0),
'filll': GlueSpec(0., 1., 3, 0., 0),
'neg_fil': GlueSpec(0., 0., 0, 1., 1),
'neg_fill': GlueSpec(0., 0., 0, 1., 2),
'neg_filll': GlueSpec(0., 0., 0, 1., 3),
'empty': GlueSpec(0., 0., 0, 0., 0),
'ss': GlueSpec(0., 1., 1, -1., 1)
}
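# Illustrative sketch (not part of the original module): Glue objects share
# the predefined GlueSpec instances above unless copy=True is passed, which
# is the memory optimization mentioned in the Glue docstring.  Never called
# at import time.
def _glue_spec_sharing_demo():
    shared = Glue('fil')
    private = Glue('fil', copy=True)
    assert shared.glue_spec is GlueSpec._types['fil']
    assert private.glue_spec is not GlueSpec._types['fil']
    return shared, private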
# Some convenient ways to get common kinds of glue
class Fil(Glue):
def __init__(self):
Glue.__init__(self, 'fil')
class Fill(Glue):
def __init__(self):
Glue.__init__(self, 'fill')
class Filll(Glue):
def __init__(self):
Glue.__init__(self, 'filll')
class NegFil(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fil')
class NegFill(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fill')
class NegFilll(Glue):
def __init__(self):
Glue.__init__(self, 'neg_filll')
class SsGlue(Glue):
def __init__(self):
Glue.__init__(self, 'ss')
class HCentered(Hlist):
"""
A convenience class to create an :class:`Hlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Hlist.__init__(self, [SsGlue()] + elements + [SsGlue()],
do_kern=False)
class VCentered(Vlist):
"""
A convenience class to create a :class:`Vlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()])
class Kern(Node):
"""
A :class:`Kern` node has a width field to specify a (normally
negative) amount of spacing. This spacing correction appears in
horizontal lists between letters like A and V when the font
designer said that it looks better to move them closer together or
further apart. A kern node can also appear in a vertical list,
when its *width* denotes additional spacing in the vertical
direction.
"""
height = 0
depth = 0
def __init__(self, width):
Node.__init__(self)
self.width = width
def __repr__(self):
return "k%.02f" % self.width
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
class SubSuperCluster(Hlist):
"""
    :class:`SubSuperCluster` is a sort of hack to get around the fact
    that this code does not do a two-pass parse like TeX.  This lets us store
enough information in the hlist itself, namely the nucleus, sub-
and super-script, such that if another script follows that needs
to be attached, it can be reconfigured on the fly.
"""
def __init__(self):
self.nucleus = None
self.sub = None
self.super = None
Hlist.__init__(self, [])
class AutoHeightChar(Hlist):
"""
:class:`AutoHeightChar` will create a character as close to the
given height and depth as possible. When using a font with
multiple height versions of some characters (such as the BaKoMa
fonts), the correct glyph will be selected, otherwise this will
always just return a scaled version of the glyph.
"""
def __init__(self, c, height, depth, state, always=False):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
target_total = height + depth
for fontname, sym in alternatives:
state.font = fontname
char = Char(sym, state)
if char.height + char.depth >= target_total:
break
factor = target_total / (char.height + char.depth)
state.fontsize *= factor
char = Char(sym, state)
shift = (depth - char.depth)
Hlist.__init__(self, [char])
self.shift_amount = shift
class AutoWidthChar(Hlist):
"""
:class:`AutoWidthChar` will create a character as close to the
given width as possible. When using a font with multiple width
versions of some characters (such as the BaKoMa fonts), the
correct glyph will be selected, otherwise this will always just
return a scaled version of the glyph.
"""
def __init__(self, c, width, state, always=False, char_class=Char):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
for fontname, sym in alternatives:
state.font = fontname
char = char_class(sym, state)
if char.width >= width:
break
factor = width / char.width
state.fontsize *= factor
char = char_class(sym, state)
Hlist.__init__(self, [char])
self.width = char.width
class Ship(object):
"""
Once the boxes have been set up, this sends them to output. Since
boxes can be inside of boxes inside of boxes, the main work of
:class:`Ship` is done by two mutually recursive routines,
:meth:`hlist_out` and :meth:`vlist_out`, which traverse the
:class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal
and vertical boxes. The global variables used in TeX to store
state as it processes have become member variables here.
"""
def __call__(self, ox, oy, box):
self.max_push = 0 # Deepest nesting of push commands so far
self.cur_s = 0
self.cur_v = 0.
self.cur_h = 0.
self.off_h = ox
self.off_v = oy + box.height
self.hlist_out(box)
def clamp(value):
if value < -1000000000.:
return -1000000000.
if value > 1000000000.:
return 1000000000.
return value
clamp = staticmethod(clamp)
def hlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
base_line = self.cur_v
left_edge = self.cur_h
self.cur_s += 1
self.max_push = max(self.cur_s, self.max_push)
clamp = self.clamp
for p in box.children:
if isinstance(p, Char):
p.render(self.cur_h + self.off_h, self.cur_v + self.off_v)
self.cur_h += p.width
elif isinstance(p, Kern):
self.cur_h += p.width
elif isinstance(p, List):
# node623
if len(p.children) == 0:
self.cur_h += p.width
else:
edge = self.cur_h
self.cur_v = base_line + p.shift_amount
if isinstance(p, Hlist):
self.hlist_out(p)
else:
# p.vpack(box.height + box.depth, 'exactly')
self.vlist_out(p)
self.cur_h = edge + p.width
self.cur_v = base_line
elif isinstance(p, Box):
# node624
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_height):
rule_height = box.height
if isinf(rule_depth):
rule_depth = box.depth
if rule_height > 0 and rule_width > 0:
                    self.cur_v = base_line + rule_depth
                    p.render(self.cur_h + self.off_h,
                             self.cur_v + self.off_v,
                             rule_width, rule_height)
                    self.cur_v = base_line
self.cur_h += rule_width
elif isinstance(p, Glue):
# node625
glue_spec = p.glue_spec
rule_width = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order:
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_width += cur_g
self.cur_h += rule_width
self.cur_s -= 1
def vlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
self.cur_s += 1
self.max_push = max(self.max_push, self.cur_s)
left_edge = self.cur_h
self.cur_v -= box.height
top_edge = self.cur_v
clamp = self.clamp
for p in box.children:
if isinstance(p, Kern):
self.cur_v += p.width
elif isinstance(p, List):
if len(p.children) == 0:
self.cur_v += p.height + p.depth
else:
self.cur_v += p.height
self.cur_h = left_edge + p.shift_amount
save_v = self.cur_v
p.width = box.width
if isinstance(p, Hlist):
self.hlist_out(p)
else:
self.vlist_out(p)
self.cur_v = save_v + p.depth
self.cur_h = left_edge
elif isinstance(p, Box):
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_width):
rule_width = box.width
rule_height += rule_depth
if rule_height > 0 and rule_depth > 0:
self.cur_v += rule_height
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
rule_height = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order: # shrinking
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_height += cur_g
self.cur_v += rule_height
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in vlist")
self.cur_s -= 1
ship = Ship()
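# Illustrative sketch (not part of the original module): `ship` walks a
# finished box tree and drives the backend's render calls.  Plain Hbox/Vbox
# nodes render nothing, so this only exercises the traversal bookkeeping.
# Never called at import time.
def _ship_traversal_demo():
    column = Vlist([Hbox(4.0), Vbox(3.0, 1.0)])
    page = Hlist([Hbox(2.0), column], do_kern=False)
    ship(0.0, 0.0, page)
    return ship.max_push    # deepest push/pop nesting seen while shipping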
##############################################################################
# PARSER
def Error(msg):
"""
    Helper function to raise parser errors.
"""
def raise_error(s, loc, toks):
raise ParseFatalException(msg + "\n" + s)
empty = Empty()
empty.setParseAction(raise_error)
return empty
class Parser(object):
"""
This is the pyparsing-based parser for math expressions. It
actually parses full strings *containing* math expressions, in
that raw text may also appear outside of pairs of ``$``.
The grammar is based directly on that in TeX, though it cuts a few
corners.
"""
_binary_operators = set(r'''
+ *
\pm \sqcap \rhd
\mp \sqcup \unlhd
\times \vee \unrhd
\div \wedge \oplus
\ast \setminus \ominus
\star \wr \otimes
\circ \diamond \oslash
\bullet \bigtriangleup \odot
\cdot \bigtriangledown \bigcirc
\cap \triangleleft \dagger
\cup \triangleright \ddagger
\uplus \lhd \amalg'''.split())
_relation_symbols = set(r'''
= < > :
\leq \geq \equiv \models
\prec \succ \sim \perp
\preceq \succeq \simeq \mid
\ll \gg \asymp \parallel
\subset \supset \approx \bowtie
\subseteq \supseteq \cong \Join
\sqsubset \sqsupset \neq \smile
\sqsubseteq \sqsupseteq \doteq \frown
\in \ni \propto
\vdash \dashv \dots'''.split())
_arrow_symbols = set(r'''
\leftarrow \longleftarrow \uparrow
\Leftarrow \Longleftarrow \Uparrow
\rightarrow \longrightarrow \downarrow
\Rightarrow \Longrightarrow \Downarrow
\leftrightarrow \longleftrightarrow \updownarrow
\Leftrightarrow \Longleftrightarrow \Updownarrow
\mapsto \longmapsto \nearrow
\hookleftarrow \hookrightarrow \searrow
\leftharpoonup \rightharpoonup \swarrow
\leftharpoondown \rightharpoondown \nwarrow
\rightleftharpoons \leadsto'''.split())
_spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
_punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
_overunder_symbols = set(r'''
\sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
\bigwedge \bigodot \bigotimes \bigoplus \biguplus
'''.split())
_overunder_functions = set(
r"lim liminf limsup sup max min".split())
_dropsub_symbols = set(r'''\int \oint'''.split())
_fontnames = set("rm cal it tt sf bf default bb frak circled scr regular".split())
_function_names = set("""
arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
coth inf max tanh""".split())
_ambiDelim = set(r"""
| \| / \backslash \uparrow \downarrow \updownarrow \Uparrow
\Downarrow \Updownarrow .""".split())
_leftDelim = set(r"( [ { < \lfloor \langle \lceil".split())
_rightDelim = set(r") ] } > \rfloor \rangle \rceil".split())
def __init__(self):
# All forward declarations are here
font = Forward().setParseAction(self.font).setName("font")
latexfont = Forward()
subsuper = Forward().setParseAction(self.subsuperscript).setName("subsuper")
placeable = Forward().setName("placeable")
simple = Forward().setName("simple")
autoDelim = Forward().setParseAction(self.auto_sized_delimiter)
self._expression = Forward().setParseAction(self.finish).setName("finish")
float = Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
lbrace = Literal('{').suppress()
rbrace = Literal('}').suppress()
start_group = (Optional(latexfont) - lbrace)
start_group.setParseAction(self.start_group)
end_group = rbrace.copy()
end_group.setParseAction(self.end_group)
bslash = Literal('\\')
accent = oneOf(self._accent_map.keys() +
list(self._wide_accents))
function = oneOf(list(self._function_names))
fontname = oneOf(list(self._fontnames))
latex2efont = oneOf(['math' + x for x in self._fontnames])
space =(FollowedBy(bslash)
+ oneOf([r'\ ',
r'\/',
r'\,',
r'\;',
r'\quad',
r'\qquad',
r'\!'])
).setParseAction(self.space).setName('space')
customspace =(Literal(r'\hspace')
- (( lbrace
- float
- rbrace
) | Error(r"Expected \hspace{n}"))
).setParseAction(self.customspace).setName('customspace')
unicode_range = u"\U00000080-\U0001ffff"
symbol =(Regex(UR"([a-zA-Z0-9 +\-*/<>=:,.;!\?&'@()\[\]|%s])|(\\[%%${}\[\]_|])" % unicode_range)
| (Combine(
bslash
+ oneOf(tex2uni.keys())
) + FollowedBy(Regex("[^a-zA-Z]")))
).setParseAction(self.symbol).leaveWhitespace()
c_over_c =(Suppress(bslash)
+ oneOf(self._char_over_chars.keys())
).setParseAction(self.char_over_chars)
accent = Group(
Suppress(bslash)
+ accent
- placeable
).setParseAction(self.accent).setName("accent")
function =(Suppress(bslash)
+ function
).setParseAction(self.function).setName("function")
group = Group(
start_group
+ ZeroOrMore(
autoDelim
^ simple)
- end_group
).setParseAction(self.group).setName("group")
font <<(Suppress(bslash)
+ fontname)
latexfont <<(Suppress(bslash)
+ latex2efont)
frac = Group(
Suppress(Literal(r"\frac"))
+ ((group + group)
| Error(r"Expected \frac{num}{den}"))
).setParseAction(self.frac).setName("frac")
stackrel = Group(
Suppress(Literal(r"\stackrel"))
+ ((group + group)
| Error(r"Expected \stackrel{num}{den}"))
).setParseAction(self.stackrel).setName("stackrel")
binom = Group(
Suppress(Literal(r"\binom"))
+ ((group + group)
| Error(r"Expected \binom{num}{den}"))
).setParseAction(self.binom).setName("binom")
ambiDelim = oneOf(list(self._ambiDelim))
leftDelim = oneOf(list(self._leftDelim))
rightDelim = oneOf(list(self._rightDelim))
rightDelimSafe = oneOf(list(self._rightDelim - set(['}'])))
genfrac = Group(
Suppress(Literal(r"\genfrac"))
+ ((Suppress(Literal('{')) +
oneOf(list(self._ambiDelim | self._leftDelim | set(['']))) +
Suppress(Literal('}')) +
Suppress(Literal('{')) +
oneOf(list(self._ambiDelim |
(self._rightDelim - set(['}'])) |
set(['', r'\}']))) +
Suppress(Literal('}')) +
Suppress(Literal('{')) +
Regex("[0-9]*(\.?[0-9]*)?") +
Suppress(Literal('}')) +
group + group + group)
| Error(r"Expected \genfrac{ldelim}{rdelim}{rulesize}{style}{num}{den}"))
).setParseAction(self.genfrac).setName("genfrac")
sqrt = Group(
Suppress(Literal(r"\sqrt"))
+ Optional(
Suppress(Literal("["))
- Regex("[0-9]+")
- Suppress(Literal("]")),
default = None
)
+ (group | Error("Expected \sqrt{value}"))
).setParseAction(self.sqrt).setName("sqrt")
overline = Group(
Suppress(Literal(r"\overline"))
+ (group | Error("Expected \overline{value}"))
).setParseAction(self.overline).setName("overline")
placeable <<(function
^ (c_over_c | symbol)
^ accent
^ group
^ frac
^ stackrel
^ binom
^ genfrac
^ sqrt
^ overline
)
simple <<(space
| customspace
| font
| subsuper
)
subsuperop = oneOf(["_", "^"])
subsuper << Group(
( Optional(placeable)
+ OneOrMore(
subsuperop
- placeable
)
)
| placeable
)
autoDelim <<(Suppress(Literal(r"\left"))
+ ((leftDelim | ambiDelim) | Error("Expected a delimiter"))
+ Group(
OneOrMore(
autoDelim
^ simple))
+ Suppress(Literal(r"\right"))
+ ((rightDelim | ambiDelim) | Error("Expected a delimiter"))
)
math = OneOrMore(
autoDelim
^ simple
).setParseAction(self.math).setName("math")
math_delim = ~bslash + Literal('$')
non_math = Regex(r"(?:(?:\\[$])|[^$])*"
).setParseAction(self.non_math).setName("non_math").leaveWhitespace()
self._expression << (
non_math
+ ZeroOrMore(
Suppress(math_delim)
+ Optional(math)
+ (Suppress(math_delim)
| Error("Expected end of math '$'"))
+ non_math
)
) + StringEnd()
self.clear()
def clear(self):
"""
Clear any state before parsing.
"""
self._expr = None
self._state_stack = None
self._em_width_cache = {}
def parse(self, s, fonts_object, fontsize, dpi):
"""
Parse expression *s* using the given *fonts_object* for
output, at the given *fontsize* and *dpi*.
Returns the parse tree of :class:`Node` instances.
"""
self._state_stack = [self.State(fonts_object, 'default', 'rm', fontsize, dpi)]
try:
self._expression.parseString(s)
except ParseException, err:
raise ValueError("\n".join([
"",
err.line,
" " * (err.column - 1) + "^",
str(err)]))
return self._expr
# The state of the parser is maintained in a stack. Upon
# entering and leaving a group { } or math/non-math, the stack
# is pushed and popped accordingly. The current state always
# exists in the top element of the stack.
class State(object):
"""
Stores the state of the parser.
States are pushed and popped from a stack as necessary, and
the "current" state is always at the top of the stack.
"""
def __init__(self, font_output, font, font_class, fontsize, dpi):
self.font_output = font_output
self._font = font
self.font_class = font_class
self.fontsize = fontsize
self.dpi = dpi
def copy(self):
return Parser.State(
self.font_output,
self.font,
self.font_class,
self.fontsize,
self.dpi)
def _get_font(self):
return self._font
def _set_font(self, name):
if name in ('rm', 'it', 'bf'):
self.font_class = name
self._font = name
font = property(_get_font, _set_font)
def get_state(self):
"""
Get the current :class:`State` of the parser.
"""
return self._state_stack[-1]
def pop_state(self):
"""
Pop a :class:`State` off of the stack.
"""
self._state_stack.pop()
def push_state(self):
"""
Push a new :class:`State` onto the stack which is just a copy
of the current state.
"""
self._state_stack.append(self.get_state().copy())
def finish(self, s, loc, toks):
#~ print "finish", toks
self._expr = Hlist(toks)
return [self._expr]
def math(self, s, loc, toks):
#~ print "math", toks
hlist = Hlist(toks)
self.pop_state()
return [hlist]
def non_math(self, s, loc, toks):
#~ print "non_math", toks
s = toks[0].replace(r'\$', '$')
symbols = [Char(c, self.get_state()) for c in s]
hlist = Hlist(symbols)
# We're going into math now, so set font to 'it'
self.push_state()
self.get_state().font = rcParams['mathtext.default']
return [hlist]
def _make_space(self, percentage):
# All spaces are relative to em width
state = self.get_state()
key = (state.font, state.fontsize, state.dpi)
width = self._em_width_cache.get(key)
if width is None:
metrics = state.font_output.get_metrics(
state.font, rcParams['mathtext.default'], 'm', state.fontsize, state.dpi)
width = metrics.advance
self._em_width_cache[key] = width
return Kern(width * percentage)
_space_widths = { r'\ ' : 0.3,
r'\,' : 0.4,
r'\;' : 0.8,
r'\quad' : 1.6,
r'\qquad' : 3.2,
r'\!' : -0.4,
r'\/' : 0.4 }
def space(self, s, loc, toks):
assert(len(toks)==1)
num = self._space_widths[toks[0]]
box = self._make_space(num)
return [box]
def customspace(self, s, loc, toks):
return [self._make_space(float(toks[1]))]
def symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
if c == "'":
            c = r'\prime'
try:
char = Char(c, self.get_state())
except ValueError:
raise ParseFatalException("Unknown symbol: %s" % c)
if c in self._spaced_symbols:
return [Hlist( [self._make_space(0.2),
char,
self._make_space(0.2)] ,
do_kern = False)]
elif c in self._punctuation_symbols:
return [Hlist( [char,
self._make_space(0.2)] ,
do_kern = False)]
return [char]
_char_over_chars = {
        # The first 2 entries in the tuple are (font, char, sizescale) for
# the two symbols under and over. The third element is the space
# (in multiples of underline height)
r'AA' : ( ('rm', 'A', 1.0), (None, '\circ', 0.5), 0.0),
}
def char_over_chars(self, s, loc, toks):
sym = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
under_desc, over_desc, space = \
self._char_over_chars.get(sym, (None, None, 0.0))
if under_desc is None:
raise ParseFatalException("Error parsing symbol")
over_state = state.copy()
if over_desc[0] is not None:
over_state.font = over_desc[0]
over_state.fontsize *= over_desc[2]
over = Accent(over_desc[1], over_state)
under_state = state.copy()
if under_desc[0] is not None:
under_state.font = under_desc[0]
under_state.fontsize *= under_desc[2]
under = Char(under_desc[1], under_state)
width = max(over.width, under.width)
over_centered = HCentered([over])
over_centered.hpack(width, 'exactly')
under_centered = HCentered([under])
under_centered.hpack(width, 'exactly')
return Vlist([
over_centered,
Vbox(0., thickness * space),
under_centered
])
_accent_map = {
r'hat' : r'\circumflexaccent',
r'breve' : r'\combiningbreve',
r'bar' : r'\combiningoverline',
r'grave' : r'\combininggraveaccent',
r'acute' : r'\combiningacuteaccent',
r'ddot' : r'\combiningdiaeresis',
r'tilde' : r'\combiningtilde',
r'dot' : r'\combiningdotabove',
r'vec' : r'\combiningrightarrowabove',
r'"' : r'\combiningdiaeresis',
r"`" : r'\combininggraveaccent',
r"'" : r'\combiningacuteaccent',
r'~' : r'\combiningtilde',
r'.' : r'\combiningdotabove',
r'^' : r'\circumflexaccent',
r'overrightarrow' : r'\rightarrow',
r'overleftarrow' : r'\leftarrow'
}
_wide_accents = set(r"widehat widetilde widebar".split())
def accent(self, s, loc, toks):
assert(len(toks)==1)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
if len(toks[0]) != 2:
raise ParseFatalException("Error parsing accent")
accent, sym = toks[0]
if accent in self._wide_accents:
accent = AutoWidthChar(
'\\' + accent, sym.width, state, char_class=Accent)
else:
accent = Accent(self._accent_map[accent], state)
centered = HCentered([accent])
centered.hpack(sym.width, 'exactly')
return Vlist([
centered,
Vbox(0., thickness * 2.0),
Hlist([sym])
])
def function(self, s, loc, toks):
#~ print "function", toks
self.push_state()
state = self.get_state()
state.font = 'rm'
hlist = Hlist([Char(c, state) for c in toks[0]])
self.pop_state()
hlist.function_name = toks[0]
return hlist
def start_group(self, s, loc, toks):
self.push_state()
# Deal with LaTeX-style font tokens
if len(toks):
self.get_state().font = toks[0][4:]
return []
def group(self, s, loc, toks):
grp = Hlist(toks[0])
return [grp]
def end_group(self, s, loc, toks):
self.pop_state()
return []
def font(self, s, loc, toks):
assert(len(toks)==1)
name = toks[0]
self.get_state().font = name
return []
def is_overunder(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._overunder_symbols
elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
return nucleus.function_name in self._overunder_functions
return False
def is_dropsub(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._dropsub_symbols
return False
def is_slanted(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.is_slanted()
return False
def subsuperscript(self, s, loc, toks):
assert(len(toks)==1)
# print 'subsuperscript', toks
nucleus = None
sub = None
super = None
if len(toks[0]) == 1:
return toks[0].asList()
elif len(toks[0]) == 2:
op, next = toks[0]
nucleus = Hbox(0.0)
if op == '_':
sub = next
else:
super = next
elif len(toks[0]) == 3:
nucleus, op, next = toks[0]
if op == '_':
sub = next
else:
super = next
elif len(toks[0]) == 5:
nucleus, op1, next1, op2, next2 = toks[0]
if op1 == op2:
if op1 == '_':
raise ParseFatalException("Double subscript")
else:
raise ParseFatalException("Double superscript")
if op1 == '_':
sub = next1
super = next2
else:
super = next1
sub = next2
else:
raise ParseFatalException(
"Subscript/superscript sequence is too long. "
"Use braces { } to remove ambiguity.")
state = self.get_state()
rule_thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
xHeight = state.font_output.get_xheight(
state.font, state.fontsize, state.dpi)
# Handle over/under symbols, such as sum or integral
if self.is_overunder(nucleus):
vlist = []
shift = 0.
width = nucleus.width
if super is not None:
super.shrink()
width = max(width, super.width)
if sub is not None:
sub.shrink()
width = max(width, sub.width)
if super is not None:
hlist = HCentered([super])
hlist.hpack(width, 'exactly')
vlist.extend([hlist, Kern(rule_thickness * 3.0)])
hlist = HCentered([nucleus])
hlist.hpack(width, 'exactly')
vlist.append(hlist)
if sub is not None:
hlist = HCentered([sub])
hlist.hpack(width, 'exactly')
vlist.extend([Kern(rule_thickness * 3.0), hlist])
shift = hlist.height
vlist = Vlist(vlist)
vlist.shift_amount = shift + nucleus.depth
result = Hlist([vlist])
return [result]
# Handle regular sub/superscripts
shift_up = nucleus.height - SUBDROP * xHeight
if self.is_dropsub(nucleus):
shift_down = nucleus.depth + SUBDROP * xHeight
else:
shift_down = SUBDROP * xHeight
if super is None:
# node757
sub.shrink()
x = Hlist([sub])
# x.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1)
clr = x.height - (abs(xHeight * 4.0) / 5.0)
shift_down = max(shift_down, clr)
x.shift_amount = shift_down
else:
super.shrink()
x = Hlist([super, Kern(SCRIPT_SPACE * xHeight)])
# x.width += SCRIPT_SPACE * xHeight
clr = SUP1 * xHeight
shift_up = max(shift_up, clr)
clr = x.depth + (abs(xHeight) / 4.0)
shift_up = max(shift_up, clr)
if sub is None:
x.shift_amount = -shift_up
else: # Both sub and superscript
sub.shrink()
y = Hlist([sub])
# y.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1 * xHeight)
clr = (2.0 * rule_thickness -
((shift_up - x.depth) - (y.height - shift_down)))
if clr > 0.:
shift_up += clr
shift_down += clr
if self.is_slanted(nucleus):
x.shift_amount = DELTA * (shift_up + shift_down)
x = Vlist([x,
Kern((shift_up - x.depth) - (y.height - shift_down)),
y])
x.shift_amount = shift_down
result = Hlist([nucleus, x])
return [result]
def _genfrac(self, ldelim, rdelim, rule, style, num, den):
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
rule = float(rule)
num.shrink()
den.shrink()
cnum = HCentered([num])
cden = HCentered([den])
width = max(num.width, den.width)
cnum.hpack(width, 'exactly')
cden.hpack(width, 'exactly')
vlist = Vlist([cnum, # numerator
Vbox(0, thickness * 2.0), # space
Hrule(state, rule), # rule
Vbox(0, thickness * 2.0), # space
cden # denominator
])
# Shift so the fraction line sits in the middle of the
# equals sign
metrics = state.font_output.get_metrics(
state.font, rcParams['mathtext.default'],
'=', state.fontsize, state.dpi)
shift = (cden.height -
((metrics.ymax + metrics.ymin) / 2 -
thickness * 3.0))
vlist.shift_amount = shift
result = [Hlist([vlist, Hbox(thickness * 2.)])]
if ldelim or rdelim:
if ldelim == '':
ldelim = '.'
if rdelim == '':
rdelim = '.'
elif rdelim == r'\}':
rdelim = '}'
return self._auto_sized_delimiter(ldelim, result, rdelim)
return result
def genfrac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==6)
return self._genfrac(*tuple(toks[0]))
def frac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
num, den = toks[0]
return self._genfrac('', '', thickness, '', num, den)
def stackrel(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
num, den = toks[0]
return self._genfrac('', '', 0.0, '', num, den)
def binom(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
num, den = toks[0]
return self._genfrac('(', ')', 0.0, '', num, den)
def sqrt(self, s, loc, toks):
#~ print "sqrt", toks
root, body = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
# Determine the height of the body, and add a little extra to
# the height so it doesn't seem cramped
height = body.height - body.shift_amount + thickness * 5.0
depth = body.depth + body.shift_amount
check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
height = check.height - check.shift_amount
depth = check.depth + check.shift_amount
# Put a little extra space to the left and right of the body
padded_body = Hlist([Hbox(thickness * 2.0),
body,
Hbox(thickness * 2.0)])
rightside = Vlist([Hrule(state),
Fill(),
padded_body])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
                        'exactly', depth)
# Add the root and shift it upward so it is above the tick.
# The value of 0.6 is a hard-coded hack ;)
if root is None:
root = Box(check.width * 0.5, 0., 0.)
else:
root = Hlist([Char(x, state) for x in root])
root.shrink()
root.shrink()
root_vlist = Vlist([Hlist([root])])
root_vlist.shift_amount = -height * 0.6
hlist = Hlist([root_vlist, # Root
# Negative kerning to put root over tick
Kern(-check.width * 0.5),
check, # Check
rightside]) # Body
return [hlist]
def overline(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==1)
body = toks[0][0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = body.height - body.shift_amount + thickness * 3.0
depth = body.depth + body.shift_amount
# Place overline above body
rightside = Vlist([Hrule(state),
Fill(),
Hlist([body])])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
                        'exactly', depth)
hlist = Hlist([rightside])
return [hlist]
def _auto_sized_delimiter(self, front, middle, back):
state = self.get_state()
height = max([x.height for x in middle])
depth = max([x.depth for x in middle])
parts = []
# \left. and \right. aren't supposed to produce any symbols
if front != '.':
parts.append(AutoHeightChar(front, height, depth, state))
parts.extend(middle)
if back != '.':
parts.append(AutoHeightChar(back, height, depth, state))
hlist = Hlist(parts)
return hlist
def auto_sized_delimiter(self, s, loc, toks):
#~ print "auto_sized_delimiter", toks
front, middle, back = toks
return self._auto_sized_delimiter(front, middle.asList(), back)
###
##############################################################################
# MAIN
class MathTextParser(object):
_parser = None
_backend_mapping = {
'bitmap': MathtextBackendBitmap,
'agg' : MathtextBackendAgg,
'ps' : MathtextBackendPs,
'pdf' : MathtextBackendPdf,
'svg' : MathtextBackendSvg,
'path' : MathtextBackendPath,
'cairo' : MathtextBackendCairo,
'macosx': MathtextBackendAgg,
}
_font_type_mapping = {
'cm' : BakomaFonts,
'stix' : StixFonts,
'stixsans' : StixSansFonts,
'custom' : UnicodeFonts
}
def __init__(self, output):
"""
Create a MathTextParser for the given backend *output*.
"""
self._output = output.lower()
self._cache = maxdict(50)
def parse(self, s, dpi = 72, prop = None):
"""
Parse the given math expression *s* at the given *dpi*. If
*prop* is provided, it is a
:class:`~matplotlib.font_manager.FontProperties` object
specifying the "default" font to use in the math expression,
used for all non-math text.
The results are cached, so multiple calls to :meth:`parse`
with the same expression should be fast.
"""
if prop is None:
prop = FontProperties()
cacheKey = (s, dpi, hash(prop))
result = self._cache.get(cacheKey)
if result is not None:
return result
if self._output == 'ps' and rcParams['ps.useafm']:
font_output = StandardPsFonts(prop)
else:
backend = self._backend_mapping[self._output]()
fontset = rcParams['mathtext.fontset']
fontset_class = self._font_type_mapping.get(fontset.lower())
if fontset_class is not None:
font_output = fontset_class(prop, backend)
else:
raise ValueError(
"mathtext.fontset must be either 'cm', 'stix', "
"'stixsans', or 'custom'")
fontsize = prop.get_size_in_points()
# This is a class variable so we don't rebuild the parser
# with each request.
if self._parser is None:
self.__class__._parser = Parser()
box = self._parser.parse(s, font_output, fontsize, dpi)
font_output.set_canvas_size(box.width, box.height, box.depth)
result = font_output.get_results(box)
self._cache[cacheKey] = result
# Free up the transient data structures
self._parser.clear()
# Fix cyclical references
font_output.destroy()
font_output.mathtext_backend.fonts_object = None
font_output.mathtext_backend = None
return result
def to_mask(self, texstr, dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
x = ftimage.as_array()
return x, depth
def to_rgba(self, texstr, color='black', dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
Any matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
        - *array* is an NxMx4 RGBA uint8 array of the rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize)
r, g, b = mcolors.colorConverter.to_rgb(color)
RGBA = np.zeros((x.shape[0], x.shape[1], 4), dtype=np.uint8)
RGBA[:,:,0] = int(255*r)
RGBA[:,:,1] = int(255*g)
RGBA[:,:,2] = int(255*b)
RGBA[:,:,3] = x
return RGBA, depth
def to_png(self, filename, texstr, color='black', dpi=120, fontsize=14):
"""
Writes a tex expression to a PNG file.
Returns the offset of the baseline from the bottom of the
image in pixels.
*filename*
A writable filename or fileobject
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
A valid matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns the offset of the baseline from the bottom of the
image in pixels.
"""
rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize)
numrows, numcols, tmp = rgba.shape
_png.write_png(rgba.tostring(), numcols, numrows, filename)
return depth
def get_depth(self, texstr, dpi=120, fontsize=14):
"""
Returns the offset of the baseline from the bottom of the
image in pixels.
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
return depth
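# Illustrative usage sketch (not part of the original module): rasterizing an
# expression through the bitmap output.  The expression, color, dpi and
# fontsize below are arbitrary examples.  Never called at import time.
def _mathtext_parser_demo():
    parser = MathTextParser('bitmap')
    rgba, depth = parser.to_rgba(r'$\sqrt{x^2 + 1}$', color='blue',
                                 dpi=100, fontsize=12)
    return rgba.shape, depth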
def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None):
"""
Given a math expression, renders it in a closely-clipped bounding
box to an image file.
*s*
A math expression. The math portion should be enclosed in
dollar signs.
*filename_or_obj*
A filepath or writable file-like object to write the image data
to.
*prop*
If provided, a FontProperties() object describing the size and
style of the text.
*dpi*
Override the output dpi, otherwise use the default associated
with the output format.
*format*
The output format, eg. 'svg', 'pdf', 'ps' or 'png'. If not
provided, will be deduced from the filename.
"""
from matplotlib import figure
# backend_agg supports all of the core output formats
from matplotlib.backends import backend_agg
if prop is None:
prop = FontProperties()
parser = MathTextParser('path')
width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop)
fig = figure.Figure(figsize=(width / 72.0, height / 72.0))
fig.text(0, depth/height, s, fontproperties=prop)
backend_agg.FigureCanvasAgg(fig)
fig.savefig(filename_or_obj, dpi=dpi, format=format)
return depth
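# Illustrative usage sketch (not part of the original module): writing a
# standalone formula to a PNG file with math_to_image.  The file name and
# expression are arbitrary examples; FontProperties is the class already
# imported at the top of this module.  Never called at import time.
def _math_to_image_demo():
    prop = FontProperties(size=14)
    return math_to_image(r'$\int_0^1 x^2\,dx = 1/3$', 'formula.png',
                         prop=prop, dpi=120, format='png')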
| gpl-2.0 |
qsnake/gpaw | gpaw/atom/aeatom.py | 1 | 21867 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from math import pi, log
import numpy as np
from scipy.special import gamma
from numpy.linalg import eigh
from scipy.interpolate import splrep, splev
from ase.data import atomic_numbers, atomic_names, chemical_symbols
from scipy.integrate import odeint
import ase.units as units
from ase.utils import devnull
from gpaw.atom.configurations import configurations
from gpaw.xc import XC
from gpaw.utilities.progressbar import ProgressBar
# Velocity of light in atomic units:
c = 2 * units._hplanck / (units._mu0 * units._c * units._e**2)
class GridDescriptor:
def __init__(self, r1, rN=50.0, N=1000):
"""Grid descriptor for radial grid.
The radial grid is::
a g
r(g) = -------, g = 0, 1, ..., N - 1
1 - b g
so that r(0)=0, r(1)=r1 and r(N)=rN."""
self.N = N
self.a = (1 - 1.0 / N) / (1.0 / r1 - 1.0 / rN)
self.b = 1.0 - self.a / r1
g_g = np.arange(N)
self.r_g = self.a * g_g / (1 - self.b * g_g)
self.dr_g = (self.b * self.r_g + self.a)**2 / self.a
self.dv_g = 4 * pi * self.r_g**2 * self.dr_g
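        # Note (added): a and b follow from 1/r(g) = 1/(a*g) - b/a together
        # with the conditions r(1) = r1 and r(N) = rN:
        #     1/r1 - 1/rN = (1 - 1/N)/a  =>  a = (1 - 1/N)/(1/r1 - 1/rN)
        #     b = 1 - a/r1
        # which is exactly what is computed above.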
def get_index(self, r):
return int(1 / (self.b + self.a / r) + 0.5)
def zeros(self, x=()):
if isinstance(x, int):
x = (x,)
return np.zeros(x + (self.N,))
def empty(self, x=()):
if isinstance(x, int):
x = (x,)
return np.zeros(x + (self.N,))
def integrate(self, a_xg, n=0):
assert n > -2
return np.dot(a_xg[..., 1:],
(self.r_g**(2 + n) * self.dr_g)[1:]) * (4 * pi)
def derivative(self, n_g, dndr_g):
"""Finite-difference derivative of radial function."""
dndr_g[0] = n_g[1] - n_g[0]
dndr_g[1:-1] = 0.5 * (n_g[2:] - n_g[:-2])
dndr_g[-1] = n_g[-1] - n_g[-2]
dndr_g /= self.dr_g
def derivative2(self, a_g, b_g):
"""Finite-difference derivative of radial function.
For an infinitely dense grid, this method would be identical
to the `derivative` method."""
c_g = a_g / self.dr_g
b_g[0] = 0.5 * c_g[1] + c_g[0]
b_g[1] = 0.5 * c_g[2] - c_g[0]
b_g[1:-1] = 0.5 * (c_g[2:] - c_g[:-2])
b_g[-2] = c_g[-1] - 0.5 * c_g[-3]
b_g[-1] = -c_g[-1] - 0.5 * c_g[-2]
def poisson(self, n_g):
a_g = -4 * pi * n_g * self.r_g * self.dr_g
A_g = np.add.accumulate(a_g)
vr_g = self.zeros()
vr_g[1:] = A_g[:-1] + 0.5 * a_g[1:]
vr_g -= A_g[-1]
vr_g *= self.r_g
a_g *= self.r_g
A_g = np.add.accumulate(a_g)
vr_g[1:] -= A_g[:-1] + 0.5 * a_g[1:]
return vr_g
def plot(self, a_g, n=0, rc=4.0, show=False):
import matplotlib.pyplot as plt
plt.plot(self.r_g, a_g * self.r_g**n)
plt.axis(xmax=rc)
if show:
plt.show()
class GaussianBasis:
def __init__(self, l, alpha_B, gd, eps=1.0e-7):
"""Guassian basis set for spherically symmetric atom.
l: int
Angular momentum quantum number.
alpha_B: ndarray
Exponents.
gd: GridDescriptor
Grid descriptor.
eps: float
Cutoff for eigenvalues of overlap matrix."""
self.l = l
self.alpha_B = alpha_B
self.gd = gd
A_BB = np.add.outer(alpha_B, alpha_B)
M_BB = np.multiply.outer(alpha_B, alpha_B)
# Overlap matrix:
S_BB = (2 * M_BB**0.5 / A_BB)**(l + 1.5)
# Kinetic energy matrix:
T_BB = 2**(l + 2.5) * M_BB**(0.5 * l + 0.75) / gamma(l + 1.5) * (
gamma(l + 2.5) * M_BB / A_BB**(l + 2.5) -
0.5 * (l + 1) * gamma(l + 1.5) / A_BB**(l + 0.5) +
0.25 * (l + 1) * (2 * l + 1) * gamma(l + 0.5) / A_BB**(l + 0.5))
# Derivative matrix:
D_BB = 2**(l + 2.5) * M_BB**(0.5 * l + 0.75) / gamma(l + 1.5) * (
0.5 * (l + 1) * gamma(l + 1) / A_BB**(l + 1) -
gamma(l + 2) * alpha_B / A_BB**(l + 2))
# 1/r matrix:
K_BB = 2**(l + 2.5) * M_BB**(0.5 * l + 0.75) / gamma(l + 1.5) * (
0.5 * gamma(l + 1) / A_BB**(l + 1))
# Find set of linearly independent functions.
# We have len(alpha_B) gaussians (index B) and self.nbasis
# linearly independent functions (index b).
s_B, U_BB = eigh(S_BB)
self.nbasis = int((s_B > eps).sum())
Q_Bb = np.dot(U_BB[:, -self.nbasis:],
np.diag(s_B[-self.nbasis:]**-0.5))
self.T_bb = np.dot(np.dot(Q_Bb.T, T_BB), Q_Bb)
self.D_bb = np.dot(np.dot(Q_Bb.T, D_BB), Q_Bb)
self.K_bb = np.dot(np.dot(Q_Bb.T, K_BB), Q_Bb)
r_g = gd.r_g
# Avoid errors in debug mode from division by zero:
old_settings = np.seterr(divide='ignore')
self.basis_bg = (np.dot(
Q_Bb.T,
(2 * (2 * alpha_B[:, np.newaxis])**(l + 1.5) /
gamma(l + 1.5))**0.5 *
np.exp(-np.multiply.outer(alpha_B, r_g**2))) * r_g**l)
np.seterr(**old_settings)
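        # Note (added): the rows of basis_bg are the orthonormalized radial
        # functions, i.e. linear combinations of the normalized
        # r**l * exp(-alpha*r**2) primitives that are orthonormal in the
        # measure r**2 dr.  As a sanity check, gd.integrate(chi_g**2) should
        # therefore come out close to 4*pi for every row chi_g of basis_bg.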
def __len__(self):
return self.nbasis
def expand(self, C_xb):
return np.dot(C_xb, self.basis_bg)
def calculate_potential_matrix(self, vr_g):
vr2dr_g = vr_g * self.gd.r_g * self.gd.dr_g
V_bb = np.inner(self.basis_bg[:, 1:],
self.basis_bg[:, 1:] * vr2dr_g[1:])
return V_bb
class Channel:
def __init__(self, l, s, f_n, basis):
self.l = l
self.s = s
self.basis = basis
self.C_nb = None # eigenvectors
self.e_n = None # eigenvalues
self.f_n = np.array(f_n, dtype=float) # occupation numbers
self.name = 'spdf'[l]
def solve(self, vr_g):
"""Diagonalize Schrödinger equation in basis set."""
H_bb = self.basis.calculate_potential_matrix(vr_g)
H_bb += self.basis.T_bb
self.e_n, C_bn = eigh(H_bb)
self.C_nb = C_bn.T
def calculate_density(self, n=None):
"""Calculate density."""
if n is None:
n_g = 0.0
for n, f in enumerate(self.f_n):
n_g += f * self.calculate_density(n)
else:
n_g = self.basis.expand(self.C_nb[n])**2 / (4 * pi)
return n_g
def get_eigenvalue_sum(self):
f_n = self.f_n
return np.dot(f_n, self.e_n[:len(f_n)])
class DiracChannel(Channel):
def __init__(self, k, f_n, basis):
l = (abs(2 * k + 1) - 1) // 2
Channel.__init__(self, l, 0, f_n, basis)
self.k = k
self.j = abs(k) - 0.5
self.c_nb = None # eigenvectors (small component)
self.name += '(%d/2)' % (2 * self.j)
def solve(self, vr_g):
"""Solve Dirac equation in basis set."""
nb = len(self.basis)
V_bb = self.basis.calculate_potential_matrix(vr_g)
H_bb = np.zeros((2 * nb, 2 * nb))
H_bb[:nb, :nb] = V_bb
H_bb[nb:, nb:] = V_bb - 2 * c**2 * np.eye(nb)
H_bb[nb:, :nb] = -c * (-self.basis.D_bb.T + self.k * self.basis.K_bb)
e_n, C_bn = eigh(H_bb)
if self.k < 0:
n0 = nb
else:
n0 = nb + 1
self.e_n = e_n[n0:].copy()
self.C_nb = C_bn[:nb, n0:].T.copy() # large component
self.c_nb = C_bn[nb:, n0:].T.copy() # small component
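        # Note (added): only the lower off-diagonal block of H_bb is filled,
        # because numpy.linalg.eigh uses the lower triangle by default.  The
        # lowest nb (or nb + 1 for k > 0) eigenvalues are the negative-energy
        # (positronic) solutions and are discarded above.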
def calculate_density(self, n=None):
"""Calculate density."""
if n is None:
n_g = Channel.calculate_density(self)
else:
n_g = (self.basis.expand(self.C_nb[n])**2 +
self.basis.expand(self.c_nb[n])**2) / (4 * pi)
if self.basis.l < 0:
n_g[0] = n_g[1]
return n_g
class AllElectronAtom:
def __init__(self, symbol, xc='LDA', spinpol=False, dirac=False,
log=sys.stdout):
"""All-electron calculation for spherically symmetric atom.
symbol: str (or int)
Chemical symbol (or atomic number).
xc: str
Name of XC-functional.
spinpol: bool
If true, do spin-polarized calculation. Default is spin-paired.
dirac: bool
Solve Dirac equation instead of Schrödinger equation.
log: stream
Text output."""
if isinstance(symbol, int):
symbol = chemical_symbols[symbol]
self.symbol = symbol
self.Z = atomic_numbers[symbol]
self.nspins = 1 + int(bool(spinpol))
self.dirac = bool(dirac)
if isinstance(xc, str):
self.xc = XC(xc)
else:
self.xc = xc
if log is None:
log = devnull
self.fd = log
self.vr_sg = None # potential * r
self.n_sg = 0.0 # density
self.gd = None # radial grid descriptor
# Energies:
self.ekin = None
self.eeig = None
self.eH = None
self.eZ = None
self.channels = None
self.initialize_configuration()
self.log('Z: ', self.Z)
self.log('Name: ', atomic_names[self.Z])
self.log('Symbol: ', symbol)
self.log('XC-functional: ', self.xc.name)
self.log('Equation: ', ['Schrödinger', 'Dirac'][self.dirac])
def log(self, *args, **kwargs):
self.fd.write(kwargs.get('sep', ' ').join([str(arg) for arg in args]) +
kwargs.get('end', '\n'))
def initialize_configuration(self):
self.f_lsn = {}
for n, l, f, e in configurations[self.symbol][1]:
if l not in self.f_lsn:
self.f_lsn[l] = [[] for s in range(self.nspins)]
if self.nspins == 1:
self.f_lsn[l][0].append(f)
else:
# Use Hund's rule:
f0 = min(f, 2 * l + 1)
self.f_lsn[l][0].append(f0)
self.f_lsn[l][1].append(f - f0)
def add(self, n, l, df=+1, s=None):
"""Add (remove) electrons."""
if s is None:
if self.nspins == 1:
s = 0
else:
self.add(n, l, 0.5 * df, 0)
self.add(n, l, 0.5 * df, 1)
return
if l not in self.f_lsn:
self.f_lsn[l] = [[] for x in range(self.nspins)]
f_n = self.f_lsn[l][s]
if len(f_n) < n - l:
f_n.extend([0] * (n - l - len(f_n)))
f_n[n - l - 1] += df
def initialize(self, ngpts=1000, rcut=50.0,
alpha1=0.01, alpha2=None, ngauss=50,
eps=1.0e-7):
"""Initialize basis sets and radial grid.
ngpts: int
Number of grid points for radial grid.
rcut: float
Cutoff for radial grid.
alpha1: float
Smallest exponent for gaussian.
alpha2: float
Largest exponent for gaussian.
ngauss: int
Number of gaussians.
eps: float
Cutoff for eigenvalues of overlap matrix."""
if alpha2 is None:
alpha2 = 50.0 * self.Z**2
self.gd = GridDescriptor(r1=1 / alpha2**0.5 / 50, rN=rcut, N=ngpts)
self.log('Grid points: %d (%.5f, %.5f, %.5f, ..., %.3f, %.3f)' %
((self.gd.N,) + tuple(self.gd.r_g[[0, 1, 2, -2, -1]])))
# Distribute exponents between alpha1 and alpha2:
alpha_B = alpha1 * (alpha2 / alpha1)**np.linspace(0, 1, ngauss)
self.log('Exponents: %d (%.3f, %.3f, ..., %.3f, %.3f)' %
((ngauss,) + tuple(alpha_B[[0, 1, -2, -1]])))
# Maximum l value:
lmax = max(self.f_lsn.keys())
self.channels = []
nb_l = []
if not self.dirac:
for l in range(lmax + 1):
basis = GaussianBasis(l, alpha_B, self.gd, eps)
nb_l.append(len(basis))
for s in range(self.nspins):
self.channels.append(Channel(l, s, self.f_lsn[l][s],
basis))
else:
for K in range(1, lmax + 2):
leff = (K**2 - (self.Z / c)**2)**0.5 - 1
basis = GaussianBasis(leff, alpha_B, self.gd, eps)
nb_l.append(len(basis))
for k, l in [(-K, K - 1), (K, K)]:
if l > lmax:
continue
f_n = self.f_lsn[l][0]
j = abs(k) - 0.5
f_n = (2 * j + 1) / (4 * l + 2) * np.array(f_n)
self.channels.append(DiracChannel(k, f_n, basis))
self.log('Basis functions: %s (%s)' %
(', '.join([str(nb) for nb in nb_l]),
', '.join('spdf'[:lmax + 1])))
self.vr_sg = self.gd.zeros(self.nspins)
self.vr_sg[:] = -self.Z
def solve(self):
"""Diagonalize Schrödinger equation."""
self.eeig = 0.0
for channel in self.channels:
channel.solve(self.vr_sg[channel.s])
self.eeig += channel.get_eigenvalue_sum()
def calculate_density(self):
"""Calculate elctron density and kinetic energy."""
self.n_sg = self.gd.zeros(self.nspins)
for channel in self.channels:
self.n_sg[channel.s] += channel.calculate_density()
def calculate_electrostatic_potential(self):
"""Calculate electrostatic potential and energy."""
n_g = self.n_sg.sum(0)
self.vHr_g = self.gd.poisson(n_g)
self.eH = 0.5 * self.gd.integrate(n_g * self.vHr_g, -1)
self.eZ = -self.Z * self.gd.integrate(n_g, -1)
def calculate_xc_potential(self):
self.vxc_sg = self.gd.zeros(self.nspins)
self.exc = self.xc.calculate_spherical(self.gd, self.n_sg, self.vxc_sg)
def step(self):
self.solve()
self.calculate_density()
self.calculate_electrostatic_potential()
self.calculate_xc_potential()
self.vr_sg = self.vxc_sg * self.gd.r_g
self.vr_sg += self.vHr_g
self.vr_sg -= self.Z
self.ekin = (self.eeig -
self.gd.integrate((self.vr_sg * self.n_sg).sum(0), -1))
def run(self, mix=0.4, maxiter=117, dnmax=1e-9):
if self.channels is None:
self.initialize()
dn = self.Z
pb = ProgressBar(log(dnmax / dn), 0, 53, self.fd)
self.log()
for iter in range(maxiter):
if iter > 1:
self.vr_sg *= mix
self.vr_sg += (1 - mix) * vr_old_sg
dn = self.gd.integrate(abs(self.n_sg - n_old_sg).sum(0))
pb(log(dnmax / dn))
if dn <= dnmax:
break
vr_old_sg = self.vr_sg
n_old_sg = self.n_sg
self.step()
self.summary()
if dn > dnmax:
raise RuntimeError('Did not converge!')
def summary(self):
self.write_states()
self.write_energies()
def write_states(self):
self.log('\n state occupation eigenvalue <r>')
if self.dirac:
self.log(' nl(j) [Hartree] [eV] [Bohr]')
else:
self.log(' nl [Hartree] [eV] [Bohr]')
self.log('=====================================================')
states = []
for ch in self.channels:
for n, f in enumerate(ch.f_n):
states.append((ch.e_n[n], ch, n))
states.sort()
for e, ch, n in states:
name = str(n + ch.l + 1) + ch.name
if self.nspins == 2:
name += '(%s)' % '+-'[ch.s]
n_g = ch.calculate_density(n)
rave = self.gd.integrate(n_g, 1)
self.log(' %-7s %6.3f %13.6f %13.5f %6.3f' %
(name, ch.f_n[n], e, e * units.Hartree, rave))
self.log('=====================================================')
def write_energies(self):
self.log('\nEnergies: [Hartree] [eV]')
self.log('============================================')
for text, e in [('kinetic ', self.ekin),
('coulomb (e-e)', self.eH),
('coulomb (e-n)', self.eZ),
('xc ', self.exc),
('total ',
self.ekin + self.eH + self.eZ + self.exc)]:
self.log(' %s %+13.6f %+13.5f' % (text, e, units.Hartree * e))
self.log('============================================')
def get_channel(self, l=None, s=0, k=None):
if self.dirac:
for channel in self.channels:
if channel.k == k:
return channel
else:
for channel in self.channels:
if channel.l == l and channel.s == s:
return channel
raise ValueError
def get_orbital(self, n, l=None, s=0, k=None):
channel = self.get_channel(l, s, k)
return channel.basis.expand(channel.C_nb[n])
def plot_wave_functions(self, rc=4.0):
import matplotlib.pyplot as plt
colors = 'krgbycm'
for ch in self.channels:
for n in range(len(ch.f_n)):
fr_g = ch.basis.expand(ch.C_nb[n]) * self.gd.r_g
name = str(n + ch.l + 1) + ch.name
lw = 2
if self.nspins == 2:
name += '(%s)' % '+-'[ch.s]
if ch.s == 1:
lw = 1
if self.dirac and ch.k > 0:
lw = 1
ls = ['-', '--', '-.', ':'][ch.l]
n_g = ch.calculate_density(n)
rave = self.gd.integrate(n_g, 1)
gave = self.gd.get_index(rave)
fr_g *= cmp(fr_g[gave], 0)
plt.plot(self.gd.r_g, fr_g,
ls=ls, lw=lw, color=colors[n + ch.l], label=name)
plt.legend(loc='best')
plt.axis(xmax=rc)
plt.show()
def logarithmic_derivative(self, l, energies, rcut):
vr = splrep(self.gd.r_g, self.vr_sg[0])
def v(r):
return splev(r, vr) / r
def f(y, r, e):
if r == 0:
return [y[1], -2.0]
return [y[1], 2 * (v(r) - e) * y[0]]
logderivs = []
for e in energies:
u, dudr = odeint(f, [0, 1], [0, rcut], (e,))[1, :]
logderivs.append(dudr / u)
return logderivs
def build_parser():
from optparse import OptionParser
parser = OptionParser(usage='%prog [options] element',
version='%prog 0.1')
parser.add_option('-f', '--xc-functional', type='string', default='LDA',
help='Exchange-Correlation functional ' +
'(default value LDA)',
metavar='<XC>')
parser.add_option('--add', metavar='states',
help='Add electron(s). Use "1s0.5a" to add 0.5 1s ' +
'electrons to the alpha-spin channel (use "b" for ' +
'beta-spin). The number of electrons defaults to ' +
'one. Examples: "1s", "2p2b", "4f0.1b,3d-0.1a".')
parser.add_option('-s', '--spin-polarized', action='store_true')
parser.add_option('-d', '--dirac', action='store_true')
parser.add_option('-p', '--plot', action='store_true')
parser.add_option('-e', '--exponents',
help='Exponents a: exp(-a*r^2). Use "-e 0.1:20.0:30" ' +
'to get 30 exponents from 0.1 to 20.0.')
parser.add_option('-l', '--logarithmic-derivatives',
help='-l 1.3,spdf,-2,2,100')
return parser
def main():
parser = build_parser()
opt, args = parser.parse_args()
if len(args) != 1:
parser.error('Incorrect number of arguments')
symbol = args[0]
nlfs = []
if opt.add:
for x in opt.add.split(','):
n = int(x[0])
l = 'spdfg'.find(x[1])
x = x[2:]
if x and x[-1] in 'ab':
s = int(x[-1] == 'b')
opt.spin_polarized = True
x = x[:-1]
else:
s = None
if x:
f = float(x)
else:
f = 1
nlfs.append((n, l, f, s))
aea = AllElectronAtom(symbol,
xc=opt.xc_functional,
spinpol=opt.spin_polarized,
dirac=opt.dirac)
if opt.exponents:
parts = opt.exponents.split(':')
kwargs = {}
kwargs['alpha1'] = float(parts[0])
if len(parts) > 1:
kwargs['alpha2'] = float(parts[1])
if len(parts) > 2:
kwargs['ngauss'] = int(parts[2])
aea.initialize(**kwargs)
for n, l, f, s in nlfs:
aea.add(n, l, f, s)
aea.run()
if opt.logarithmic_derivatives:
rcut, lvalues, emin, emax, npoints = \
opt.logarithmic_derivatives.split(',')
rcut = float(rcut)
lvalues = ['spdfg'.find(x) for x in lvalues]
emin = float(emin)
emax = float(emax)
npoints = int(npoints)
energies = np.linspace(emin, emax, npoints)
import matplotlib.pyplot as plt
for l in lvalues:
ld = aea.logarithmic_derivative(l, energies, rcut)
plt.plot(energies, ld)
plt.show()
if opt.plot:
aea.plot_wave_functions()
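# Example usage (editor's sketch; flags as defined in build_parser above):
#     python aeatom.py -p Si                      # LDA calculation, plot orbitals
#     python aeatom.py -d -l 1.3,spd,-2,2,100 Cu  # Dirac + logarithmic derivatives
# or from Python:
#     aea = AllElectronAtom('Si', xc='LDA')
#     aea.run()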
if __name__ == '__main__':
main()
| gpl-3.0 |
gameduell/dask | dask/dataframe/io/tests/test_demo.py | 3 | 1460 | import dask.dataframe as dd
import pandas.util.testing as tm
import pandas as pd
def test_make_timeseries():
df = dd.demo.make_timeseries('2000', '2015', {'A': float, 'B': int, 'C': str},
freq='2D', partition_freq='6M')
assert df.divisions[0] == pd.Timestamp('2000-01-31', offset='6M')
assert df.divisions[-1] == pd.Timestamp('2014-07-31', offset='6M')
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C']))
assert df['A'].head().dtype == float
assert df['B'].head().dtype == int
assert df['C'].head().dtype == object
assert df.divisions == tuple(pd.DatetimeIndex(start='2000', end='2015',
freq='6M'))
tm.assert_frame_equal(df.head(), df.head())
a = dd.demo.make_timeseries('2000', '2015', {'A': float, 'B': int, 'C': str},
freq='2D', partition_freq='6M', seed=123)
b = dd.demo.make_timeseries('2000', '2015', {'A': float, 'B': int, 'C': str},
freq='2D', partition_freq='6M', seed=123)
tm.assert_frame_equal(a.head(), b.head())
def test_no_overlaps():
df = dd.demo.make_timeseries('2000', '2001', {'A': float},
freq='3H', partition_freq='3M')
assert all(df.get_partition(i).index.max().compute() <
df.get_partition(i + 1).index.min().compute()
for i in range(df.npartitions - 2))
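# Note (added, sketch): make_timeseries is also handy outside the test suite
# for quick experiments, e.g.
#     df = dd.demo.make_timeseries('2000', '2001', {'x': float, 'id': int},
#                                  freq='1H', partition_freq='1M', seed=1)
#     df.x.mean().compute()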
| bsd-3-clause |
ctogle/dilapidator | src/dilap/BROKEN/structures/structgraph.py | 1 | 7649 | import dilap.core.base as db
import dilap.core.vector as dpv
import dilap.core.tools as dpr
import dilap.core.graph as dgr
import dilap.graph.twomanifold as tmg
import dilap.mesh.tools as dtl
import dilap.mesh.pointset as dps
import dilap.mesh.piecewisecomplex as pwc
import dilap.structures.tools as dstl
import dilap.structures.graphnode as gnd
import dilap.structures.graphedge as geg
import matplotlib.pyplot as plt
import random as rm
import math
import pdb
class graph(tmg.twomanifold_graph):
nodeclass = gnd.node
edgeclass = geg.edge
# given a collection of relevant walls
# produce polygons for one side of a wall
def model_interior_wall(self,ekeys):
pass
def model_rooms(self):
        ww = 0.75 # THIS IS NOT THE RIGHT VALUE...
mpolys = []
for r in self.rooms:
outlines = r
#bbnd = []
#for x in outlines[0]:
# nd = self.nodes[self.nodes_lookup[x]]
# bbnd.append(nd.p.copy())
bbnd = self.get_node_points(outlines[0])
#tbnd = []
tbnd = self.get_node_points(outlines[-1])
for x in range(len(outlines[-1])):
nd = self.nodes[self.nodes_lookup[outlines[-1][x]]]
tbnd[x].translate_z(nd.height)
dpr.inflate(bbnd,-ww/math.sqrt(2))
dpr.inflate(tbnd,-ww/math.sqrt(2))
mpolys.append((tuple(bbnd),()))
mpolys.append((tuple(tbnd),()))
return mpolys
def model_walls(self):
mpolys = []
for e in self.edges:
if e is None:continue
print('lookup',e.key(),self.rooms_lookup[e.key()])
if len(self.rooms_lookup[e.key()]) == 1:
#e.cut_window(3,2,1,0.5)
pass
'''#
wh1,wh2 = e.one.height+e.one.gap,e.two.height+e.two.gap
wargs = (e.one.p,e.two.p,wh1,wh2,e.width)
wkwargs = {'doors':e.doors,'windows':e.windows}
mpolys.extend(dstl.wall(*wargs,**wkwargs))
'''#
wh1,wh2 = e.one.height+e.one.gap,e.two.height+e.two.gap
wargs = (e.one.p,e.two.p,wh1,wh2,e.width)
wkwargs = {'doors':e.doors,'windows':e.windows}
mpolys.extend(dstl.wall(*wargs,**wkwargs))
return mpolys
def model_corners(self):
        ww = 0.75 # THIS IS NOT THE RIGHT VALUE...
mpolys = []
for n in self.nodes:
if n is None:continue
print('node',n.key(),n.ring)
mpolys.extend(dstl.post(n.p,4,ww,n.height+n.gap))
return mpolys
def model_roof(self):
        ww = 0.75 # THIS IS NOT THE RIGHT VALUE...
mpolys = []
for r in self.rooms:
bnd = []
outlines = r
for nd in self.get_nodes(outlines[-1]):
                p = nd.p.copy().translate_z(nd.height+nd.gap)
                bnd.append(p)
mpolys.append((tuple(bnd),()))
return mpolys
def model(self):
mpolys = []
plc1 = dtl.box(15,15,10)
#plc2 = dtl.box(5,4,4).translate(dpv.vector(4,0,0))
plc2 = dtl.box(14,14,6).translate(dpv.vector(0,0,5))
plc3 = dtl.box(12,14,6).translate(dpv.vector(0,5,5))
#plc2 = dtl.box(8,3,2).translate(dpv.vector(0,0,1))
#plc2 = dtl.icosphere(2,1)
#plc2.translate(dpv.vector(0,0,5.0))
'''#
print('union input')
ax = dtl.plot_axes()
ax = plc1.plot(ax)
ax = plc2.plot(ax)
plt.show()
'''#
#plc4 = pwc.union(plc1,plc2)
plc5 = pwc.union(plc1,plc3)
#plc3 = pwc.difference(plc1,plc2)
#plc3 = pwc.intersection(plc1,plc2)
#plc2 = dtl.box(20,12,5).translate(dpv.vector(0,10,46.5))
#plc3 = pwc.difference(plc1,plc2)
print('union output')
ax = plc5.plot()
plt.show()
'''#
plc2 = dtl.box(10,10,6).translate(dpv.vector(5,0,6))
plc3 = pwc.union(plc3,plc2)
print('union output')
ax = dtl.plot_axes()
ax = plc3.plot(ax)
plt.show()
'''#
pys = []
for px in range(plc5.polygoncount):
pys.append(plc5.get_polygon_points(px))
mpolys = pys
#mpolys.extend(self.model_rooms())
#mpolys.extend(self.model_walls())
#mpolys.extend(self.model_corners())
#mpolys.extend(self.model_roof())
#mpolys = dtl.merge_polygons(mpolys)
return mpolys
def plot(self,ax = None):
ax = dgr.graph.plot(self,ax)
dtl.plot_polygon(list(self.boundary),ax)
return ax
def __init__(self,boundary,**kwargs):
dgr.graph.__init__(self,**kwargs)
self.boundary = boundary
self.rooms_lookup = {}
self.rooms = []
self.roomcount = 0
# given the index of a node, apply the effects of its layer
def _apply_node_layer(self,ndx):
nd = self.nodes[ndx]
if nd is None or nd.layer == 0:return
zkey = (nd.p.x,nd.p.y,nd.layer-1)
below = self.nodes[self.nodes_lookup[zkey]]
zoff = below.p.z + below.height + below.gap
nd.p.translate_z(zoff)
def _add_node(self,ndkey,**kwargs):
kwargs['height'] = 8
kwargs['gap'] = 1
return dgr.graph._add_node(self,ndkey,**kwargs)
# add a new edge to the graph, or return existing index
def _add_edge(self,ndkey1,ndkey2,**kwargs):
nex = dgr.graph._add_edge(self,ndkey1,ndkey2,**kwargs)
if nex == self.edgecount-1:
ekey1,ekey2 = (ndkey1,ndkey2),(ndkey2,ndkey1)
self.rooms_lookup[ekey1] = []
self.rooms_lookup[ekey2] = []
return nex
# delete an existing edge from the graph
def _del_edge(self,ndkey1,ndkey2):
dgr.graph._del_edge(self,ndkey1,ndkey2)
ekey1,ekey2 = (ndkey1,ndkey2),(ndkey2,ndkey1)
del self.rooms_lookup[ekey1]
del self.rooms_lookup[ekey2]
# find edges which bound both rooms u and v
def _find_edges(self,u,v):
found = []
foundkeys = []
for ekey in self.rooms_lookup:
if ekey in foundkeys:continue
foundkeys.append(ekey)
foundkeys.append(ekey[::-1])
ering = self.rooms_lookup[ekey]
if u in ering and v in ering:
found.append(self.edges_lookup[ekey])
elif v == -1 and len(ering) == 1:
found.append(self.edges_lookup[ekey])
return found
# add door to an edge which bound both rooms u and v
def _connect_rooms(self,u,v):
bwn = self._find_edges(u,v)
if len(bwn) == 0:print('rooms are nonadjacent...',u,v)
elif len(bwn) == 1:self.edges[bwn[0]].cut_door(2,3,0.5)
else:
elys = [self.edges[x].layer() for x in bwn]
elym = min(elys)
bwn = [bwn[x] for x in range(len(bwn)) if elys[x] == elym]
self.edges[bwn[0]].cut_door(3,4,0.25)
def _add_room(self,rmnds):
self.rooms.append(rmnds)
rl,rdex = self.rooms_lookup,self.roomcount
outlines = rmnds
for ox in range(len(outlines)):
out = outlines[ox]
for x in range(len(out)):
ndkey1,ndkey2 = out[x-1],out[x]
nex = self._add_edge(ndkey1,ndkey2)
ekey1,ekey2 = (ndkey1,ndkey2),(ndkey2,ndkey1)
if not rdex in rl[ekey1]:rl[ekey1].append(rdex)
if not rdex in rl[ekey2]:rl[ekey2].append(rdex)
self.roomcount += 1
return rdex
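    # Note (added): rooms_lookup maps each directed edge key to the list of
    # room indices bordering that edge.  _find_edges/_connect_rooms use it to
    # decide where doors are cut between adjacent rooms, and model_walls checks
    # it to identify edges bordering a single room (exterior walls).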
| mit |
arianhosseini/Question-Answering | utils.py | 1 | 12623 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
#import joblib
import random
import os
import numpy
def read_word_vectors(filename, embed_dim=300, labled=False, splitter=' '):
import sys
import gzip
import math
word_vecs = {}
if filename.endswith('.gz'): file_object = gzip.open(filename, 'r')
else: file_object = open(filename, 'r')
for line_num, line in enumerate(file_object):
line = line.decode('utf-8').strip()
splited = line.split(splitter)
word = (' '.join(splited[:-embed_dim])).strip().encode('utf-8')
if labled:
word = word.split(':')[1]
word_vecs[word] = numpy.array(map(float, splited[-embed_dim:]))
# for index, vec_val in enumerate(splited[-embed_dim:]):
# word_vecs[word][index] = float(vec_val)
# word_vecs[word] /= math.sqrt((word_vecs[word]**2).sum() + 1e-6) #normalizer
sys.stderr.write("Vectors read from: "+filename+" \n")
return word_vecs
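# Example (editor's sketch; the embeddings path is a placeholder):
#     vecs = read_word_vectors('embeddings/vectors.300d.txt', embed_dim=300)
#     v = vecs['ozone']    # numpy array of length 300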
def init_embedding_table(filename='embeddings/vocab_embeddings.txt', embed_dim=300, vocab_file='squad_rare/vocab.txt'):
import theano
vocab = ['<DUMMY>', '<EOA>', '@placeholder', '<UNK>'] + [ w.strip().split()[0] for w in open(vocab_file) ]
reverse_vocab = {w: i for i, w in enumerate(vocab)}
word_vecs = read_word_vectors(filename,embed_dim)
embeddings = numpy.ndarray(shape=(len(vocab), embed_dim),dtype=theano.config.floatX)
count = 0
for k,v in word_vecs.iteritems():
if k.upper() in ['<DUMMY>', '<EOA>', '@placeholder', '<UNK>']:
k = k.upper()
# print (count)
# print (reverse_vocab[k])
count += 1
embeddings[reverse_vocab[k],:] = v
return embeddings
def write_vocab_embeddings(input_file, vocab_file='squad_rare/vocab.txt',embed_path='embeddings', embed_dim=300):
word_vecs = read_word_vectors(input_file, embed_dim=embed_dim, splitter=' ')
vocab = ['<DUMMY>', '<EOA>', '@placeholder', '<UNK>'] + [ w.strip().split()[0] for w in open(vocab_file) ]
vocab_embeddings = open(os.path.join(embed_path,'vocab_embeddings.txt'),'w')
unk_words = 0
sigma = 0.2
mu = 0
for i,word in enumerate(vocab):
if word in word_vecs:
embed_string = ' '.join(map(str,word_vecs[word].tolist()))
else: #sigma * np.random.randn(...) + mu
rand_embed = sigma * numpy.random.randn(embed_dim) + mu
embed_string = ' '.join(map(str,rand_embed.tolist()))
unk_words += 1
vocab_embeddings.write(word+' '+embed_string+'\n')
vocab_embeddings.close()
print("unk_words: %d"%unk_words)
print("file written")
def generate_squad_vocab(path, vocabulary_size=30000):
import json
import itertools
# from operator import itemgetter
    import nltk
    from nltk.probability import FreqDist
d = json.load(open(path))
tokenized_sentences = []
for reading in d['data']:
for paragraph in reading['paragraphs']:
sentence = paragraph['context'].lower()
tokenized_sentences.append(nltk.tokenize.word_tokenize(sentence))
for question in paragraph['qas']:
sentence = question['question'].lower() #TODO later check whether to add answer as well or not
tokenized_sentences.append(nltk.tokenize.word_tokenize(sentence))
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
print('total uniq words:', len(word_freq))
# sorted_freq = sorted(dict(word_freq).items(), key=itemgetter(1))[::-1]
full_vocab = word_freq.most_common(len(word_freq))
vocab = open('vocab_full.txt','w')
for w in full_vocab:
vocab.write(w[0]+'\t'+str(w[1])+'\n')
vocab.close()
shorted_vocab = word_freq.most_common(vocabulary_size-1)
vocab = open('vocab.txt','w')
for w in shorted_vocab:
vocab.write(w[0]+'\t'+str(w[1])+'\n')
vocab.close()
def add_rare_to_vocab(vocab_path='squad/vocab.txt', rare_count=100):
with open(vocab_path,'r+') as vocab:
content = vocab.read()
vocab.seek(0,0)
for i in range(rare_count):
vocab.write('@rare'+str(i)+' 0\n')
vocab.write(content)
def add_rare(ctx, q, a_list, vocab):
rare_dict = {}
rares = vocab[4:104]
ctx = ctx.split(' ')
q = q.split(' ')
a = [a.split(' ') for a in a_list]
iterable = [ctx,q] + a
for i, words in enumerate(iterable):
for index, word in enumerate(iterable[i]):
if not word in vocab or any(ord(char) not in range(128) for char in word):
if i >= 2:
if word in rare_dict:
iterable[i][index] = rare_dict[word]
# print (word +' to '+ rare_dict[word])
else:
rare_can = random.choice(rares)
rares.remove(rare_can)
rare_dict[word] = rare_can
iterable[i][index] = rare_dict[word]
print (rare_dict[word])
else:
if word in rare_dict:
if i >= 2:
iterable[i][index] = rare_dict[word]
# print (word +' to '+ rare_dict[word])
else:
if len(rares) == 0:
return (False,'','','')
rare_can = random.choice(rares)
rares.remove(rare_can)
rare_dict[word] = rare_can
iterable[i][index] = rare_dict[word]
return (True, ' '.join(iterable[0]), ' '.join(iterable[1]), [' '.join(a) for a in iterable[2:]])
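# Note (added): add_rare rewrites unknown (out-of-vocabulary or non-ASCII)
# words as the reserved '@rareN' placeholder tokens (vocab[4:104]), keeping one
# consistent mapping per example so the same word maps to the same placeholder
# in the context, question and answers.  It returns (False, '', '', '') once
# the pool of 100 placeholders is exhausted.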
def add_rare_to_squad(data_path='squad/dev-v1.0_tokenized.json',new_path='squad_rare_test', vocab_file='squad_rare/vocab.txt'):
import json
import itertools
import os
d = json.load(open(data_path))
file_name = data_path.split('/')[1]
tokenized_sentences = []
vocab = ['<DUMMY>', '<EOA>', '@placeholder', '<UNK>'] + [ w.strip().split()[0] for w in open(vocab_file) ]
print(vocab[4:104])
for reading in d['data']:
for paragraph in reading['paragraphs']:
for question in paragraph['qas']:
answers = [answer['text'].strip().lower() for answer in question['answers']]
status,paragraph['context'],question['question'],answers = add_rare(paragraph['context'], question['question'], answers, vocab)
for i,answer in enumerate(question['answers']):
question['answers'][i]['text'] = answers[i]
# print(question['answers'])
with open(os.path.join(new_path, file_name),'w') as outfile:
json.dump(d, outfile)
def add_rare_to_cnn_document(i, file_name, data_path, new_path, vocab):
global bad_ctxs_count
if i % 1000 == 0:
print('added rare to: %d'%i)
lines = [l.decode("UTF-8").rstrip('\n') for l in open(os.path.join(data_path,file_name))]
status, ctx, q, a = add_rare(lines[2].lower(), lines[4].lower(), lines[6].lower(), vocab)
if not status:
#bad_ctxs_count += 1
print('bad_ctxs_count')#: %d'%bad_ctxs_count)
else:
new_file = open(os.path.join(new_path,file_name),'w')
new_file.write('\n'.join(lines[0:2])+'\n')
new_file.write(ctx+'\n\n')
new_file.write(q+'\n\n')
new_file.write(a+'\n\n')
new_file.close()
def add_rare_to_cnn(data_path='deepmind-qa/cnn/questions/training',new_path='deepmind-qa/cnn_rare/questions/training', vocab_file='squad_rare/vocab.txt'):
    import itertools
    import joblib  # joblib.Parallel is used below; the module-level import is commented out
vocab = ['<DUMMY>', '<EOA>', '@placeholder', '<UNK>'] + [ w.strip().split()[0] for w in open(vocab_file) ]
l = [f for f in os.listdir(data_path) if os.path.isfile(os.path.join(data_path, f))]
print("number of files: "+str(len(l)))
joblib.Parallel(n_jobs=-1)(joblib.delayed(add_rare_to_cnn_document)(i,file_name,data_path,new_path,vocab) for i,file_name in enumerate(l))
def unanonymise_cnn(path='cnn_questions', new_path='cnn_new_questions'):
import os
import re
l = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
print("number of files: "+str(len(l)))
for i,file_name in enumerate(l):
if i % 1000 == 0:
print('unanonymised: %d'%i)
lines = [l.rstrip('\n') for l in open(os.path.join(path,file_name))]
entity_dict = dict([(s.split(':')[0],s.split(':')[1]) for s in lines[8:]])
new_lines = []
for line in lines:
# print (line)
for k,v in entity_dict.items():
line = re.sub(r"%s\b" % k , v, line)
# print (line)
new_lines.append(line)
new_file = open(os.path.join(new_path,file_name),'w')
new_file.write( '\n'.join(new_lines) )
new_file.close()
def compute_length_coverage(train_path='new_dev-v1.0_tokenized.json'):
import json
import itertools
# import matplotlib.pyplot as plt
untokenized = json.load(open("squad/dev-v1.0.json"))
d = json.load(open(train_path))
rared = json.load(open('squad_rare/dev-v1.0_tokenized.json'))
lengths = []
count = 0
total = 0
for i,reading in enumerate(d['data']):
for j,paragraph in enumerate(reading['paragraphs']):
context = paragraph['context']
for k,question in enumerate(paragraph['qas']):
answer = question['answers'][0]['text']
q = question['question']
total += 1
if answer in context:
pass
else:
count += 1
print ("-------")
print ("C: "+context)
print ("C: "+untokenized['data'][i]['paragraphs'][j]['context'])
print('--')
print ('Q: '+q)
print('--')
print ("A: "+answer)
lengths.append(len(question['answers'][0]['text'].split(' ')))
print (count)
print(total)
def compute_average_margin(path, example_count=1836975):
data = open(path,'r')
data = data.readlines()
count = 0.0
sum_of_margins = 0.0
for i in range(example_count):
count += 1.0
        sum_of_margins += float(data[i*11 + 4].strip()) - float(data[i*11 + 8].strip())
print ("count: ", count)
print ("sum_of_margins: ", sum_of_margins)
print ("average margin: ", sum_of_margins/count)
def tokenize_data(path, new_path):
import json
import nltk
import itertools
# from operator import itemgetter
from nltk.probability import FreqDist
d = json.load(open(path))
tokenized_sentences = []
for reading in d['data']:
for paragraph in reading['paragraphs']:
context_text = paragraph['context'].lower()
for question in paragraph['qas']:
question['question'] = ' '.join(nltk.tokenize.word_tokenize(question['question'].lower()))
for i,answer in enumerate(question['answers']):
answer_start_index = context_text.find(question['answers'][i]['text'].strip().lower())
if answer_start_index == -1:
answer_start_index = question['answers'][i]['answer_start']
answer_length = len(question['answers'][i]['text'].strip())
answer_text = context_text[answer_start_index:answer_start_index + answer_length].strip()
context_before_answer = context_text[:answer_start_index].lower()
context_after_answer = context_text[answer_start_index+answer_length:].lower()
tokenized_answer = nltk.tokenize.word_tokenize(answer_text)
question['answers'][i]['text'] = ' '.join(tokenized_answer)
# context_list = nltk.tokenize.word_tokenize(context_befor_answer) + tokenized_answer + nltk.tokenize.word_tokenize(context_after_answer)
tokenized_context = ' '.join(nltk.tokenize.word_tokenize(context_before_answer)) + \
' '+' '.join(tokenized_answer)+' '+\
' '.join(nltk.tokenize.word_tokenize(context_after_answer))
paragraph['context'] = tokenized_context
with open(new_path,'w') as outfile:
json.dump(d, outfile)
def main():
tokenize_data('squad/train-v1.0.json', 'squad/train-v1.0_tokenized.json' )
if __name__ == '__main__':
main()
| mit |
awentland90/AQ_MachineLearning_Experiments | NeuralNets/AQ_simple_nn.py | 1 | 3767 | #!/usr/bin/env python
""" AQ Simple NN
Description:
    Testing a *very* primitive neural network using the Python library pyneurgen (Don Smiley)
Usage:
User selects 1 observed variable to conduct ML using the NN
python AQ_simple_nn.py
Dataset:
Observed atmospheric variable at Big Bend NP during the end of August/early September 2012
TODO:
A lot.
"""
from __future__ import division
import matplotlib
matplotlib.use('TkAgg') # Display plots in GUI on Mac OS X
import random
import matplotlib.pyplot as plt
import pandas as pd
from pyneurgen.neuralnet import NeuralNet
# ~~~~ USER OPTIONS ~~~~ #
# Path to observations CSV
csv_in = "data/OBS_BBE401_subset.csv"
# Pick a variable to run the model on
# 'TEMPERATURE', 'RELATIVE_HUMIDITY', 'SOLAR_RADIATION', 'OZONE',
# 'PRECIPITATION', 'WINDSPEED', 'WIND_DIRECTION', 'WINDSPEED_SCALAR'
var = 'TEMPERATURE'
# ~~~~ END USER OPTIONS ~~~~ #
def parse_obs(obs_file):
names = ['DATE_TIME', 'TEMPERATURE', 'RELATIVE_HUMIDITY', 'SOLAR_RADIATION',
'OZONE', 'PRECIPITATION', 'WINDSPEED', 'WIND_DIRECTION', 'WINDSPEED_SCALAR']
    obs_df = pd.read_csv(obs_file, names=names, header=1)
return obs_df
def parse_df(df, var):
obs_len = len(df[var])
factor = 1.0 / float(obs_len)
obs_arr = [[i, round(df[var][i])] for i in range(obs_len)]
return factor, obs_arr
def population_gen(obs_arr):
"""
This function shuffles the values of the population and yields the
items in a random fashion.
"""
obs_sort = [item for item in obs_arr]
random.shuffle(obs_sort)
for item in obs_sort:
yield item
def run_nn():
net = NeuralNet()
net.init_layers(2, [10], 1)
net.randomize_network()
net.set_halt_on_extremes(True)
net.set_random_constraint(0.6)
net.set_learnrate(.01)
net.set_all_inputs(all_inputs)
net.set_all_targets(all_targets)
length = len(all_inputs)
learn_end_point = int(length * .8)
# Set learn range
net.set_learn_range(0, learn_end_point)
net.set_test_range(learn_end_point + 1, length - 1)
# Hidden layer activation type
net.layers[1].set_activation_type('sigmoid')
net.learn(epochs=10, show_epoch_results=True, random_testing=False)
mse = net.test()
print "test mse = ", mse
test_positions = [item[0][1] * 1000.0 for item in net.get_test_data()]
all_targets1 = [item[0][0] for item in net.test_targets_activations]
allactuals = [item[1][0] for item in net.test_targets_activations]
return net, test_positions, all_targets1, allactuals
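# Note (added): the network above is 2 inputs -> one hidden layer of 10
# sigmoid nodes -> 1 output node; the first 80% of the (shuffled) samples form
# the learn range and the remaining 20% the test range.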
def plot_results(ozone_obs, net, test_positions, all_targets1, allactuals):
# Summarize results
plt.subplot(3, 1, 1)
plt.plot([i[1] for i in ozone_obs])
plt.title("Population")
plt.grid(True)
plt.subplot(3, 1, 2)
plt.plot(test_positions, all_targets1, 'bo', label='targets')
plt.plot(test_positions, allactuals, 'ro', label='actuals')
plt.grid(True)
plt.legend(loc='lower left', numpoints=1)
plt.title("Test Target Points vs Actual Points")
plt.subplot(3, 1, 3)
plt.plot(range(1, len(net.accum_mse) + 1, 1), net.accum_mse)
plt.xlabel('Epochs')
plt.ylabel('MSE')
plt.grid(True)
plt.title("Mean Squared Error by Epoch")
plt.show()
if __name__ == '__main__':
obs = parse_obs(csv_in)
factor, obs_arr = parse_df(obs, var)
all_inputs = []
all_targets = []
# Create NN inputs
for position, target in population_gen(obs_arr):
pos = float(position)
all_inputs.append([random.random(), pos * factor])
all_targets.append([target])
net, test_positions, all_targets1, allactuals = run_nn()
plot_results(obs_arr, net, test_positions, all_targets1, allactuals)
| mit |
shahankhatch/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 68 | 23597 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
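# Note (added): the identity checked above is the primal-dual relationship of
# ridge regression,
#     dual_coef = (X X^T + lambda*I)^{-1} y    and    coef = X^T dual_coef,
# so the primal coefficients recovered from the dual (kernel) solution must
# match the direct Cholesky primal solution.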
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
| bsd-3-clause |
trappedInARibosome/go-model | go_predict_blast.py | 1 | 3616 | import argparse
import multiprocessing
import warnings
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_protein
from sklearn.externals import joblib
from sklearn.preprocessing import normalize
from lib.hmmer import hmmstats
from lib.hmmer import hmmscan
def main():
"""
The go_predict_blast module takes an input blast TSV file containing sequences and a SVC model object and makes
predictions about gene ontology based on the domain scores generated from a HMM domain model database
"""
sh_parse = argparse.ArgumentParser(description="Predict the classification of a tsv file from cp-blast")
sh_parse.add_argument("-f", "--file", dest="infile", help="Input sequence FILE", metavar="FILE", required=True)
sh_parse.add_argument("-c", "--column", dest="column", help="Sequence column NUMBER (0-index)", metavar="NUMBER",
required=True, type=int)
sh_parse.add_argument("-o", "--out", dest="outfile", help="Output matrix FILE", metavar="FILE", required=True)
sh_parse.add_argument("-d", "--db", dest="database", help="Database FILE", metavar="FILE", required=True)
sh_parse.add_argument("-m", "--model", dest="modelfile", help="Model joblib FILE", metavar="FILE", required=True)
sh_parse.add_argument("--cpu", dest="cores", help="Number of processor CORES to use", metavar="COREs", type=int,
default=1)
sh_args = sh_parse.parse_args()
go_predict_blast(sh_args.infile, sh_args.database, sh_args.modelfile, out_file=sh_args.outfile,
seq_column=sh_args.column, cores=sh_args.cores)
def go_predict_blast(infile_name, database_path, modelfile_name, out_file=None, seq_column=0, cores=2):
svc_model_est = joblib.load(modelfile_name)
hmmer_pool = multiprocessing.Pool(processes=cores, maxtasksperchild=1000)
with open(infile_name, mode="rU") as in_fh:
hmmer_imap = hmmer_pool.imap(PredictFromDomains(database_path, svc_model_est).hmmscan_predict,
line_generator(in_fh, column=seq_column))
with open(out_file, mode="w") as out_fh:
for line, prediction, proba in hmmer_imap:
print(line + "\t{}\t{}".format(prediction, proba), file=out_fh)
def line_generator(in_fh, column=0):
for line in in_fh:
line = line.strip()
if line[0] == "#":
continue
line_tabs = line.split("\t")
sequence = SeqRecord(Seq(line_tabs[column].strip(), alphabet=generic_protein),
id=line_tabs[1].strip(),
name=line_tabs[1].strip())
yield (sequence, line)
class PredictFromDomains:
def __init__(self, database, model, alpha=0.98):
self.database = database
self.domain_idx = hmmstats(database)
self.model = model
self.alpha = alpha
print("Protein domain file parsed: {} domains detected".format(len(self.domain_idx)))
def hmmscan_predict(self, data):
sequence, line = data
sparse_data, _ = hmmscan(sequence, self.database, self.domain_idx)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
predict_proba = self.model.predict_proba(normalize(sparse_data))[0]
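        # Call the sequence positive only when the model's probability for the
        # positive class exceeds the alpha threshold (0.98 by default).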
if predict_proba[1] > self.alpha:
predict = True
else:
predict = False
return line, predict, predict_proba[1]
if __name__ == '__main__':
main()
| mit |
pascalgutjahr/Praktikum-1 | Schwingung/phaselinear.py | 1 | 1829 | import numpy as np
import uncertainties.unumpy as unp
from uncertainties.unumpy import (nominal_values as noms, std_devs as stds)
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.optimize import curve_fit
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['font.size'] = 13
plt.rcParams['lines.linewidth'] = 1
csfont = {'fontname': 'Times New Roman'}
# linear plot of the phase data
fre, t = np.genfromtxt('tables/phase.txt', unpack=True, skip_header=2)
fre *= 1000
t /= 1e6
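# convert the measured time delay (now in seconds) to a phase angle: phi = 2*pi*nu*dt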
phirad = 2 * np.pi * fre * t
# theory curve
L = 3.53 * (10**-3)
C = 5.015 * (10**-9)
w = fre * 2 * np.pi
R = 271.6
# fre = np.linspace(np.log(15),np.log(20))
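# phase response assumed for the driven series RLC circuit:
#   tan(phi) = omega*R*C / (1 - omega**2*L*C)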
phi = np.arctan((w * R * C)/(1 - (L * C * (w**2))))
# plot only up to the resonance
#plt.plot(fre/1000, phi, 'b-', label='Theoriekurve')
# plt.plot(fre/1000, -phi, 'b-', label='Theoriekurve')
fre_theo = np.linspace(15000, 37500, 100)
phi_theo = np.arctan((2*np.pi*fre_theo * R * C)/(1 - (L * C * ((2*np.pi*fre_theo)**2))))
fre_theo2 = np.linspace(38000, 55000, 100)
phi_theo2 = np.arctan((2*np.pi*fre_theo2 * R * C)/(1 - (L * C * ((2*np.pi*fre_theo2)**2))))+np.pi
plt.plot(fre_theo/1000, phi_theo, 'b-', label='Theoriekurve')
plt.plot(fre_theo2/1000, phi_theo2, 'b-')
plt.plot(fre/1000, phirad, 'rx', label='Messwerte')
plt.plot((32.196, 32.196), (0.5, 2.5), 'g--', label='untere/obere Grenzfrequenz')
plt.plot((44.442, 44.442), (0.5, 2.5), 'g--')
plt.plot((36.822, 36.822), (0.5, 2.5), 'k--', label='Resonanzfrequenz')
# plt.xlim(30, 45)
plt.yticks(np.arange(0, np.pi, np.pi/4), ['$0$','$\mathrm{\pi}/4$','$\mathrm{\pi}/2$', '$3\mathrm{\pi}/4$'])
# plt.ylim(min(phirad)-5, max(phirad)+5)
plt.xlabel(r'$\mathrm{\nu} \,/\, \mathrm{kHz}$')
plt.ylabel(r'$\mathrm{\varphi}$')
plt.legend(loc='lower right')
plt.grid()
plt.tight_layout()
plt.savefig('Bilder/phaselinear.pdf')
plt.show()
| mit |
mjabri/holoviews | holoviews/ipython/parser.py | 1 | 14035 | """
The magics offered by the HoloViews IPython extension are powerful and
support rich, compositional specifications. To avoid the the brittle,
convoluted code that results from trying to support the syntax in pure
Python, this file defines suitable parsers using pyparsing that are
cleaner and easier to understand.
Pyparsing is required by matplotlib and will therefore be available if
HoloViews is being used in conjunction with matplotlib.
"""
import param
from itertools import groupby
import pyparsing as pp
from holoviews.core.options import Options
from ..operation import Compositor
ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
allowed = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!#$%&\()*+,-./:;<=>?@\\^_`{|}~'
# To generate warning in the standard param style
# Parameterize Parser and use warning method once param supports
# logging at the class level.
class ParserWarning(param.Parameterized):pass
parsewarning = ParserWarning(name='Warning')
class Parser(object):
"""
Base class for magic line parsers, designed for forgiving parsing
of keyword lists.
"""
# Static namespace set in __init__.py of the extension
namespace = {}
# If True, raise SyntaxError on eval error otherwise warn
abort_on_eval_failure = False
@classmethod
def _strip_commas(cls, kw):
"Strip out any leading/training commas from the token"
kw = kw[:-1] if kw[-1]==',' else kw
return kw[1:] if kw[0]==',' else kw
@classmethod
def collect_tokens(cls, parseresult, mode):
"""
Collect the tokens from a (potentially) nested parse result.
"""
inner = '(%s)' if mode=='parens' else '[%s]'
if parseresult is None: return []
tokens = []
for token in parseresult.asList():
# If value is a tuple, the token will be a list
if isinstance(token, list):
tokens[-1] = tokens[-1] + (inner % ''.join(token))
else:
if token.strip() == ',': continue
tokens.append(cls._strip_commas(token))
return tokens
@classmethod
def todict(cls, parseresult, mode='parens', ns={}):
"""
Helper function to return dictionary given the parse results
from a pyparsing.nestedExpr object (containing keywords).
The ns is a dynamic namespace (typically the IPython Notebook
namespace) used to update the class-level namespace.
"""
grouped, kwargs = [], {}
tokens = cls.collect_tokens(parseresult, mode)
# Group tokens without '=' and append to last token containing '='
for group in groupby(tokens, lambda el: '=' in el):
(val, items) = group
if val is True:
grouped += list(items)
if val is False:
            elements = list(items)
# Assume anything before ) or } can be joined with commas
# (e.g tuples with spaces in them)
joiner=',' if any(((')' in el) or ('}' in el))
for el in elements) else ''
grouped[-1] += joiner + joiner.join(elements)
for keyword in grouped:
# Tuple ('a', 3) becomes (,'a',3) and '(,' is never valid
# Same for some of the other joining errors corrected here
for (fst,snd) in [('(,', '('), ('{,', '{'), ('=,','='), (',:',':')]:
keyword = keyword.replace(fst, snd)
try:
kwargs.update(eval('dict(%s)' % keyword,
dict(cls.namespace, **ns)))
except:
if cls.abort_on_eval_failure:
raise SyntaxError("Could not evaluate keyword: %r"
% keyword)
msg = "Ignoring keyword pair that fails to evaluate: '%s'"
parsewarning.warning(msg % keyword)
return kwargs
class OptsSpec(Parser):
"""
An OptsSpec is a string specification that describes an
OptionTree. It is a list of tree path specifications (using dotted
syntax) separated by keyword lists for any of the style, plotting
or normalization options. These keyword lists are denoted
'plot(..)', 'style(...)' and 'norm(...)' respectively. These
three groups may be specified even more concisely using keyword
lists delimited by square brackets, parentheses and braces
respectively. All these sets are optional and may be supplied in
any order.
For instance, the following string:
Image (interpolation=None) plot(show_title=False) Curve style(color='r')
Would specify an OptionTree where Image has "interpolation=None"
for style and 'show_title=False' for plot options. The Curve has a
style set such that color='r'.
The parser is fairly forgiving; commas between keywords are
optional and additional spaces are often allowed. The only
restriction is that keywords *must* be immediately followed by the
'=' sign (no space).
"""
plot_options_short = pp.nestedExpr('[',
']',
content=pp.OneOrMore(pp.Word(allowed) ^ pp.quotedString)
).setResultsName('plot_options')
plot_options_long = pp.nestedExpr(opener='plot[',
closer=']',
content=pp.OneOrMore(pp.Word(allowed) ^ pp.quotedString)
).setResultsName('plot_options')
plot_options = (plot_options_short | plot_options_long)
style_options_short = pp.nestedExpr(opener='(',
closer=')',
ignoreExpr=None
).setResultsName("style_options")
style_options_long = pp.nestedExpr(opener='style(',
closer=')',
ignoreExpr=None
).setResultsName("style_options")
style_options = (style_options_short | style_options_long)
norm_options_short = pp.nestedExpr(opener='{',
closer='}',
ignoreExpr=None
).setResultsName("norm_options")
norm_options_long = pp.nestedExpr(opener='norm{',
closer='}',
ignoreExpr=None
).setResultsName("norm_options")
norm_options = (norm_options_short | norm_options_long)
compositor_ops = pp.MatchFirst(
[pp.Literal(el.group) for el in Compositor.definitions])
dotted_path = pp.Combine( pp.Word(ascii_uppercase, exact=1)
+ pp.Word(pp.alphanums+'._'))
pathspec = (dotted_path | compositor_ops).setResultsName("pathspec")
spec_group = pp.Group(pathspec +
(pp.Optional(norm_options)
& pp.Optional(plot_options)
& pp.Optional(style_options)))
opts_spec = pp.OneOrMore(spec_group)
# Aliases that map to the current option name for backward compatibility
aliases = {'horizontal_spacing':'hspace',
'vertical_spacing': 'vspace',
               'figure_alpha': 'fig_alpha',
'figure_bounds': 'fig_bounds',
'figure_inches': 'fig_inches',
'figure_latex': 'fig_latex',
'figure_rcparams': 'fig_rcparams',
'figure_size': 'fig_size',
'show_xaxis': 'xaxis',
'show_yaxis': 'yaxis'}
@classmethod
def process_normalization(cls, parse_group):
"""
Given a normalization parse group (i.e. the contents of the
braces), validate the option list and compute the appropriate
integer value for the normalization plotting option.
"""
if ('norm_options' not in parse_group): return None
opts = parse_group['norm_options'][0].asList()
if opts == []: return None
options = ['+framewise', '-framewise', '+axiswise', '-axiswise']
for normopt in options:
if opts.count(normopt) > 1:
raise SyntaxError("Normalization specification must not"
" contain repeated %r" % normopt)
if not all(opt in options for opt in opts):
raise SyntaxError("Normalization option not one of %s"
% ", ".join(options))
excluded = [('+framewise', '-framewise'), ('+axiswise', '-axiswise')]
for pair in excluded:
if all(exclude in opts for exclude in pair):
raise SyntaxError("Normalization specification cannot"
" contain both %s and %s" % (pair[0], pair[1]))
# If unspecified, default is -axiswise and -framewise
if len(opts) == 1 and opts[0].endswith('framewise'):
axiswise = False
framewise = True if '+framewise' in opts else False
elif len(opts) == 1 and opts[0].endswith('axiswise'):
framewise = False
axiswise = True if '+axiswise' in opts else False
else:
axiswise = True if '+axiswise' in opts else False
framewise = True if '+framewise' in opts else False
return dict(axiswise=axiswise,
framewise=framewise)
@classmethod
def parse(cls, line, ns={}):
"""
Parse an options specification, returning a dictionary with
path keys and {'plot':<options>, 'style':<options>} values.
"""
parses = [p for p in cls.opts_spec.scanString(line)]
if len(parses) != 1:
raise SyntaxError("Invalid specification syntax.")
else:
(k,s,e) = parses[0]
processed = line[:e]
if (processed.strip() != line.strip()):
raise SyntaxError("Failed to parse remainder of string: %r" % line[e:])
parse = {}
for group in cls.opts_spec.parseString(line):
options = {}
normalization = cls.process_normalization(group)
if normalization is not None:
options['norm'] = Options(**normalization)
if 'plot_options' in group:
plotopts = group['plot_options'][0]
opts = cls.todict(plotopts, 'brackets', ns=ns)
options['plot'] = Options(**{cls.aliases.get(k,k):v for k,v in opts.items()})
if 'style_options' in group:
styleopts = group['style_options'][0]
opts = cls.todict(styleopts, 'parens', ns=ns)
options['style'] = Options(**{cls.aliases.get(k,k):v for k,v in opts.items()})
if group['pathspec'] in parse:
# Update in case same pathspec accidentally repeated by the user.
parse[group['pathspec']].update(options)
else:
parse[group['pathspec']] = options
return parse
class CompositorSpec(Parser):
"""
The syntax for defining a set of compositor is as follows:
[ mode op(spec) [settings] value ]+
The components are:
mode : Operation mode, either 'data' or 'display'.
group : Value identifier with capitalized initial letter.
op : The name of the operation to apply.
spec : Overlay specification of form (A * B) where A and B are
dotted path specifications.
settings : Optional list of keyword arguments to be used as
parameters to the operation (in square brackets).
"""
mode = pp.Word(pp.alphas+pp.nums+'_').setResultsName("mode")
op = pp.Word(pp.alphas+pp.nums+'_').setResultsName("op")
overlay_spec = pp.nestedExpr(opener='(',
closer=')',
ignoreExpr=None
).setResultsName("spec")
value = pp.Word(pp.alphas+pp.nums+'_').setResultsName("value")
op_settings = pp.nestedExpr(opener='[',
closer=']',
ignoreExpr=None
).setResultsName("op_settings")
compositor_spec = pp.OneOrMore(pp.Group(mode + op + overlay_spec + value
+ pp.Optional(op_settings)))
@classmethod
def parse(cls, line, ns={}):
"""
Parse compositor specifications, returning a list Compositors
"""
definitions = []
parses = [p for p in cls.compositor_spec.scanString(line)]
if len(parses) != 1:
raise SyntaxError("Invalid specification syntax.")
else:
(k,s,e) = parses[0]
processed = line[:e]
if (processed.strip() != line.strip()):
raise SyntaxError("Failed to parse remainder of string: %r" % line[e:])
opmap = {op.__name__:op for op in Compositor.operations}
for group in cls.compositor_spec.parseString(line):
if ('mode' not in group) or group['mode'] not in ['data', 'display']:
raise SyntaxError("Either data or display mode must be specified.")
mode = group['mode']
kwargs = {}
operation = opmap[group['op']]
spec = ' '.join(group['spec'].asList()[0])
if group['op'] not in opmap:
raise SyntaxError("Operation %s not available for use with compositors."
% group['op'])
if 'op_settings' in group:
kwargs = cls.todict(group['op_settings'][0], 'brackets', ns=ns)
definition = Compositor(str(spec), operation, str(group['value']), mode, **kwargs)
definitions.append(definition)
return definitions
| bsd-3-clause |
saskartt/P4UL | pyRaster/geoTif2NumpyZ.py | 1 | 2094 | #!/usr/bin/env python
import sys
import argparse
import numpy as np
from gdalTools import *
from mapTools import saveTileAsNumpyZ
from utilities import filesFromList, writeLog
from plotTools import addImagePlot
import matplotlib.pyplot as plt
'''
Description:
Author: Mikko Auvinen
[email protected]
University of Helsinki &
Finnish Meteorological Institute
'''
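# Illustrative usage (the file name is hypothetical):
#   ./geoTif2NumpyZ.py -f dem.tif -p
# reads dem.tif, writes dem.npz and also displays the extracted raster.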
#==========================================================#
parser = argparse.ArgumentParser(prog='geoTif2NumpyZ.py')
parser.add_argument("-f", "--filename", type=str, help="Name of the target .tif file", \
default=None)
parser.add_argument("-b", "--bandSelect", help="Raster Band Selection. Default=1.",\
action="store_true", default=False)
parser.add_argument("-p", "--printOn", help="Print the extracted tile.",\
action="store_true", default=False)
parser.add_argument("-pp", "--printOnly", help="Only print the extracted tile. Don't save.",\
action="store_true", default=False)
args = parser.parse_args()
#==========================================================#
# Rename ... that's all.
filename = args.filename
bandSelect = args.bandSelect
printOn = args.printOn
printOnly = args.printOnly
dataset = openGeoTiff( filename )
nBands = numberOfRasterBands( dataset, True) # PrintOn = True/False
ROrig, dPx = getGeoTransform( dataset ) # Both 2d arrays. ROrig is Top Left!
# Ask for the band ID if the user so wants (given that it makes sense)
ib = selectBand( nBands, bandSelect , 1 ) # last argument is for default
rb = getRasterBand( dataset, ib )
printRasterBandStatistics(rb)
# Read the raster
R = readAsNumpyArray( rb )
# Construct the Raster dict
Rdict = dict()
Rdict['dPx'] = dPx
Rdict['rotation'] = 0.
Rdict['R'] = R
Rdict['GlobOrig'] = ROrig
if( not printOnly ):
saveTileAsNumpyZ( filename.replace('.tif','.npz'), Rdict )
Rdict = None
if( printOnly or printOn ):
Rdims = np.array( R.shape )
figDims = 13.*(Rdims[::-1].astype(float)/np.max(Rdims))
fig = plt.figure(num=1, figsize=figDims)
fig = addImagePlot( fig, R, filename )
plt.show()
| mit |
cl4rke/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to stepwise
regression, and thus brings no advantage over a univariate F-test.
In a second step, we fix alpha and compare the performance of different
feature selection methods, using the area under the precision-recall
curve (AUC).
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |