repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes) |
---|---|---|---|---|---|
davidpvilaca/TEP | aula1/aula1.py | 1 | 1609 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import matplotlib.pyplot as plt
import numpy as np
f = plt.imread('field.png')
plt.imshow(f)
# red
r = f.copy()
plt.imshow(r)
r[:,:,1] = 0
r[:,:,2] = 0
plt.imshow(r)
# green
g = f.copy()
g[:,:,0] = 0
g[:,:,2] = 0
plt.imshow(g)
# blue
b = f.copy()
b[:,:,0] = 0
b[:,:,1] = 0
plt.imshow(b)
plt.imshow(f)
# grayscale
gs1 = (f[:,:,0] + f[:,:,1] + f[:,:,2]) / 3
plt.imshow(gs1)
plt.imshow(gs1, cmap=plt.cm.Greys_r)
plt.imshow(f)
gs2 = 0.299*f[:,:,0] + 0.587*f[:,:,1] + 0.114*f[:,:,2]
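# Note: 0.299/0.587/0.114 are the ITU-R BT.601 luma weights, which weight green
# more heavily to match perceived brightness; an equivalent vectorized form
# (assuming f has at least 3 channels) would be:
# gs2 = f[:, :, :3] @ np.array([0.299, 0.587, 0.114])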
plt.imshow(gs2, cmap=plt.cm.Greys_r)
plt.imshow(f)
h = f.copy()
plt.imshow(h)
idx = h[:,:,1] > 0.5
idx.shape
h[idx,1] = 0
plt.imshow(h)
h = f.copy()
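# Below, the green channel is zeroed only where green dominates red
# (e.g. the grass pixels in field.png), unlike the fixed 0.5 threshold used above.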
idx = h[:,:,1] > h[:,:,0]
h[idx,1] = 0
plt.imshow(h)
h.shape
plt.imshow(f)
# histogram
plt.imshow(b)
plt.hist(b.ravel(), 256, [0, 1])
plt.hist(f[:,:,2].ravel(), 256, [0, 1])
plt.hist(f[:,:,0].ravel(), 256, [0, 1])
plt.hist(f[:,:,1].ravel(), 256, [0, 1])
plt.hist(f[:,:,2].ravel(), 256, [0, 1])
plt.hist(f[:,:,0].ravel(), 256, [0, 1], color='r')
plt.hist(f[:,:,1].ravel(), 256, [0, 1], color='g')
plt.hist(f[:,:,2].ravel(), 256, [0, 1], color='b')
plt.hist(gs2.ravel(), 256, [0, 1])
hs,bins = np.histogram(gs2, bins=256)
plt.plot(hs)
plt.hist(gs2.ravel(), 256, [0, 1])
hr,bins = np.histogram(f[:,:,0], bins=256)
hg,bins = np.histogram(f[:,:,1], bins=256)
hb,bins = np.histogram(f[:,:,2], bins=256)
plt.plot(hr, color='r')
plt.plot(hg, color='g')
plt.plot(hb, color='b')
nf = 1 - f
plt.imshow(nf)
| mit |
qrsforever/workspace | python/test/fangzu/collect.py | 1 | 2931 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import re
from room import RoomInfo
from urllib.request import Request, urlopen
from urllib.error import URLError
import matplotlib.pyplot as plt
WEB_SITE = "http://bj.58.com"
AREA = "gaobeidianbj"
TYPE = "zufang"
SUFFIX = "pn"
MAX_PAGES = 20
RE_ROOM = r'<p class="room">(\d{1}.*?)\s+(\d*).*?</p>.*?<div class="money">.*?(\d+).*?</div>'
COLLECT_RE = re.compile(RE_ROOM, re.S)
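# The regex above captures three groups per listing (an assumption based on the
# fields consumed below): the room description, the advertised area in square
# meters, and the monthly rent taken from the "money" div.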
all_rooms = []
rooms_30to50 = []
rooms_50to70 = []
rooms_70to90 = []
def get_url(page):
if page > MAX_PAGES:
return None
url = "/".join([WEB_SITE, AREA, TYPE, SUFFIX]) + str(page)
print(url)
return url
def show():
# xdata = []
# ydata = []
# plt.figure()
# print("")
print("##############面积大小 30 ~ 50 ##########")
# cnt = rooms_30to50.count()
# plt.xlim(0, cnt);
# plt.ylim(2000, 5000)
# line, = plt.plot(xdata, ydata, 'r-')
for i, x in enumerate(rooms_30to50):
print(x)
# xdata.append(i)
# ydata.append(x[3])
print("")
print("##############面积大小 50 ~ 70 ##########")
for x in rooms_50to70:
print(x)
print("")
print("##############面积大小 70 ~ 90 ##########")
for x in rooms_70to90:
print(x)
pass
for i in range(20):
pagei = i + 1
req = Request(get_url(pagei))
try:
html = urlopen(req)
data = html.read()
data = data.decode(encoding='UTF-8', errors='ignore')
with open('pages/' + str(pagei) + '.html', 'wt') as f:
f.write(data)
# with open('pages/' + str(pagei) + '.html', 'rt') as f:
# data = f.read()
data = data.replace(" ", "")
result = COLLECT_RE.findall(data)
for item in result:
flg = False
for x in item:
if x == '':
flg = True
break
if flg:
continue
all_rooms.append(RoomInfo(AREA, *item))
for item in all_rooms:
# print(item)
area = int(item.getArea())
if area < 29 or area > 91:
continue
if area > 29 and 51 > area:
rooms_30to50.append(item)
elif area > 49 and 71 > area:
rooms_50to70.append(item)
elif area > 69 and 91 > area:
rooms_70to90.append(item)
else:
pass
show()
except URLError as e:
if hasattr(e, 'reason'):
print('We failed to reach a server.')
print('Reason: ', e.reason)
elif hasattr(e, 'code'):
print("The server couldn't fulfill the request.")
print('Error code: ', e.code)
else:
print("Unknown error!")
# data = "<p class=\"room\">主卧(2室) 20㎡</p>"
| mit |
JPFrancoia/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 44 | 2262 | """ test the label propagation module """
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.semi_supervised import label_propagation
from sklearn.metrics.pairwise import rbf_kernel
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelPropagation, {
'kernel': lambda x, y: rbf_kernel(x, y, gamma=20)
}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {
'kernel': lambda x, y: rbf_kernel(x, y, gamma=20)
}),
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
wgm2111/wgm-coursera | machine-learning/machine-learning-ex5/ex5/wgm-ex5.py | 1 | 2281 |
"""
An example script that fits the data with linear regression using different orders of polynomial features.
"""
# imports
import scipy as sp
import numpy as np
import scipy.io as sio
import sklearn.linear_model as linear_model
import matplotlib.pyplot as plt
# import data
ex5_data = sio.loadmat('ex5data1.mat') # Loads the matlab/octave file as a dict
# Define variables
X = ex5_data['X']
y = ex5_data["y"]
Xtest = ex5_data['Xtest']
ytest = ex5_data['ytest']
Xval = ex5_data['Xval']
yval = ex5_data['yval']
# Define higher-order features up to polynomial 10
N = 10
X10 = np.array([X.squeeze()**n for n in range(1,N+1)]).transpose()
Xtest10 = np.array([Xtest.squeeze()**n for n in range(1,N+1)]).transpose()
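# X10 and Xtest10 stack the powers x, x**2, ..., x**10 column-wise, so fitting a
# linear model on the first n columns emulates polynomial regression of order n.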
# Define a lr model and fit for each order polynomial
lr_models = [linear_model.LinearRegression(normalize=True) for n in range(N)]
[lr_model.fit(X10[:,:n+1], y) for n, lr_model in zip(range(N), lr_models)]
lr_models_ridgeCV = [linear_model.RidgeCV([1e-5, 1e-4, 1e-3, 1e-2, 1e-1], normalize=True) for n in range(N)]
[lr_model_ridgeCV.fit(X10[:,:n+1], y) for n, lr_model_ridgeCV in zip(range(N), lr_models_ridgeCV)]
# Compute the training and test errors
for i, models in zip([0,1], [lr_models, lr_models_ridgeCV]):
yfit_train = np.array([lr_model.predict(X10[:,:n+1]) for n, lr_model in zip(range(N), models)])
yfit_test = np.array([lr_model.predict(Xtest10[:,:n+1]) for n, lr_model in zip(range(N), models)])
# Cost functions for the training and test sets
Npoly = sp.arange(1,11)
J_train = 1 / (2.0 * yfit_train.shape[1]) * ((y - yfit_train)**2).sum(1)
J_test = 1 / (2.0 * yfit_test.shape[1]) * ((ytest - yfit_test)**2).sum(1)
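# J = 1/(2m) * sum((y_hat - y)**2) is the usual squared-error cost from the
# course exercises, computed here for every polynomial order at once.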
# Make a plot
if i == 0:
f0 = plt.figure(0, (5,5), facecolor='white')
f0.clf()
a0 = f0.add_axes([.1, .1, .85, .85])
a0.plot(Npoly, J_train, 'b', linewidth=2, label="err-train")
a0.plot(Npoly, J_test, 'g', linewidth=2, label="err-test")
a0.set_title("Error as a function of polynomial order")
else:
a0.plot(Npoly, J_train, '--b', linewidth=2, label="err-train-RidgeCV")
a0.plot(Npoly, J_test, '--g', linewidth=2, label="err-test-RidgeCV")
a0.set_ybound(.001, 40)
a0.set_xbound(.5, 9.5)
a0.legend()
f0.show()
f0.savefig("wgm-ex5-learning-curve.png")
| gpl-2.0 |
lakshayg/tensorflow | tensorflow/contrib/factorization/python/ops/gmm_test.py | 41 | 9763 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments, self.scores = self.make_random_points(
self.true_centers, self.num_points)
self.true_score = np.add.reduce(self.scores)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
means = [
np.mean(
points[assignments == center], axis=0)
for center in xrange(num_centers)
]
covs = [
np.cov(points[assignments == center].T)
for center in xrange(num_centers)
]
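# Each score below is the Mahalanobis distance of a point from its assigned
# cluster mean, sqrt((x - mu)^T Sigma^{-1} (x - mu)).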
scores = []
for r in xrange(num_points):
scores.append(
np.sqrt(
np.dot(
np.dot(points[r, :] - means[assignments[r]],
np.linalg.inv(covs[assignments[r]])), points[r, :] -
means[assignments[r]])))
return (points, assignments, scores)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertGreater(score1, score2)
self.assertNear(self.true_score, score2, self.true_score * 0.15)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments, true_offsets = (
self.make_random_points(clusters, num_points))
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
# Test score
score = gmm.score(input_fn=self.input_fn(points=points,
batch_size=num_points), steps=1)
self.assertNear(score, np.sum(true_offsets), 4.05)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
class GMMTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
gmm = gmm_lib.GMM(2, covariance_type='diag')
gmm.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
toastedcornflakes/scikit-learn | examples/manifold/plot_compare_methods.py | 39 | 4036 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
probml/pyprobml | scripts/kf_parallel_demo.py | 1 | 4123 | # Parallel Kalman Filter demo: this script simulates
# 4 missiles as described in the section "state-space models".
# Each of the missiles is then filtered and smoothed in parallel
# Author: Gerardo Durán-Martín (@gerdm)
import jax.numpy as jnp
import lds_lib as lds
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from jax import random
plt.rcParams["axes.spines.right"] = False
plt.rcParams["axes.spines.top"] = False
def sample_filter_smooth(key, lds_model, n_samples, noisy_init):
"""
Sample from a linear dynamical system, apply the Kalman filter
(forward pass), and perform smoothing.
Parameters
----------
key: jax.random.PRNGKey
Random key driving the stochastic simulation
lds_model: LinearDynamicalSystem
Instance of a linear dynamical system with known parameters
n_samples: int
Number of trajectories to simulate
noisy_init: bool
Whether the initial state is sampled with noise
Returns
-------
Dictionary with the following keys and values
* (z_hist) array(n_samples, timesteps, state_size):
Simulation of Latent states
* (x_hist) array(n_samples, timesteps, observation_size):
Simulation of observed states
* (mu_hist) array(n_samples, timesteps, state_size):
Filtered means mut
* (Sigma_hist) array(n_samples, timesteps, state_size, state_size)
Filtered covariances Sigmat
* (mu_cond_hist) array(n_samples, timesteps, state_size)
Filtered conditional means mut|t-1
* (Sigma_cond_hist) array(n_samples, timesteps, state_size, state_size)
Filtered conditional covariances Sigmat|t-1
* (mu_hist_smooth) array(n_samples, timesteps, state_size):
Smoothed means mut
* (Sigma_hist_smooth) array(n_samples, timesteps, state_size, state_size)
Smoothed covariances Sigmat
"""
z_hist, x_hist = lds_model.sample(key, n_samples, noisy_init)
mu_hist, Sigma_hist, mu_cond_hist, Sigma_cond_hist = lds_model.filter(x_hist)
mu_hist_smooth, Sigma_hist_smooth = lds_model.smooth(mu_hist, Sigma_hist, mu_cond_hist, Sigma_cond_hist)
return {
"z_hist": z_hist,
"x_hist": x_hist,
"mu_hist": mu_hist,
"Sigma_hist": Sigma_hist,
"mu_cond_hist": mu_cond_hist,
"Sigma_cond_hist": Sigma_cond_hist,
"mu_hist_smooth": mu_hist_smooth,
"Sigma_hist_smooth": Sigma_hist_smooth
}
def plot_collection(obs, ax, means=None, covs=None, **kwargs):
n_samples, n_steps, _ = obs.shape
for nsim in range(n_samples):
X = obs[nsim]
if means is not None:
mean = means[nsim]
ax.scatter(*mean[0, :2], marker="o", s=20, c="black", zorder=2)
ax.plot(*mean[:, :2].T, marker="o", markersize=2, **kwargs, zorder=1)
if covs is not None:
cov = covs[nsim]
for t in range(1, n_steps, 3):
pml.plot_ellipse(cov[t][:2, :2], mean[t, :2], ax,
plot_center=False, alpha=0.7)
ax.scatter(*X.T, marker="+", s=60)
if __name__ == "__main__":
Δ = 1.0
A = jnp.array([
[1, 0, Δ, 0],
[0, 1, 0, Δ],
[0, 0, 1, 0],
[0, 0, 0, 1]
])
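# A is a constant-velocity transition matrix: positions are advanced by
# velocity * Δ at each step, while the velocities themselves stay fixed.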
C = jnp.array([
[1, 0, 0, 0],
[0, 1, 0, 0]
])
state_size, _ = A.shape
observation_size, _ = C.shape
Q = jnp.eye(state_size) * 0.01
R = jnp.eye(observation_size) * 1.2
# Prior parameter distribution
mu0 = jnp.array([8, 10, 1, 0])
Sigma0 = jnp.eye(state_size) * 0.1
n_samples = 4
n_steps = 15
key = random.PRNGKey(3141)
lds_instance = lds.KalmanFilter(A, C, Q, R, mu0, Sigma0, n_steps)
result = sample_filter_smooth(key, lds_instance, n_samples, True)
fig, ax = plt.subplots()
plot_collection(result["x_hist"], ax, result["z_hist"], linestyle="--")
ax.set_title("State space")
pml.savefig("missiles_latent.pdf")
fig, ax = plt.subplots()
plot_collection(result["x_hist"], ax, result["mu_hist"], result["Sigma_hist"])
ax.set_title("Filtered")
pml.savefig("missiles_filtered.pdf")
fig, ax = plt.subplots()
plot_collection(result["x_hist"], ax, result["mu_hist_smooth"], result["Sigma_hist_smooth"])
ax.set_title("Smoothed")
pml.savefig("missiles_smoothed.pdf")
plt.show()
| mit |
chebee7i/twitter | scripts/fisher.py | 1 | 4408 | """
Write hashtag Frobenius scores to file.
"""
import io
from operator import itemgetter
import time
import numpy as np
from scipy.stats.mstats import mquantiles
import twitterproj
from twitterproj.fisher import *
from twitterproj.fisher import pipeline
db = twitterproj.connect()
def hashtag_scores():
N = 5000
lines = frobenius_hashtags(N)
lines = [u','.join(map(unicode, line)) for line in lines]
lines.insert(0, u'# hashtag, count, user count, frobenius norm of FIM')
filename = 'htscores.csv'
with io.open(filename, 'w') as fobj:
fobj.write(u'\n'.join(lines))
def scatter():
with open('htscores.csv') as f:
lines = list(f.readlines())[1:]
counts = []
usercounts = []
scores = []
for line in lines:
ht, c, uc, score = line.strip().split(',')
counts.append(float(c))
usercounts.append(float(uc))
scores.append(float(score))
import matplotlib.pyplot as plt
plt.style.use('ggplot')
f, axes = plt.subplots(1,2)
axes = list(reversed(axes))
plt.sca(axes[0])
clip = None
scat = axes[0].scatter(counts[:clip], usercounts[:clip], c=scores[:clip], s=10, cmap=plt.cm.cool, edgecolors='none', alpha=.2)
cb = f.colorbar(scat)
axes[0].set_xlabel('Hashtag Count')
axes[0].set_ylabel('Hashtag User Count')
axes[0].set_xscale('log')
axes[0].set_yscale('log')
cb.set_label('Frobenius Norm')
if clip is not None:
axes[0].set_title('Lowest {0} Scores'.format(clip))
axes[1].hist(scores, bins=15)
axes[1].set_xlabel('$d$, Frobenius Norm')
axes[1].set_ylabel('Bin count of $d$')
if clip is not None:
axes[1].set_title('Histogram of all scores')
f.tight_layout()
f.savefig('scores.pdf')
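# Runner is kept as a picklable top-level class (rather than a lambda/closure)
# so that multiprocessing.Pool.map in county_quants below can dispatch it.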
class Runner(object):
def __init__(self, hashtags):
self.hashtags = hashtags
def __call__(self, k):
htags = self.hashtags[:k]
counties = frobenius_counties(htags)
scores = [x[1] for x in counties]
quants = mquantiles(scores)
return quants
def county_scores(k=None, relative=True, to_csv=True):
if k is None:
import sys
try:
k = int(sys.argv[1])
except IndexError:
k = 50
N = 5000
lines = frobenius_hashtags(N)
hashtags = [line[0] for line in lines]
htags = hashtags[:k]
counties = frobenius_counties(htags, relative=relative)
import json
d = {}
for geoid, score, counts in counties:
d[geoid] = score
d['min'] = 0
d['max'] = 1
with open('json/grids.counties.bot_filtered.fisherscores.json', 'w') as f:
json.dump(d, f)
d = {}
for geoid, score, counts in counties:
d[geoid] = (score, counts)
if to_csv:
lines = []
for geoid, score, counts in counties:
line = [geoid, score]
line.extend(counts)
line = map(str, line)
lines.append(','.join(line))
header = '# geoid,{0}fisher score, [counts]'
if relative:
header = header.format(' relative ')
else:
header = header.format(' ')
lines.insert(0, header)
filename = 'hashtag_fisherscores_relative_n{0}.csv'
filename = filename.format(k)
with open(filename, 'w') as f:
f.write('\n'.join(lines))
return htags, counties, d
def county_quants(k=None):
if k is None:
import sys
try:
k = int(sys.argv[1])
except IndexError:
k = 50
N = 5000
lines = frobenius_hashtags(N)
hashtags = [line[0] for line in lines]
from multiprocessing import Pool
import json
p = Pool(22)
kvals = range(10, 205, 5)
runner = Runner(hashtags)
quants = p.map(runner, kvals)
quants = map(list, quants)
d = [kvals, quants]
with open('quants.json', 'w') as f:
json.dump(d, f)
return kvals, quants
def plot_quants(kvals, quants):
import matplotlib.pyplot as plt
import seaborn
quants = np.array(quants)
quants = quants.transpose()
plt.plot(quants[0], 'o-', label="25th percentile")
plt.plot(quants[1], 'o-', label="50th percentile")
plt.plot(quants[2], 'o-', label="75th percentile")
plt.ylabel('Relative Fisher Score')
plt.xlabel('Number of Hashtags')
plt.legend(loc='best')
plt.savefig('fisherscores.pdf')
| unlicense |
Midafi/scikit-image | skimage/viewer/utils/core.py | 18 | 6556 | import warnings
import numpy as np
from ..qt import QtWidgets, has_qt, FigureManagerQT, FigureCanvasQTAgg
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib import _pylab_helpers
from matplotlib.colors import LinearSegmentedColormap
if has_qt and 'agg' not in mpl.get_backend().lower():
warnings.warn("Recommended matplotlib backend is `Agg` for full "
"skimage.viewer functionality.")
__all__ = ['init_qtapp', 'start_qtapp', 'RequiredAttr', 'figimage',
'LinearColormap', 'ClearColormap', 'FigureCanvas', 'new_plot',
'update_axes_image']
QApp = None
def init_qtapp():
"""Initialize QAppliction.
The QApplication needs to be initialized before creating any QWidgets
"""
global QApp
QApp = QtWidgets.QApplication.instance()
if QApp is None:
QApp = QtWidgets.QApplication([])
return QApp
def is_event_loop_running(app=None):
"""Return True if event loop is running."""
if app is None:
app = init_qtapp()
if hasattr(app, '_in_event_loop'):
return app._in_event_loop
else:
return False
def start_qtapp(app=None):
"""Start Qt mainloop"""
if app is None:
app = init_qtapp()
if not is_event_loop_running(app):
app._in_event_loop = True
app.exec_()
app._in_event_loop = False
else:
app._in_event_loop = True
class RequiredAttr(object):
"""A class attribute that must be set before use."""
instances = dict()
def __init__(self, init_val=None):
self.instances[self, None] = init_val
def __get__(self, obj, objtype):
value = self.instances[self, obj]
if value is None:
raise AttributeError('Required attribute not set')
return value
def __set__(self, obj, value):
self.instances[self, obj] = value
class LinearColormap(LinearSegmentedColormap):
"""LinearSegmentedColormap in which color varies smoothly.
This class is a simplification of LinearSegmentedColormap that does not
support jumps in color intensities.
Parameters
----------
name : str
Name of colormap.
segmented_data : dict
Dictionary of 'red', 'green', 'blue', and (optionally) 'alpha' values.
Each color key contains a list of `x`, `y` tuples. `x` must increase
monotonically from 0 to 1 and corresponds to input values for a
mappable object (e.g. an image). `y` corresponds to the color
intensity.
"""
def __init__(self, name, segmented_data, **kwargs):
segmented_data = dict((key, [(x, y, y) for x, y in value])
for key, value in segmented_data.items())
LinearSegmentedColormap.__init__(self, name, segmented_data, **kwargs)
class ClearColormap(LinearColormap):
"""Color map that varies linearly from alpha = 0 to 1
"""
def __init__(self, rgb, max_alpha=1, name='clear_color'):
r, g, b = rgb
cg_speq = {'blue': [(0.0, b), (1.0, b)],
'green': [(0.0, g), (1.0, g)],
'red': [(0.0, r), (1.0, r)],
'alpha': [(0.0, 0.0), (1.0, max_alpha)]}
LinearColormap.__init__(self, name, cg_speq)
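# Hypothetical usage sketch: ClearColormap((1., 0., 0.)) yields a colormap that
# fades from fully transparent to opaque red, useful for overlay masks.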
class FigureCanvas(FigureCanvasQTAgg):
"""Canvas for displaying images."""
def __init__(self, figure, **kwargs):
self.fig = figure
FigureCanvasQTAgg.__init__(self, self.fig)
FigureCanvasQTAgg.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvasQTAgg.updateGeometry(self)
def resizeEvent(self, event):
FigureCanvasQTAgg.resizeEvent(self, event)
# Call to `resize_event` missing in FigureManagerQT.
# See https://github.com/matplotlib/matplotlib/pull/1585
self.resize_event()
def new_canvas(*args, **kwargs):
"""Return a new figure canvas."""
allnums = _pylab_helpers.Gcf.figs.keys()
num = max(allnums) + 1 if allnums else 1
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvas(figure)
fig_manager = FigureManagerQT(canvas, num)
return fig_manager.canvas
def new_plot(parent=None, subplot_kw=None, **fig_kw):
"""Return new figure and axes.
Parameters
----------
parent : QtWidget
Qt widget that displays the plot objects. If None, you must manually
call ``canvas.setParent`` and pass the parent widget.
subplot_kw : dict
Keyword arguments passed ``matplotlib.figure.Figure.add_subplot``.
fig_kw : dict
Keyword arguments passed ``matplotlib.figure.Figure``.
"""
if subplot_kw is None:
subplot_kw = {}
canvas = new_canvas(**fig_kw)
canvas.setParent(parent)
fig = canvas.figure
ax = fig.add_subplot(1, 1, 1, **subplot_kw)
return fig, ax
def figimage(image, scale=1, dpi=None, **kwargs):
"""Return figure and axes with figure tightly surrounding image.
Unlike pyplot.figimage, this actually plots onto an axes object, which
fills the figure. Plotting the image onto an axes allows for subsequent
overlays of axes artists.
Parameters
----------
image : array
image to plot
scale : float
If scale is 1, the figure and axes have the same dimension as the
image. Smaller values of `scale` will shrink the figure.
dpi : int
Dots per inch for figure. If None, use the default rcParam.
"""
dpi = dpi if dpi is not None else mpl.rcParams['figure.dpi']
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('cmap', 'gray')
h, w, d = np.atleast_3d(image).shape
figsize = np.array((w, h), dtype=float) / dpi * scale
fig, ax = new_plot(figsize=figsize, dpi=dpi)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1)
ax.set_axis_off()
ax.imshow(image, **kwargs)
ax.figure.canvas.draw()
return fig, ax
def update_axes_image(image_axes, image):
"""Update the image displayed by an image plot.
This sets the image plot's array and updates its shape appropriately
Parameters
----------
image_axes : `matplotlib.image.AxesImage`
Image axes to update.
image : array
Image array.
"""
image_axes.set_array(image)
# Adjust size if new image shape doesn't match the original
h, w = image.shape[:2]
image_axes.set_extent((0, w, h, 0))
| bsd-3-clause |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/scipy/interpolate/tests/test_rbf.py | 14 | 4604 | # Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal, run_module_suite)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (1D)
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
def check_rbf2d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (2D).
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
def check_rbf3d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (3D).
x = random.rand(50, 1)*4 - 2
y = random.rand(50, 1)*4 - 2
z = random.rand(50, 1)*4 - 2
d = x*exp(-x**2 - y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
def test_rbf_interpolation():
for function in FUNCTIONS:
yield check_rbf1d_interpolation, function
yield check_rbf2d_interpolation, function
yield check_rbf3d_interpolation, function
def check_rbf1d_regularity(function, atol):
# Check that the Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
# plt.plot(x, y, 'o', xi, yi-sin(xi), ':')
# plt.title(function)
# plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def check_rbf1d_stability(function):
# Check that the Rbf function with default epsilon is not subject
# to overshoot. Regression for issue #4523.
#
# Generate some data (fixed random seed hence deterministic)
np.random.seed(1234)
x = np.linspace(0, 10, 50)
z = x + 4.0 * np.random.randn(len(x))
rbf = Rbf(x, z, function=function)
xi = np.linspace(0, 10, 1000)
yi = rbf(xi)
# subtract the linear trend and make sure there no spikes
assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
def test_rbf_stability():
for function in FUNCTIONS:
yield check_rbf1d_stability, function
def test_default_construction():
# Check that the Rbf class can be constructed with the default
# multiquadric basis function. Regression test for ticket #1228.
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
# Check that the Rbf class can be constructed with function=callable.
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
# Check that the Rbf class can be constructed with a two argument
# function=callable.
def _func(self, r):
return self.epsilon + r
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, epsilon=None)
def test_rbf_epsilon_none_collinear():
# Check that collinear points in one dimension doesn't cause an error
# due to epsilon = 0
x = [1, 2, 3]
y = [4, 4, 4]
z = [5, 6, 7]
rbf = Rbf(x, y, z, epsilon=None)
assert_(rbf.epsilon > 0)
if __name__ == "__main__":
run_module_suite()
| apache-2.0 |
great-expectations/great_expectations | tests/integration/test_script_runner.py | 1 | 16781 | import enum
import os
import shutil
import subprocess
import sys
import pytest
from assets.scripts.build_gallery import execute_shell_command
from great_expectations.data_context.util import file_relative_path
class BackendDependencies(enum.Enum):
BIGQUERY = "BIGQUERY"
MYSQL = "MYSQL"
MSSQL = "MSSQL"
PANDAS = "PANDAS"
POSTGRESQL = "POSTGRESQL"
REDSHIFT = "REDSHIFT"
SPARK = "SPARK"
SQLALCHEMY = "SQLALCHEMY"
SNOWFLAKE = "SNOWFLAKE"
docs_test_matrix = [
# {
# "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/bigquery_yaml_example.py",
# "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
# "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
# "util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
# "extra_backend_dependencies": BackendDependencies.BIGQUERY,
# },
# {
# "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/bigquery_python_example.py",
# "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
# "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
# "util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
# "extra_backend_dependencies": BackendDependencies.BIGQUERY,
# },
# {
# "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/cloud/pandas_s3_yaml_example.py",
# "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
# },
# {
# "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/cloud/pandas_s3_python_example.py",
# "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
# },
# {
# "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/cloud/spark_s3_yaml_example.py",
# "extra_backend_dependencies": BackendDependencies.SPARK,
# },
# {
# "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/cloud/spark_s3_python_example.py",
# "extra_backend_dependencies": BackendDependencies.SPARK,
# },
# {
# "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/redshift_python_example.py",
# "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
# "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
# "util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
# "extra_backend_dependencies": BackendDependencies.REDSHIFT,
# },
# {
# "user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/redshift_yaml_example.py",
# "data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
# "data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
# "util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
# "extra_backend_dependencies": BackendDependencies.REDSHIFT,
# },
{
"name": "getting_started",
"data_context_dir": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
"user_flow_script": "tests/integration/docusaurus/tutorials/getting-started/getting_started.py",
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/filesystem/pandas_yaml_example.py",
"data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/filesystem/pandas_python_example.py",
"data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/postgres_yaml_example.py",
"data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
"util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
"extra_backend_dependencies": BackendDependencies.POSTGRESQL,
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/postgres_python_example.py",
"data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
"util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
"extra_backend_dependencies": BackendDependencies.POSTGRESQL,
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/snowflake_python_example.py",
"data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
"util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
"extra_backend_dependencies": BackendDependencies.SNOWFLAKE,
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/snowflake_yaml_example.py",
"data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
"util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
"extra_backend_dependencies": BackendDependencies.SNOWFLAKE,
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/sqlite_yaml_example.py",
"data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples/sqlite/",
"extra_backend_dependencies": BackendDependencies.SQLALCHEMY,
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/sqlite_python_example.py",
"data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples/sqlite/",
"extra_backend_dependencies": BackendDependencies.SQLALCHEMY,
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/in_memory/pandas_yaml_example.py",
"data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/in_memory/pandas_python_example.py",
"data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
},
{
"user_flow_script": "tests/integration/docusaurus/template/script_example.py",
"data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/in_memory/spark_yaml_example.py",
"extra_backend_dependencies": BackendDependencies.SPARK,
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/in_memory/spark_python_example.py",
"extra_backend_dependencies": BackendDependencies.SPARK,
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/filesystem/spark_yaml_example.py",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
"extra_backend_dependencies": BackendDependencies.SPARK,
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/filesystem/spark_python_example.py",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
"extra_backend_dependencies": BackendDependencies.SPARK,
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/mysql_yaml_example.py",
"data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
"util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
"extra_backend_dependencies": BackendDependencies.MYSQL,
},
{
"user_flow_script": "tests/integration/docusaurus/connecting_to_your_data/database/mysql_python_example.py",
"data_context_dir": "tests/integration/fixtures/no_datasources/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
"util_script": "tests/integration/docusaurus/connecting_to_your_data/database/util.py",
"extra_backend_dependencies": BackendDependencies.MYSQL,
},
{
"name": "rule_base_profiler_multi_batch_example",
"data_context_dir": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
"user_flow_script": "tests/integration/docusaurus/expectations/advanced/multi_batch_rule_based_profiler_example.py",
},
]
"""
TODO(cdkini): Kept running into a sqlalchemy.exc.OperationalError when running Snowflake tests.
Per discussion with Will, issue appears to be on the Snowflake side as opposed to something in our code.
Commenting out until we figure out the issue.
"""
integration_test_matrix = [
{
"name": "pandas_one_multi_batch_request_one_validator",
"data_context_dir": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
"user_flow_script": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/one_multi_batch_request_one_validator.py",
},
{
"name": "pandas_two_batch_requests_two_validators",
"data_context_dir": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
"user_flow_script": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/two_batch_requests_two_validators.py",
"expected_stderrs": "",
"expected_stdouts": "",
},
{
"name": "pandas_multiple_batch_requests_one_validator_multiple_steps",
"data_context_dir": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
"user_flow_script": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/multiple_batch_requests_one_validator_multiple_steps.py",
},
{
"name": "pandas_multiple_batch_requests_one_validator_one_step",
"data_context_dir": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/great_expectations",
"data_dir": "tests/test_sets/taxi_yellow_trip_data_samples",
"user_flow_script": "tests/integration/fixtures/yellow_trip_data_pandas_fixture/multiple_batch_requests_one_validator_one_step.py",
},
]
def idfn(test_configuration):
return test_configuration.get("user_flow_script")
@pytest.fixture
def pytest_parsed_arguments(request):
return request.config.option
@pytest.mark.docs
@pytest.mark.integration
@pytest.mark.parametrize("test_configuration", docs_test_matrix, ids=idfn)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires Python3.7")
def test_docs(test_configuration, tmp_path, pytest_parsed_arguments):
_check_for_skipped_tests(pytest_parsed_arguments, test_configuration)
_execute_integration_test(test_configuration, tmp_path)
@pytest.mark.integration
@pytest.mark.parametrize("test_configuration", integration_test_matrix, ids=idfn)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires Python3.7")
def test_integration_tests(test_configuration, tmp_path, pytest_parsed_arguments):
_check_for_skipped_tests(pytest_parsed_arguments, test_configuration)
_execute_integration_test(test_configuration, tmp_path)
def _execute_integration_test(test_configuration, tmp_path):
"""
Prepare and environment and run integration tests from a list of tests.
Note that the only required parameter for a test in the matrix is
`user_flow_script` and that all other parameters are optional.
"""
assert (
"user_flow_script" in test_configuration.keys()
), "a `user_flow_script` is required"
workdir = os.getcwd()
try:
base_dir = test_configuration.get(
"base_dir", file_relative_path(__file__, "../../")
)
os.chdir(tmp_path)
# Ensure GE is installed in our environment
ge_requirement = test_configuration.get("ge_requirement", "great_expectations")
execute_shell_command(f"pip install {ge_requirement}")
#
# Build test state
#
# DataContext
if test_configuration.get("data_context_dir"):
context_source_dir = os.path.join(
base_dir, test_configuration.get("data_context_dir")
)
test_context_dir = os.path.join(tmp_path, "great_expectations")
shutil.copytree(
context_source_dir,
test_context_dir,
)
# Test Data
if test_configuration.get("data_dir") is not None:
source_data_dir = os.path.join(base_dir, test_configuration.get("data_dir"))
test_data_dir = os.path.join(tmp_path, "data")
shutil.copytree(
source_data_dir,
test_data_dir,
)
# UAT Script
script_source = os.path.join(
base_dir,
test_configuration.get("user_flow_script"),
)
script_path = os.path.join(tmp_path, "test_script.py")
shutil.copyfile(script_source, script_path)
# Util Script
if test_configuration.get("util_script") is not None:
script_source = os.path.join(
base_dir,
test_configuration.get("util_script"),
)
util_script_path = os.path.join(tmp_path, "util.py")
shutil.copyfile(script_source, util_script_path)
# Check initial state
# Execute test
res = subprocess.run(["python", script_path], capture_output=True)
# Check final state
expected_stderrs = test_configuration.get("expected_stderrs")
expected_stdouts = test_configuration.get("expected_stdouts")
expected_failure = test_configuration.get("expected_failure")
outs = res.stdout.decode("utf-8")
errs = res.stderr.decode("utf-8")
print(outs)
print(errs)
if expected_stderrs:
assert expected_stderrs == errs
if expected_stdouts:
assert expected_stdouts == outs
if expected_failure:
assert res.returncode != 0
else:
assert res.returncode == 0
except:
raise
finally:
os.chdir(workdir)
def _check_for_skipped_tests(pytest_args, test_configuration) -> None:
"""Enable scripts to be skipped based on pytest invocation flags."""
dependencies = test_configuration.get("extra_backend_dependencies", None)
if not dependencies:
return
elif dependencies == BackendDependencies.POSTGRESQL and (
pytest_args.no_postgresql or pytest_args.no_sqlalchemy
):
pytest.skip("Skipping postgres tests")
elif dependencies == BackendDependencies.MYSQL and (
not pytest_args.mysql or pytest_args.no_sqlalchemy
):
pytest.skip("Skipping mysql tests")
elif dependencies == BackendDependencies.MSSQL and (
pytest_args.no_mssql or pytest_args.no_sqlalchemy
):
pytest.skip("Skipping mssql tests")
elif dependencies == BackendDependencies.BIGQUERY and pytest_args.no_sqlalchemy:
pytest.skip("Skipping bigquery tests")
elif dependencies == BackendDependencies.REDSHIFT and pytest_args.no_sqlalchemy:
pytest.skip("Skipping redshift tests")
elif dependencies == BackendDependencies.SPARK and pytest_args.no_spark:
pytest.skip("Skipping spark tests")
elif dependencies == BackendDependencies.SNOWFLAKE and pytest_args.no_sqlalchemy:
pytest.skip("Skipping snowflake tests")
| apache-2.0 |
JPFrancoia/scikit-learn | sklearn/decomposition/dict_learning.py | 13 | 46149 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000, check_input=True, verbose=0):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
check_input: boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose: int
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=verbose, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
# TODO: Make verbosity argument for Lasso?
# sklearn.linear_model.coordinate_descent.enet_path has a verbosity
# argument that we could pass in from Lasso.
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=check_input)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lars = Lars(fit_intercept=False, verbose=verbose, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
# TODO: Should verbose argument be passed to this?
new_code = orthogonal_mp_gram(
Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization),
tol=None, norms_squared=row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
                         % algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1, check_input=True, verbose=0):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
check_input: boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose : int, optional
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
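    Examples
    --------
    A minimal, illustrative sketch; the dictionary below is random rather
    than learned, so only the shapes are meaningful:
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(10, 8)
    >>> D = rng.randn(5, 8)
    >>> code = sparse_encode(X, D, algorithm='omp', n_nonzero_coefs=2)
    >>> code.shape
    (10, 5)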
"""
if check_input:
if algorithm == 'lasso_cd':
dictionary = check_array(dictionary, order='C', dtype='float64')
X = check_array(X, order='C', dtype='float64')
else:
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter,
check_input=False,
verbose=verbose)
        # This ensures that the dimensionality of code is always 2,
        # consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter,
check_input=False)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
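    Examples
    --------
    An illustrative call on random data (only the shapes matter here):
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> Y = rng.randn(8, 10)                      # (n_features, n_samples)
    >>> code = rng.randn(5, 10)                   # (n_components, n_samples)
    >>> D = np.asfortranarray(rng.randn(8, 5))    # (n_features, n_components)
    >>> _update_dict(D, Y, code, random_state=rng).shape
    (8, 5)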
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : int,
Sparsity controlling parameter.
max_iter : int,
Maximum number of iterations to perform.
tol : float,
Tolerance for the stopping condition.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init : array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
verbose :
Degree of output the procedure will print.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary : array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors : array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
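    Examples
    --------
    A short, illustrative run on random data; the learned atoms themselves
    are not meaningful, the call only demonstrates the returned shapes:
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(20, 8)
    >>> code, dictionary, errors = dict_learning(X, n_components=5, alpha=1,
    ...                                          random_state=rng)
    >>> code.shape, dictionary.shape
    ((20, 5), (5, 8))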
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
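    Examples
    --------
    A short, illustrative run on random data (shapes only; the atoms are
    not meaningful):
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(30, 8)
    >>> code, dictionary = dict_learning_online(X, n_components=5, alpha=1,
    ...                                         n_iter=10, random_state=rng)
    >>> code.shape, dictionary.shape
    ((30, 5), (5, 8))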
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
    # If n_iter is zero, the number of iterations returned should be zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs, check_input=False)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. Rows are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
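    Examples
    --------
    A minimal, illustrative sketch with a random (unnormalized) dictionary;
    only the output shape is meaningful here:
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> D = rng.randn(6, 10)
    >>> X = rng.randn(4, 10)
    >>> coder = SparseCoder(dictionary=D, transform_algorithm='omp',
    ...                     transform_n_nonzero_coefs=2)
    >>> coder.transform(X).shape
    (4, 6)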
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
.. versionadded:: 0.17
*cd* coordinate descent method to improve speed.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
.. versionadded:: 0.17
*lasso_cd* coordinate descent method to improve speed.
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
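    Examples
    --------
    A short, illustrative fit on random data (shapes only):
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(12, 8)
    >>> dico = DictionaryLearning(n_components=5, alpha=1, max_iter=20,
    ...                           random_state=rng)
    >>> dico.fit(X).components_.shape
    (5, 8)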
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
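    Examples
    --------
    A short, illustrative fit on random data (shapes only):
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(30, 8)
    >>> mbdl = MiniBatchDictionaryLearning(n_components=5, alpha=1,
    ...                                    n_iter=10, batch_size=3,
    ...                                    random_state=rng)
    >>> mbdl.fit(X).components_.shape
    (5, 8)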
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
            The number of iterations on data batches that have been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
| bsd-3-clause |
briancappello/PyTradeLib | pytradelib/store.py | 1 | 8189 | import os
import pytz
import talib as ta
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pytradelib.settings import DATA_DIR, ZIPLINE_CACHE_DIR
from pytradelib.utils import (
percent_change,
within_percent_of_value,
crossed,
)
class CSVStore(object):
_symbols = []
_csv_files = {}
_start_dates = {}
_end_dates = {}
_df_cache = {}
def __init__(self):
self._store_contents = self._get_store_contents()
for d in self._store_contents:
symbol = d['symbol']
self._symbols.append(symbol)
self._csv_files[symbol] = d['csv_path']
self._start_dates[symbol] = d['start']
self._end_dates[symbol] = d['end']
self._symbols.sort()
@property
def symbols(self):
return self._symbols
def get_df(self, symbol, start=None, end=None):
symbol = symbol.upper()
def to_timestamp(dt, default):
if dt:
return pd.Timestamp(dt, tz=pytz.UTC)
return default
start = to_timestamp(start, 0)
end = to_timestamp(end, None)
df = self._df_cache.get(symbol, None)
if df is None:
csv_path = self._csv_files[symbol]
df = pd.DataFrame.from_csv(csv_path).tz_localize(pytz.UTC)
self._df_cache[symbol] = df
return df[start:end]
def set_df(self, symbol, df):
self._update_df(symbol, df)
def _update_df(self, symbol, df):
symbol = symbol.upper()
if symbol in self.symbols:
existing_df = self.get_df(symbol)
df = existing_df.append(df[existing_df.index[-1] + DateOffset(days=1):])
os.remove(self.get_csv_path(symbol))
df = self._add_ta(df)
csv_path = self._get_store_path(symbol, df.index[0], df.index[-1])
df.to_csv(csv_path)
self._set_csv_path(symbol, csv_path)
self._set_end_date(symbol, df.index[-1])
self._df_cache[symbol] = df
def get_dfs(self, symbols=None, start=None, end=None):
symbols = symbols or self.symbols
if not isinstance(symbols, list):
symbols = [symbols]
return dict(zip(
[symbol.upper() for symbol in symbols],
[self.get_df(symbol, start, end) for symbol in symbols]
))
def set_dfs(self, symbol_df_dict):
for symbol, df in symbol_df_dict.items():
self.set_df(symbol, df)
def analyze(self, symbols=None, use_adjusted=True):
'''
:param symbols: list of symbols (defaults to all in the store)
:param use_adjusted: whether or not to use adjusted prices
:return: DataFrame
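        Example (illustrative; assumes the store already holds price data
        for a hypothetical 'AAPL' symbol):
            store = CSVStore()
            metrics = store.analyze(symbols=['AAPL'])
            print(metrics[['close', 'percent_change', 'new_high']])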
'''
def key(price_key):
return 'Adj ' + price_key if use_adjusted else price_key
results = {}
for symbol, df in self.get_dfs(symbols).items():
today = df.iloc[-1]
yesterday = df.iloc[-2]
most_recent_year = df[df.index[-1] - DateOffset(years=1):]
year_low = most_recent_year[key('Low')][:-1].min()
year_high = most_recent_year[key('High')][:-1].max()
dollar_volume_desc = most_recent_year.dollar_volume.describe()
results[symbol] = {
# last-bar metrics
'previous_close': yesterday[key('Close')],
'close': today[key('Close')],
'percent_change': percent_change(yesterday[key('Close')], today[key('Close')]),
'dollar_volume': today.dollar_volume,
'percent_of_median_volume': (today.Volume * today[key('Close')]) / dollar_volume_desc['50%'],
'advancing': today[key('Close')] > yesterday[key('Close')],
'declining': today[key('Close')] < yesterday[key('Close')],
'new_low': today[key('Close')] < year_low,
'new_high': today[key('Close')] > year_high,
'percent_of_high': today[key('Close')] / year_high,
'near_sma100': within_percent_of_value(today[key('Close')], today['sma100'], 2),
'near_sma200': within_percent_of_value(today[key('Close')], today['sma200'], 2),
'crossed_sma100': crossed(today['sma100'], yesterday, today),
'crossed_sma200': crossed(today['sma200'], yesterday, today),
'crossed_5': crossed(5, yesterday, today),
'crossed_10': crossed(10, yesterday, today),
'crossed_50': crossed(50, yesterday, today),
'crossed_100': crossed(100, yesterday, today),
# stock "health" metrics
'year_high': year_high,
'year_low': year_low,
'min_volume': int(dollar_volume_desc['min']),
'dollar_volume_25th_percentile': int(dollar_volume_desc['25%']),
'dollar_volume_75th_percentile': int(dollar_volume_desc['75%']),
'atr': most_recent_year.atr.describe()['50%'],
}
# transpose the DataFrame so we have rows of symbols, and metrics as columns
return pd.DataFrame(results).T
def get_start_date(self, symbol):
return self._start_dates[symbol.upper()]
def _set_start_date(self, symbol, start_date):
self._start_dates[symbol] = start_date
def get_end_date(self, symbol):
return self._end_dates[symbol.upper()]
def _set_end_date(self, symbol, end_date):
self._end_dates[symbol] = end_date
def get_csv_path(self, symbol):
return self._csv_files[symbol]
def _set_csv_path(self, symbol, csv_path):
self._csv_files[symbol] = csv_path
def _get_store_contents(self):
csv_files = [os.path.join(ZIPLINE_CACHE_DIR, f)\
for f in os.listdir(ZIPLINE_CACHE_DIR)\
if f.endswith('.csv')]
return [self._decode_store_path(path) for path in csv_files]
def _decode_store_path(self, csv_path):
filename = os.path.basename(csv_path).replace('--', os.path.sep)
symbol = filename[:filename.find('-')]
dates = filename[len(symbol)+1:].replace('.csv', '')
        start = dates[:len(dates) // 2]
        end = dates[len(dates) // 2:]
def to_dt(dt_str):
date, time = dt_str.split(' ')
return pd.Timestamp(' '.join([date, time.replace('-', ':')]))
return {
'symbol': symbol,
'csv_path': csv_path,
'start': to_dt(start),
'end': to_dt(end),
}
def _get_store_path(self, symbol, start, end):
'''
:param symbol: string - the ticker
:param start: pd.Timestamp - the earliest date
:param end: pd.Timestamp - the latest date
:return: string - path for the CSV
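        Illustrative result (hypothetical symbol and dates):
        'AAPL-2015-01-02 00-00-00+00-00-2015-12-31 00-00-00+00-00.csv'
        under ZIPLINE_CACHE_DIR, i.e. any ':' in the timestamps is
        replaced by '-'.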
'''
filename_format = '%(symbol)s-%(start)s-%(end)s.csv'
return os.path.join(ZIPLINE_CACHE_DIR, filename_format % {
'symbol': symbol.upper().replace(os.path.sep, '--'),
'start': start,
'end': end,
}).replace(':', '-')
def _add_ta(self, df, use_adjusted=True):
def key(price_key):
return 'Adj ' + price_key if use_adjusted else price_key
df['dollar_volume'] = df[key('Close')] * df.Volume
df['atr'] = ta.NATR(df[key('High')].values, df[key('Low')].values, df[key('Close')].values) # timeperiod 14
df['sma100'] = ta.SMA(df[key('Close')].values, timeperiod=100)
df['sma200'] = ta.SMA(df[key('Close')].values, timeperiod=200)
df['bbands_upper'], df['sma20'], df['bbands_lower'] = ta.BBANDS(df[key('Close')].values, timeperiod=20)
df['macd_lead'], df['macd_lag'], df['macd_divergence'] = ta.MACD(df[key('Close')].values) # 12, 26, 9
df['rsi'] = ta.RSI(df[key('Close')].values) # 14
df['stoch_lead'], df['stoch_lag'] = ta.STOCH(df[key('High')].values, df[key('Low')].values, df[key('Close')].values,
fastk_period=14, slowk_period=1, slowd_period=3)
df['slope'] = ta.LINEARREG_SLOPE(df[key('Close')].values) * -1 # talib returns the inverse of what we want
return df
| gpl-3.0 |
rgommers/scipy | scipy/stats/_multivariate.py | 7 | 153934 | #
# Author: Joris Vankerschaver 2013
#
import math
import numpy as np
from numpy import asarray_chkfinite, asarray
import scipy.linalg
from scipy._lib import doccer
from scipy.special import gammaln, psi, multigammaln, xlogy, entr, betaln
from scipy._lib._util import check_random_state
from scipy.linalg.blas import drot
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
from ._discrete_distns import binom
from . import mvn
__all__ = ['multivariate_normal',
'matrix_normal',
'dirichlet',
'wishart',
'invwishart',
'multinomial',
'special_ortho_group',
'ortho_group',
'random_correlation',
'unitary_group',
'multivariate_t',
'multivariate_hypergeom']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
_doc_random_state = """\
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
class _PSD:
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
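    Examples
    --------
    An illustrative use on a small positive semidefinite matrix with one
    negligible eigenvalue:
    >>> M = np.array([[2.0, 0.0], [0.0, 0.0]])
    >>> psd = _PSD(M)
    >>> psd.rank, np.allclose(psd.log_pdet, np.log(2.0))
    (1, True)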
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
class multi_rv_generic:
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super().__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the Generator object for generating random variates.
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen:
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
_mvn_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_mvn_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_mvn_doc_frozen_callparams = ""
_mvn_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvn_docdict_params = {
'_mvn_doc_default_callparams': _mvn_doc_default_callparams,
'_mvn_doc_callparams_note': _mvn_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvn_docdict_noparams = {
'_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
'_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_normal_gen(multi_rv_generic):
r"""A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``cdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)``
Cumulative distribution function.
``logcdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)``
Log of the cumulative distribution function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_mvn_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _process_parameters(self, dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be "
"a scalar.")
# Check input sizes and return full arrays for mean and cov if
# necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." %
dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""Log of the multivariate normal probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
def logpdf(self, x, mean=None, cov=1, allow_singular=False):
"""Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Log of the probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean=None, cov=1, allow_singular=False):
"""Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def _cdf(self, x, mean, cov, maxpts, abseps, releps):
"""Log of the multivariate normal cumulative distribution function.
Parameters
----------
x : ndarray
Points at which to evaluate the cumulative distribution function.
mean : ndarray
Mean of the distribution
cov : array_like
Covariance matrix of the distribution
maxpts : integer
The maximum number of points to use for integration
abseps : float
Absolute error tolerance
releps : float
Relative error tolerance
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'cdf' instead.
.. versionadded:: 1.0.0
"""
lower = np.full(mean.shape, -np.inf)
# mvnun expects 1-d arguments, so process points sequentially
func1d = lambda x_slice: mvn.mvnun(lower, x_slice, mean, cov,
maxpts, abseps, releps)[0]
out = np.apply_along_axis(func1d, -1, x)
return _squeeze_output(out)
def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
abseps=1e-5, releps=1e-5):
"""Log of the multivariate normal cumulative distribution function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
maxpts : integer, optional
The maximum number of points to use for integration
(default `1000000*dim`)
abseps : float, optional
Absolute error tolerance (default 1e-5)
releps : float, optional
Relative error tolerance (default 1e-5)
Returns
-------
cdf : ndarray or scalar
Log of the cumulative distribution function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
.. versionadded:: 1.0.0
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
# Use _PSD to check covariance matrix
_PSD(cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * dim
out = np.log(self._cdf(x, mean, cov, maxpts, abseps, releps))
return out
def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
abseps=1e-5, releps=1e-5):
"""Multivariate normal cumulative distribution function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
maxpts : integer, optional
The maximum number of points to use for integration
(default `1000000*dim`)
abseps : float, optional
Absolute error tolerance (default 1e-5)
releps : float, optional
Relative error tolerance (default 1e-5)
Returns
-------
cdf : ndarray or scalar
Cumulative distribution function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
.. versionadded:: 1.0.0
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
# Use _PSD to check covariance matrix
_PSD(cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * dim
out = self._cdf(x, mean, cov, maxpts, abseps, releps)
return out
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_mvn_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
multivariate_normal = multivariate_normal_gen()
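# Illustrative usage sketch (editorial addition, not part of the library API).
# A minimal check, assuming NumPy is importable: the entropy computed above is
# 0.5*log(det(2*pi*e*cov)), and the CDF far in the upper tail is close to 1.
# The helper name `_mvn_usage_sketch` is hypothetical and is never called at
# import time.
def _mvn_usage_sketch():
    import numpy as np
    cov = np.array([[2.0, 0.3], [0.3, 1.0]])
    mean = np.zeros(2)
    h = multivariate_normal.entropy(mean=mean, cov=cov)
    _, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
    assert np.isclose(h, 0.5 * logdet)
    # The CDF integrates the density up to `x`; far in the upper tail it is ~1.
    cdf_tail = multivariate_normal.cdf([50.0, 50.0], mean, cov)
    assert abs(cdf_tail - 1.0) < 1e-4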
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None,
maxpts=None, abseps=1e-5, releps=1e-5):
"""Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
maxpts : integer, optional
The maximum number of points to use for integration of the
cumulative distribution function (default `1000000*dim`)
abseps : float, optional
Absolute error tolerance for the cumulative distribution function
(default 1e-5)
releps : float, optional
Relative error tolerance for the cumulative distribution function
(default 1e-5)
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self._dist = multivariate_normal_gen(seed)
self.dim, self.mean, self.cov = self._dist._process_parameters(
None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * self.dim
self.maxpts = maxpts
self.abseps = abseps
self.releps = releps
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def logcdf(self, x):
return np.log(self.cdf(x))
def cdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._cdf(x, self.mean, self.cov, self.maxpts, self.abseps,
self.releps)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
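# Illustrative sketch (editorial addition, not part of the library API).
# For a full-rank covariance, the frozen entropy above,
# 0.5*(rank*(log(2*pi) + 1) + log_pdet), agrees with the generator's
# 0.5*log(det(2*pi*e*cov)), and frozen pdf values match the generator called
# with the same parameters. It assumes the module-level `multivariate_normal`
# instance is callable with `mean` and `cov` keywords, as its docstring
# describes. `_mvn_frozen_sketch` is hypothetical, never called at import time.
def _mvn_frozen_sketch():
    import numpy as np
    cov = np.array([[1.5, 0.2], [0.2, 0.8]])
    frozen = multivariate_normal(mean=[0.0, 0.0], cov=cov)
    assert np.isclose(frozen.entropy(),
                      multivariate_normal.entropy([0.0, 0.0], cov))
    x = np.array([0.3, -0.1])
    assert np.isclose(frozen.pdf(x),
                      multivariate_normal.pdf(x, [0.0, 0.0], cov))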
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'logcdf', 'cdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
mvn_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
_matnorm_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default: `None`)
rowcov : array_like, optional
Among-row covariance matrix of the distribution (default: `1`)
colcov : array_like, optional
Among-column covariance matrix of the distribution (default: `1`)
"""
_matnorm_doc_callparams_note = \
"""If `mean` is set to `None` then a matrix of zeros is used for the mean.
The dimensions of this matrix are inferred from the shape of `rowcov` and
`colcov`, if these are provided, or set to `1` if ambiguous.
`rowcov` and `colcov` can be two-dimensional array_likes specifying the
covariance matrices directly. Alternatively, a one-dimensional array will
be interpreted as the entries of a diagonal matrix, and a scalar or
zero-dimensional array will be interpreted as this value times the
identity matrix.
"""
_matnorm_doc_frozen_callparams = ""
_matnorm_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
matnorm_docdict_params = {
'_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
matnorm_docdict_noparams = {
'_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class matrix_normal_gen(multi_rv_generic):
r"""A matrix normal random variable.
The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
    among-row covariance matrix. The `colcov` keyword specifies the
among-column covariance matrix.
Methods
-------
``pdf(X, mean=None, rowcov=1, colcov=1)``
Probability density function.
``logpdf(X, mean=None, rowcov=1, colcov=1)``
Log of the probability density function.
``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)``
Draw random samples.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" matrix normal
random variable:
rv = matrix_normal(mean=None, rowcov=1, colcov=1)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_matnorm_doc_callparams_note)s
The covariance matrices specified by `rowcov` and `colcov` must be
(symmetric) positive definite. If the samples in `X` are
:math:`m \times n`, then `rowcov` must be :math:`m \times m` and
`colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
The probability density function for `matrix_normal` is
.. math::
f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
\exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
(X-M)^T \right] \right),
where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
:math:`V` the among-column covariance matrix.
The `allow_singular` behaviour of the `multivariate_normal`
distribution is not currently supported. Covariance matrices must be
full rank.
The `matrix_normal` distribution is closely related to the
`multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
(the vector formed by concatenating the columns of :math:`X`) has a
multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
product). Sampling and pdf evaluation are
:math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
:math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
making this equivalent form algorithmically inefficient.
.. versionadded:: 0.17.0
Examples
--------
>>> from scipy.stats import matrix_normal
>>> M = np.arange(6).reshape(3,2); M
array([[0, 1],
[2, 3],
[4, 5]])
>>> U = np.diag([1,2,3]); U
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> V = 0.3*np.identity(2); V
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1; X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
0.023410202050005054
>>> # Equivalent multivariate normal
>>> from scipy.stats import multivariate_normal
>>> vectorised_X = X.T.flatten()
>>> equiv_mean = M.T.flatten()
>>> equiv_cov = np.kron(V,U)
>>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
0.023410202050005054
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)
def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""Create a frozen matrix normal distribution.
See `matrix_normal_frozen` for more information.
"""
return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)
def _process_parameters(self, mean, rowcov, colcov):
"""
Infer dimensionality from mean or covariance matrices. Handle
defaults. Ensure compatible dimensions.
"""
# Process mean
if mean is not None:
mean = np.asarray(mean, dtype=float)
meanshape = mean.shape
if len(meanshape) != 2:
raise ValueError("Array `mean` must be two dimensional.")
            if np.any(np.asarray(meanshape) == 0):
raise ValueError("Array `mean` has invalid shape.")
# Process among-row covariance
rowcov = np.asarray(rowcov, dtype=float)
if rowcov.ndim == 0:
if mean is not None:
rowcov = rowcov * np.identity(meanshape[0])
else:
rowcov = rowcov * np.identity(1)
elif rowcov.ndim == 1:
rowcov = np.diag(rowcov)
rowshape = rowcov.shape
if len(rowshape) != 2:
raise ValueError("`rowcov` must be a scalar or a 2D array.")
if rowshape[0] != rowshape[1]:
raise ValueError("Array `rowcov` must be square.")
if rowshape[0] == 0:
raise ValueError("Array `rowcov` has invalid shape.")
numrows = rowshape[0]
# Process among-column covariance
colcov = np.asarray(colcov, dtype=float)
if colcov.ndim == 0:
if mean is not None:
colcov = colcov * np.identity(meanshape[1])
else:
colcov = colcov * np.identity(1)
elif colcov.ndim == 1:
colcov = np.diag(colcov)
colshape = colcov.shape
if len(colshape) != 2:
raise ValueError("`colcov` must be a scalar or a 2D array.")
if colshape[0] != colshape[1]:
raise ValueError("Array `colcov` must be square.")
if colshape[0] == 0:
raise ValueError("Array `colcov` has invalid shape.")
numcols = colshape[0]
# Ensure mean and covariances compatible
if mean is not None:
if meanshape[0] != numrows:
raise ValueError("Arrays `mean` and `rowcov` must have the "
"same number of rows.")
if meanshape[1] != numcols:
raise ValueError("Arrays `mean` and `colcov` must have the "
"same number of columns.")
else:
mean = np.zeros((numrows, numcols))
dims = (numrows, numcols)
return dims, mean, rowcov, colcov
def _process_quantiles(self, X, dims):
"""
        Adjust quantiles array so that the last two axes label the components
        of each data point.
"""
X = np.asarray(X, dtype=float)
if X.ndim == 2:
X = X[np.newaxis, :]
if X.shape[-2:] != dims:
raise ValueError("The shape of array `X` is not compatible "
"with the distribution parameters.")
return X
def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
col_prec_rt, log_det_colcov):
"""Log of the matrix normal probability density function.
Parameters
----------
dims : tuple
Dimensions of the matrix variates
X : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
row_prec_rt : ndarray
A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
is the inverse of the among-row covariance matrix
log_det_rowcov : float
Logarithm of the determinant of the among-row covariance matrix
col_prec_rt : ndarray
A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
is the inverse of the among-column covariance matrix
log_det_colcov : float
Logarithm of the determinant of the among-column covariance matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
numrows, numcols = dims
roll_dev = np.rollaxis(X-mean, axis=-1, start=0)
scale_dev = np.tensordot(col_prec_rt.T,
np.dot(roll_dev, row_prec_rt), 1)
maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
+ numrows*log_det_colcov + maha)
def logpdf(self, X, mean=None, rowcov=1, colcov=1):
"""Log of the matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
logpdf : ndarray
Log of the probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
X = self._process_quantiles(X, dims)
rowpsd = _PSD(rowcov, allow_singular=False)
colpsd = _PSD(colcov, allow_singular=False)
out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X, mean=None, rowcov=1, colcov=1):
"""Matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
return np.exp(self.logpdf(X, mean, rowcov, colcov))
def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
"""Draw random samples from a matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `dims`), where `dims` is the
dimension of the random matrices.
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
size = int(size)
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
rowchol = scipy.linalg.cholesky(rowcov, lower=True)
colchol = scipy.linalg.cholesky(colcov, lower=True)
random_state = self._get_random_state(random_state)
std_norm = random_state.standard_normal(size=(dims[1], size, dims[0]))
roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)
out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis, :, :]
if size == 1:
out = out.reshape(mean.shape)
return out
matrix_normal = matrix_normal_gen()
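# Illustrative sketch (editorial addition, not part of the library API).
# Demonstrates two claims from the docstring above: `size` draws of an m x n
# matrix variate have shape (size, m, n), and Vec(X) (column stacking) is
# multivariate normal with covariance kron(colcov, rowcov).
# `_matrix_normal_sketch` is a hypothetical name, never called at import time.
def _matrix_normal_sketch():
    import numpy as np
    M = np.zeros((3, 2))
    U = np.diag([1.0, 2.0, 3.0])   # among-row covariance
    V = 0.5 * np.identity(2)       # among-column covariance
    X = matrix_normal.rvs(mean=M, rowcov=U, colcov=V, size=4, random_state=123)
    assert X.shape == (4, 3, 2)
    lp_mat = matrix_normal.logpdf(X[0], mean=M, rowcov=U, colcov=V)
    lp_vec = multivariate_normal.logpdf(X[0].T.ravel(), mean=M.T.ravel(),
                                        cov=np.kron(V, U))
    assert np.isclose(lp_mat, lp_vec)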
class matrix_normal_frozen(multi_rv_frozen):
"""Create a frozen matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Examples
--------
>>> from scipy.stats import matrix_normal
>>> distn = matrix_normal(mean=np.zeros((3,3)))
>>> X = distn.rvs(); X
array([[-0.02976962, 0.93339138, -0.09663178],
[ 0.67405524, 0.28250467, -0.93308929],
[-0.31144782, 0.74535536, 1.30412916]])
>>> distn.pdf(X)
2.5160642368346784e-05
>>> distn.logpdf(X)
-10.590229595124615
"""
def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
self._dist = matrix_normal_gen(seed)
self.dims, self.mean, self.rowcov, self.colcov = \
self._dist._process_parameters(mean, rowcov, colcov)
self.rowpsd = _PSD(self.rowcov, allow_singular=False)
self.colpsd = _PSD(self.colcov, allow_singular=False)
def logpdf(self, X):
X = self._dist._process_quantiles(X, self.dims)
out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
self.rowpsd.log_pdet, self.colpsd.U,
self.colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X):
return np.exp(self.logpdf(X))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
random_state)
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = matrix_normal_gen.__dict__[name]
method_frozen = matrix_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
matnorm_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = %s "
"and x.shape = %s." % (alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) < 0:
raise ValueError("Each entry in 'x' must be greater than or equal "
"to zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
# Check x_i > 0 or alpha_i > 1
xeq0 = (x == 0)
alphalt1 = (alpha < 1)
if x.shape != alpha.shape:
alphalt1 = np.repeat(alphalt1, x.shape[-1], axis=-1).reshape(x.shape)
chk = np.logical_and(xeq0, alphalt1)
if np.sum(chk):
raise ValueError("Each entry in 'x' must be greater than zero if its "
"alpha is less than one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal "
"simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""Internal helper function to compute the log of the useful quotient.
.. math::
        B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}
                         {\Gamma\left(\sum_{i=1}^{K} \alpha_i \right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
    lnB : scalar
        Log of the helper quotient, for internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
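# Illustrative sketch (editorial addition, not part of the library API).
# For K = 2 the quotient B(alpha) reduces to the ordinary Beta function, so
# _lnB should agree with scipy.special.betaln. `_lnB_sketch` is a hypothetical
# name, never called at import time.
def _lnB_sketch():
    import numpy as np
    from scipy.special import betaln
    assert np.isclose(_lnB(np.array([2.5, 4.0])), betaln(2.5, 4.0))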
class dirichlet_gen(multi_rv_generic):
r"""A Dirichlet random variable.
The `alpha` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
Compute the differential entropy of the Dirichlet distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
    Each :math:`\alpha` entry must be positive. The distribution has support
    only on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i = 1
where 0 < x_i < 1.
If the quantiles don't lie within the simplex, a ValueError is raised.
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
concentration parameters and :math:`K` is the dimension of the space
where :math:`x` takes values.
Note that the dirichlet interface is somewhat inconsistent.
The array returned by the rvs function is transposed
with respect to the format expected by the pdf and logpdf.
Examples
--------
>>> from scipy.stats import dirichlet
Generate a dirichlet random variable
>>> quantiles = np.array([0.2, 0.2, 0.6]) # specify quantiles
>>> alpha = np.array([0.4, 5, 15]) # specify concentration parameters
>>> dirichlet.pdf(quantiles, alpha)
0.2843831684937255
The same PDF but following a log scale
>>> dirichlet.logpdf(quantiles, alpha)
-1.2574327653159187
Once we specify the dirichlet distribution
we can then calculate quantities of interest
>>> dirichlet.mean(alpha) # get the mean of the distribution
array([0.01960784, 0.24509804, 0.73529412])
>>> dirichlet.var(alpha) # get variance
array([0.00089829, 0.00864603, 0.00909517])
>>> dirichlet.entropy(alpha) # calculate the differential entropy
-4.3280162474082715
We can also return random samples from the distribution
>>> dirichlet.rvs(alpha, size=1, random_state=1)
array([[0.00766178, 0.24670518, 0.74563305]])
>>> dirichlet.rvs(alpha, size=2, random_state=2)
array([[0.01639427, 0.1292273 , 0.85437844],
[0.00156917, 0.19033695, 0.80809388]])
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""Log of the Dirichlet probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((xlogy(alpha - 1, x.T)).T, 0)
def logpdf(self, x, alpha):
"""Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
        logpdf : ndarray or scalar
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""Compute the mean of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
mu : ndarray or scalar
Mean of the Dirichlet distribution.
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""Compute the variance of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
v : ndarray or scalar
Variance of the Dirichlet distribution.
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return _squeeze_output(out)
def entropy(self, alpha):
"""Compute the differential entropy of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
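# Illustrative usage sketch (editorial addition, not part of the library API).
# Checks the closed-form mean alpha_i/alpha_0 and variance
# alpha_i*(alpha_0 - alpha_i)/(alpha_0**2*(alpha_0 + 1)), and the input
# convention that the last component of `x` may be omitted (it is inferred as
# 1 - sum(x)). `_dirichlet_usage_sketch` is hypothetical, never called at
# import time.
def _dirichlet_usage_sketch():
    import numpy as np
    alpha = np.array([0.4, 5.0, 15.0])
    x = np.array([0.2, 0.2, 0.6])
    a0 = alpha.sum()
    assert np.allclose(dirichlet.mean(alpha), alpha / a0)
    assert np.allclose(dirichlet.var(alpha),
                       alpha * (a0 - alpha) / (a0**2 * (a0 + 1)))
    assert np.isclose(dirichlet.pdf(x, alpha), dirichlet.pdf(x[:2], alpha))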
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
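# Illustrative sketch (editorial addition, not part of the library API).
# As noted in the class docstring, `rvs` returns one sample per row, each a
# point on the simplex (nonnegative entries summing to 1).
# `_dirichlet_rvs_sketch` is hypothetical, never called at import time.
def _dirichlet_rvs_sketch():
    import numpy as np
    draws = dirichlet.rvs(np.array([0.4, 5.0, 15.0]), size=5, random_state=42)
    assert draws.shape == (5, 3)
    assert np.allclose(draws.sum(axis=1), 1.0)
    assert (draws >= 0).all()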
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix). These arguments must satisfy the relationship
    ``df > dim - 1``, where ``dim`` is the dimension of the scale matrix, but
    see notes on using the `rvs` method with ``df < dim``.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
The algorithm [2]_ implemented by the `rvs` method may
produce numerically singular matrices with :math:`p - 1 < \nu < p`; the
user may wish to check for this condition and generate replacement samples
as necessary.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis, np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df <= dim - 1:
raise ValueError("Degrees of freedom must be greater than the "
"dimension of scale matrix minus 1.")
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
                             ' Got size = %s.' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""Log of the Wishart probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
        # components along the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.empty(x.shape[-1])
scale_inv_x = np.empty(x.shape)
tr_scale_inv_x = np.empty(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:, :, i])
scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True), x[:, :, i])
tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""Mean of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""Mean of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""Mode of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""Variance of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""Variance of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = (np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) +
shape[::-1]).T)
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None, None, None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""Draw random samples from a Wishart distribution.
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""Compute the differential entropy of the Wishart.
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""Compute Cholesky decomposition and determine (log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
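# Illustrative usage sketch (editorial addition, not part of the library API).
# Checks the closed-form mean df*scale and mode (df - dim - 1)*scale, and the
# claim that the Cholesky-based log-determinant helper agrees with
# np.linalg.slogdet. `_wishart_usage_sketch` is hypothetical, uses the private
# helper `_cholesky_logdet` purely for illustration, and is never called at
# import time.
def _wishart_usage_sketch():
    import numpy as np
    scale = np.array([[2.0, 0.5], [0.5, 1.0]])
    df, dim = 5, 2
    assert np.allclose(wishart.mean(df, scale), df * scale)
    assert np.allclose(wishart.mode(df, scale), (df - dim - 1) * scale)
    C, logdet = wishart._cholesky_logdet(scale)
    assert np.allclose(C @ C.T, scale)
    assert np.isclose(logdet, np.linalg.slogdet(scale)[1])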
class wishart_frozen(multi_rv_frozen):
"""Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
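# Illustrative sketch (editorial addition, not part of the library API).
# `size` may be an iterable, in which case draws have shape size + (dim, dim)
# and every draw is a symmetric positive definite matrix.
# `_wishart_rvs_shape_sketch` is hypothetical, never called at import time.
def _wishart_rvs_shape_sketch():
    import numpy as np
    samples = wishart.rvs(df=5, scale=np.eye(3), size=(2, 4), random_state=7)
    assert samples.shape == (2, 4, 3, 3)
    assert np.allclose(samples, np.swapaxes(samples, -1, -2))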
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See Also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf', 'potri'), (a1,))
triu_rows, triu_cols = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
                             ' potri' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_rows, triu_cols] = a1[index][triu_cols, triu_rows]
return a1
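# Illustrative sketch (editorial addition, not part of the library API).
# Inverts a stack of symmetric positive definite matrices with _cho_inv_batch
# and compares against np.linalg.inv; note the helper works in place, hence
# the copy. `_cho_inv_batch_sketch` is hypothetical, never called at import
# time.
def _cho_inv_batch_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    B = rng.standard_normal((4, 3, 3))
    spd = B @ np.swapaxes(B, -1, -2) + 3 * np.eye(3)   # batch of SPD matrices
    assert np.allclose(_cho_inv_batch(spd.copy()), np.linalg.inv(spd))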
class invwishart_gen(wishart_gen):
r"""An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications
in Statistics - Simulation and Computation, vol. 14.2, pp.511-514,
1985.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""Log of the inverse Wishart probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.empty(x.shape[-1])
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.empty(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:, :, i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""Mean of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""Mean of the inverse Wishart distribution.
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""Mode of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""Mode of the inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""Variance of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""Variance of the inverse Wishart distribution.
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""Draw random samples from an inverse Wishart distribution.
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super()._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
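# Illustrative usage sketch (editorial addition, not part of the library API).
# Checks the closed-form mean scale/(df - dim - 1) and mode
# scale/(df + dim + 1), and the 1-D reduction to an inverse gamma with shape
# df/2 and scale 1/2 noted in the class docstring. `_invwishart_usage_sketch`
# is hypothetical, never called at import time.
def _invwishart_usage_sketch():
    import numpy as np
    from scipy.stats import invgamma
    scale = np.array([[2.0, 0.3], [0.3, 1.0]])
    df, dim = 6, 2
    assert np.allclose(invwishart.mean(df, scale), scale / (df - dim - 1))
    assert np.allclose(invwishart.mode(df, scale), scale / (df + dim + 1))
    x = 0.37
    assert np.isclose(invwishart.pdf(x, df=df, scale=1.0),
                      invgamma.pdf(x, df / 2.0, scale=0.5))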
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : {None, int, `numpy.random.Generator`}, optional
If `seed` is None the `numpy.random.Generator` singleton is used.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
    method_frozen = invwishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
_multinomial_doc_default_callparams = """\
n : int
Number of trials
p : array_like
Probability of a trial falling into each category; should sum to 1
"""
_multinomial_doc_callparams_note = \
"""`n` should be a positive integer. Each element of `p` should be in the
interval :math:`[0,1]` and the elements should sum to 1. If they do not sum to
1, the last element of the `p` array is not used and is replaced with the
remaining probability left over from the earlier elements.
"""
_multinomial_doc_frozen_callparams = ""
_multinomial_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
multinomial_docdict_params = {
'_doc_default_callparams': _multinomial_doc_default_callparams,
'_doc_callparams_note': _multinomial_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
multinomial_docdict_noparams = {
'_doc_default_callparams': _multinomial_doc_frozen_callparams,
'_doc_callparams_note': _multinomial_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multinomial_gen(multi_rv_generic):
r"""A multinomial random variable.
Methods
-------
``pmf(x, n, p)``
Probability mass function.
``logpmf(x, n, p)``
Log of the probability mass function.
``rvs(n, p, size=1, random_state=None)``
Draw random samples from a multinomial distribution.
``entropy(n, p)``
Compute the entropy of the multinomial distribution.
``cov(n, p)``
Compute the covariance matrix of the multinomial distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
Alternatively, the object may be called (as a function) to fix the `n` and
    `p` parameters, returning a "frozen" multinomial random variable (see the Examples below).
The probability mass function for `multinomial` is
.. math::
f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k},
supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a
nonnegative integer and their sum is :math:`n`.
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.stats import multinomial
>>> rv = multinomial(8, [0.3, 0.2, 0.5])
>>> rv.pmf([1, 3, 4])
0.042000000000000072
The multinomial distribution for :math:`k=2` is identical to the
corresponding binomial distribution (tiny numerical differences
notwithstanding):
>>> from scipy.stats import binom
>>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6])
0.29030399999999973
>>> binom.pmf(3, 7, 0.4)
0.29030400000000012
The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support
broadcasting, under the convention that the vector parameters (``x`` and
``p``) are interpreted as if each row along the last axis is a single
object. For instance:
>>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7])
array([0.2268945, 0.25412184])
Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``,
but following the rules mentioned above they behave as if the rows
``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single
object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and
``p.shape = ()``. To obtain the individual elements without broadcasting,
we would do this:
>>> multinomial.pmf([3, 4], n=7, p=[.3, .7])
0.2268945
>>> multinomial.pmf([3, 5], 8, p=[.3, .7])
0.25412184
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``p.shape[-1]``. For example:
>>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
array([[[ 0.84, -0.84],
[-0.84, 0.84]],
[[ 1.2 , -1.2 ],
[-1.2 , 1.2 ]]])
In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and
following the rules above, these broadcast as if ``p.shape == (2,)``.
Thus the result should also be of shape ``(2,)``, but since each output is
a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``,
where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and
``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``.
See also
--------
scipy.stats.binom : The binomial distribution.
numpy.random.Generator.multinomial : Sampling from the multinomial distribution.
scipy.stats.multivariate_hypergeom :
The multivariate hypergeometric distribution.
""" # noqa: E501
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = \
doccer.docformat(self.__doc__, multinomial_docdict_params)
def __call__(self, n, p, seed=None):
"""Create a frozen multinomial distribution.
See `multinomial_frozen` for more information.
"""
return multinomial_frozen(n, p, seed)
def _process_parameters(self, n, p):
"""Returns: n_, p_, npcond.
n_ and p_ are arrays of the correct shape; npcond is a boolean array
flagging values out of the domain.
"""
p = np.array(p, dtype=np.float64, copy=True)
p[..., -1] = 1. - p[..., :-1].sum(axis=-1)
# true for bad p
pcond = np.any(p < 0, axis=-1)
pcond |= np.any(p > 1, axis=-1)
n = np.array(n, dtype=np.int_, copy=True)
# true for bad n
ncond = n <= 0
return n, p, ncond | pcond
def _process_quantiles(self, x, n, p):
"""Returns: x_, xcond.
x_ is an int array; xcond is a boolean array flagging values out of the
domain.
"""
xx = np.asarray(x, dtype=np.int_)
if xx.ndim == 0:
raise ValueError("x must be an array.")
if xx.size != 0 and not xx.shape[-1] == p.shape[-1]:
raise ValueError("Size of each quantile should be size of p: "
"received %d, but expected %d." %
(xx.shape[-1], p.shape[-1]))
# true for x out of the domain
cond = np.any(xx != x, axis=-1)
cond |= np.any(xx < 0, axis=-1)
cond = cond | (np.sum(xx, axis=-1) != n)
return xx, cond
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
if result.ndim == 0:
return bad_value
result[...] = bad_value
return result
def _logpmf(self, x, n, p):
return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1)
def logpmf(self, x, n, p):
"""Log of the Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x, xcond = self._process_quantiles(x, n, p)
result = self._logpmf(x, n, p)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
result = self._checkresult(result, xcond_, np.NINF)
# replace values bad for n or p; broadcast npcond to the right shape
npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
return self._checkresult(result, npcond_, np.NAN)
def pmf(self, x, n, p):
"""Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
            Probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpmf(x, n, p))
def mean(self, n, p):
"""Mean of the Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
        mean : ndarray or scalar
            The mean of the distribution, equal to ``n * p``
"""
n, p, npcond = self._process_parameters(n, p)
result = n[..., np.newaxis]*p
return self._checkresult(result, npcond, np.NAN)
def cov(self, n, p):
"""Covariance matrix of the multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : ndarray
The covariance matrix of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
nn = n[..., np.newaxis, np.newaxis]
result = nn * np.einsum('...j,...k->...jk', -p, p)
# change the diagonal
for i in range(p.shape[-1]):
result[..., i, i] += n*p[..., i]
return self._checkresult(result, npcond, np.nan)
def entropy(self, n, p):
r"""Compute the entropy of the multinomial distribution.
The entropy is computed using this expression:
.. math::
f(x) = - \log n! - n\sum_{i=1}^k p_i \log p_i +
\sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Multinomial distribution
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x = np.r_[1:np.max(n)+1]
term1 = n*np.sum(entr(p), axis=-1)
term1 -= gammaln(n+1)
n = n[..., np.newaxis]
new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
x.shape += (1,)*new_axes_needed
term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
axis=(-1, -1-new_axes_needed))
return self._checkresult(term1 + term2, npcond, np.nan)
def rvs(self, n, p, size=None, random_state=None):
"""Draw random samples from a Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
            Number of samples to draw (default ``None``, which returns a single variate).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of shape (`size`, `len(p)`)
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
random_state = self._get_random_state(random_state)
return random_state.multinomial(n, p, size)
multinomial = multinomial_gen()
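# A small sketch exercising the frozen multinomial defined above; the n, p
# and sample-size values are arbitrary and the helper is never called here.
def _multinomial_usage_sketch():
    rng = np.random.default_rng(1)
    rv = multinomial(10, [0.2, 0.3, 0.5])
    x = rv.rvs(size=1000, random_state=rng)          # shape (1000, 3); rows sum to 10
    # Empirical category means should approach the analytic mean n * p.
    return x.mean(axis=0), rv.mean()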
class multinomial_frozen(multi_rv_frozen):
r"""Create a frozen Multinomial distribution.
Parameters
----------
n : int
number of trials
p: array_like
probability of a trial falling into each category; should sum to 1
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def __init__(self, n, p, seed=None):
self._dist = multinomial_gen(seed)
self.n, self.p, self.npcond = self._dist._process_parameters(n, p)
# monkey patch self._dist
def _process_parameters(n, p):
return self.n, self.p, self.npcond
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.n, self.p)
def pmf(self, x):
return self._dist.pmf(x, self.n, self.p)
def mean(self):
return self._dist.mean(self.n, self.p)
def cov(self):
return self._dist.cov(self.n, self.p)
def entropy(self):
return self._dist.entropy(self.n, self.p)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.n, self.p, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multinomial and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']:
method = multinomial_gen.__dict__[name]
method_frozen = multinomial_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, multinomial_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
multinomial_docdict_params)
class special_ortho_group_gen(multi_rv_generic):
r"""A matrix-valued SO(N) random variable.
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from SO(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is wrapping the random_rot code from the MDP Toolkit,
https://github.com/mdp-toolkit/mdp-toolkit
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The algorithm is described in the paper
Stewart, G.W., "The efficient generation of random orthogonal
matrices with an application to condition estimators", SIAM Journal
on Numerical Analysis, 17(3), pp. 403-409, 1980.
For more information see
https://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization
See also the similar `ortho_group`. For a random rotation in three
dimensions, see `scipy.spatial.transform.Rotation.random`.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> x = special_ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> scipy.linalg.det(x)
1.0
This generates one random matrix from SO(3). It is orthogonal and
has a determinant of 1.
See Also
--------
ortho_group, scipy.spatial.transform.Rotation.random
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""Create a frozen SO(N) distribution.
See `special_ortho_group_frozen` for more information.
"""
return special_ortho_group_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("""Dimension of rotation must be specified,
and must be a scalar greater than 1.""")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from SO(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
random_state = self._get_random_state(random_state)
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
H = np.eye(dim)
D = np.empty((dim,))
for n in range(dim-1):
x = random_state.normal(size=(dim-n,))
norm2 = np.dot(x, x)
x0 = x[0].item()
D[n] = np.sign(x[0]) if x[0] != 0 else 1
x[0] += D[n]*np.sqrt(norm2)
x /= np.sqrt((norm2 - x0**2 + x[0]**2) / 2.)
# Householder transformation
H[:, n:] -= np.outer(np.dot(H[:, n:], x), x)
D[-1] = (-1)**(dim-1)*D[:-1].prod()
# Equivalent to np.dot(np.diag(D), H) but faster, apparently
H = (D*H.T).T
return H
special_ortho_group = special_ortho_group_gen()
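# A short sketch checking the defining properties of a draw from SO(N): the
# matrix is orthogonal and has determinant +1. The dim and seed are arbitrary.
def _special_ortho_group_sketch(dim=4):
    rng = np.random.default_rng(2)
    R = special_ortho_group.rvs(dim, random_state=rng)
    is_orthogonal = np.allclose(R @ R.T, np.eye(dim))
    det_is_plus_one = np.isclose(np.linalg.det(R), 1.0)
    return is_orthogonal, det_is_plus_one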
class special_ortho_group_frozen(multi_rv_frozen):
def __init__(self, dim=None, seed=None):
"""Create a frozen SO(N) distribution.
Parameters
----------
dim : scalar
Dimension of matrices
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> g = special_ortho_group(5)
>>> x = g.rvs()
"""
self._dist = special_ortho_group_gen(seed)
self.dim = self._dist._process_parameters(dim)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.dim, size, random_state)
class ortho_group_gen(multi_rv_generic):
r"""A matrix-valued O(N) random variable.
Return a random orthogonal matrix, drawn from the O(N) Haar
distribution (the only uniform distribution on O(N)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from O(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is closely related to `special_ortho_group`.
Some care is taken to avoid numerical error, as per the paper by Mezzadri.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
>>> from scipy.stats import ortho_group
>>> x = ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> np.fabs(scipy.linalg.det(x))
1.0
This generates one random matrix from O(3). It is orthogonal and
has a determinant of +1 or -1.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
            raise ValueError("Dimension of rotation must be specified, "
                             "and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from O(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
random_state = self._get_random_state(random_state)
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
H = np.eye(dim)
for n in range(dim):
x = random_state.normal(size=(dim-n,))
norm2 = np.dot(x, x)
x0 = x[0].item()
# random sign, 50/50, but chosen carefully to avoid roundoff error
D = np.sign(x[0]) if x[0] != 0 else 1
x[0] += D * np.sqrt(norm2)
x /= np.sqrt((norm2 - x0**2 + x[0]**2) / 2.)
# Householder transformation
H[:, n:] = -D * (H[:, n:] - np.outer(np.dot(H[:, n:], x), x))
return H
ortho_group = ortho_group_gen()
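# A companion sketch for O(N): draws are orthogonal, but the determinant may
# be either +1 or -1, unlike SO(N). The dim and seed are arbitrary.
def _ortho_group_sketch(dim=4):
    rng = np.random.default_rng(3)
    Q = ortho_group.rvs(dim, random_state=rng)
    is_orthogonal = np.allclose(Q @ Q.T, np.eye(dim))
    return is_orthogonal, np.linalg.det(Q)           # close to +1 or -1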
class random_correlation_gen(multi_rv_generic):
r"""A random correlation matrix.
Return a random correlation matrix, given a vector of eigenvalues.
The `eigs` keyword specifies the eigenvalues of the correlation matrix,
and implies the dimension.
Methods
-------
``rvs(eigs=None, random_state=None)``
Draw random correlation matrices, all with eigenvalues eigs.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix.
Notes
-----
Generates a random correlation matrix following a numerically stable
algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)
similarity transformation to construct a symmetric positive semi-definite
matrix, and applies a series of Givens rotations to scale it to have ones
on the diagonal.
References
----------
.. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation
of correlation matrices and their factors", BIT 2000, Vol. 40,
           No. 4, pp. 640-651
Examples
--------
>>> from scipy.stats import random_correlation
>>> rng = np.random.default_rng()
>>> x = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=rng)
>>> x
array([[ 1. , -0.07198934, -0.20411041, -0.24385796],
[-0.07198934, 1. , 0.12968613, -0.29471382],
[-0.20411041, 0.12968613, 1. , 0.2828693 ],
[-0.24385796, -0.29471382, 0.2828693 , 1. ]])
>>> import scipy.linalg
>>> e, v = scipy.linalg.eigh(x)
>>> e
array([ 0.5, 0.8, 1.2, 1.5])
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, eigs, tol):
eigs = np.asarray(eigs, dtype=float)
dim = eigs.size
if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:
raise ValueError("Array 'eigs' must be a vector of length "
"greater than 1.")
if np.fabs(np.sum(eigs) - dim) > tol:
raise ValueError("Sum of eigenvalues must equal dimensionality.")
for x in eigs:
if x < -tol:
raise ValueError("All eigenvalues must be non-negative.")
return dim, eigs
def _givens_to_1(self, aii, ajj, aij):
"""Computes a 2x2 Givens matrix to put 1's on the diagonal.
The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].
The output matrix g is a 2x2 anti-symmetric matrix of the form
[ c s ; -s c ]; the elements c and s are returned.
Applying the output matrix to the input matrix (as b=g.T M g)
results in a matrix with bii=1, provided tr(M) - det(M) >= 1
and floating point issues do not occur. Otherwise, some other
valid rotation is returned. When tr(M)==2, also bjj=1.
"""
aiid = aii - 1.
ajjd = ajj - 1.
if ajjd == 0:
# ajj==1, so swap aii and ajj to avoid division by zero
return 0., 1.
dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))
        # t is chosen to avoid cancellation [1]
t = (aij + math.copysign(dd, aij)) / ajjd
c = 1. / math.sqrt(1. + t*t)
if c == 0:
# Underflow
s = 1.0
else:
s = c*t
return c, s
def _to_corr(self, m):
"""
Given a psd matrix m, rotate to put one's on the diagonal, turning it
into a correlation matrix. This also requires the trace equal the
dimensionality. Note: modifies input matrix
"""
# Check requirements for in-place Givens
if not (m.flags.c_contiguous and m.dtype == np.float64 and
m.shape[0] == m.shape[1]):
raise ValueError()
d = m.shape[0]
for i in range(d-1):
if m[i, i] == 1:
continue
elif m[i, i] > 1:
for j in range(i+1, d):
if m[j, j] < 1:
break
else:
for j in range(i+1, d):
if m[j, j] > 1:
break
c, s = self._givens_to_1(m[i, i], m[j, j], m[i, j])
# Use BLAS to apply Givens rotations in-place. Equivalent to:
# g = np.eye(d)
# g[i, i] = g[j,j] = c
# g[j, i] = -s; g[i, j] = s
# m = np.dot(g.T, np.dot(m, g))
mv = m.ravel()
drot(mv, mv, c, -s, n=d,
offx=i*d, incx=1, offy=j*d, incy=1,
overwrite_x=True, overwrite_y=True)
drot(mv, mv, c, -s, n=d,
offx=i, incx=d, offy=j, incy=d,
overwrite_x=True, overwrite_y=True)
return m
def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):
"""Draw random correlation matrices.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix
tol : float, optional
Tolerance for input parameter checks
diag_tol : float, optional
Tolerance for deviation of the diagonal of the resulting
matrix. Default: 1e-7
Raises
------
RuntimeError
Floating point error prevented generating a valid correlation
matrix.
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim),
each having eigenvalues eigs.
"""
dim, eigs = self._process_parameters(eigs, tol=tol)
random_state = self._get_random_state(random_state)
m = ortho_group.rvs(dim, random_state=random_state)
m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m
m = self._to_corr(m) # Carefully rotate to unit diagonal
# Check diagonal
if abs(m.diagonal() - 1).max() > diag_tol:
raise RuntimeError("Failed to generate a valid correlation matrix")
return m
random_correlation = random_correlation_gen()
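# A sketch verifying the two invariants of random_correlation.rvs described
# above: a unit diagonal and the requested eigenvalue spectrum. The
# eigenvalues below are arbitrary but sum to the dimension, as required.
def _random_correlation_sketch():
    rng = np.random.default_rng(4)
    eigs = (0.5, 0.8, 1.2, 1.5)                      # sums to 4 == dimension
    corr = random_correlation.rvs(eigs, random_state=rng)
    unit_diag = np.allclose(np.diag(corr), 1.0)
    spectrum_ok = np.allclose(np.sort(np.linalg.eigvalsh(corr)), np.sort(eigs))
    return unit_diag, spectrum_ok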
class unitary_group_gen(multi_rv_generic):
r"""A matrix-valued U(N) random variable.
Return a random unitary matrix.
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from U(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is similar to `ortho_group`.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
>>> from scipy.stats import unitary_group
>>> x = unitary_group.rvs(3)
>>> np.dot(x, x.conj().T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
This generates one random matrix from U(3). The dot product confirms that
it is unitary up to machine precision.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
            raise ValueError("Dimension of rotation must be specified, "
                             "and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from U(N).
Parameters
----------
dim : integer
Dimension of space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
random_state = self._get_random_state(random_state)
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
z = 1/math.sqrt(2)*(random_state.normal(size=(dim, dim)) +
1j*random_state.normal(size=(dim, dim)))
q, r = scipy.linalg.qr(z)
d = r.diagonal()
q *= d/abs(d)
return q
unitary_group = unitary_group_gen()
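# A sketch checking unitarity of a U(N) draw: the product with the conjugate
# transpose should equal the identity to machine precision. Values arbitrary.
def _unitary_group_sketch(dim=3):
    rng = np.random.default_rng(5)
    U = unitary_group.rvs(dim, random_state=rng)
    return np.allclose(U @ U.conj().T, np.eye(dim))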
_mvt_doc_default_callparams = \
"""
loc : array_like, optional
Location of the distribution. (default ``0``)
shape : array_like, optional
Positive semidefinite matrix of the distribution. (default ``1``)
df : float, optional
Degrees of freedom of the distribution; must be greater than zero.
If ``np.inf`` then results are multivariate normal. The default is ``1``.
allow_singular : bool, optional
Whether to allow a singular matrix. (default ``False``)
"""
_mvt_doc_callparams_note = \
"""Setting the parameter `loc` to ``None`` is equivalent to having `loc`
be the zero-vector. The parameter `shape` can be a scalar, in which case
the shape matrix is the identity times that value, a vector of
diagonal entries for the shape matrix, or a two-dimensional array_like.
"""
_mvt_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvt_docdict_params = {
'_mvt_doc_default_callparams': _mvt_doc_default_callparams,
'_mvt_doc_callparams_note': _mvt_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvt_docdict_noparams = {
'_mvt_doc_default_callparams': "",
'_mvt_doc_callparams_note': _mvt_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_t_gen(multi_rv_generic):
r"""A multivariate t-distributed random variable.
The `loc` parameter specifies the location. The `shape` parameter specifies
the positive semidefinite shape matrix. The `df` parameter specifies the
degrees of freedom.
In addition to calling the methods below, the object itself may be called
as a function to fix the location, shape matrix, and degrees of freedom
parameters, returning a "frozen" multivariate t-distribution random.
Methods
-------
``pdf(x, loc=None, shape=1, df=1, allow_singular=False)``
Probability density function.
``logpdf(x, loc=None, shape=1, df=1, allow_singular=False)``
Log of the probability density function.
``rvs(loc=None, shape=1, df=1, size=1, random_state=None)``
Draw random samples from a multivariate t-distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvt_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_mvt_doc_callparams_note)s
The matrix `shape` must be a (symmetric) positive semidefinite matrix. The
determinant and inverse of `shape` are computed as the pseudo-determinant
and pseudo-inverse, respectively, so that `shape` does not need to have
full rank.
The probability density function for `multivariate_t` is
.. math::
        f(x) = \frac{\Gamma((\nu + p)/2)}{\Gamma(\nu/2)\nu^{p/2}\pi^{p/2}|\Sigma|^{1/2}}
               \left[1 + \frac{1}{\nu} (\mathbf{x} - \boldsymbol{\mu})^{\top}
               \boldsymbol{\Sigma}^{-1}
               (\mathbf{x} - \boldsymbol{\mu}) \right]^{-(\nu + p)/2},
where :math:`p` is the dimension of :math:`\mathbf{x}`,
:math:`\boldsymbol{\mu}` is the :math:`p`-dimensional location,
:math:`\boldsymbol{\Sigma}` the :math:`p \times p`-dimensional shape
matrix, and :math:`\nu` is the degrees of freedom.
.. versionadded:: 1.6.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_t
>>> x, y = np.mgrid[-1:3:.01, -2:1.5:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_t([1.0, -0.5], [[2.1, 0.3], [0.3, 1.5]], df=2)
>>> fig, ax = plt.subplots(1, 1)
>>> ax.set_aspect('equal')
>>> plt.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
"""Initialize a multivariate t-distributed random variable.
Parameters
----------
seed : Random state.
"""
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params)
self._random_state = check_random_state(seed)
def __call__(self, loc=None, shape=1, df=1, allow_singular=False,
seed=None):
"""Create a frozen multivariate t-distribution.
See `multivariate_t_frozen` for parameters.
"""
if df == np.inf:
return multivariate_normal_frozen(mean=loc, cov=shape,
allow_singular=allow_singular,
seed=seed)
return multivariate_t_frozen(loc=loc, shape=shape, df=df,
allow_singular=allow_singular, seed=seed)
def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False):
"""Multivariate t-distribution probability density function.
Parameters
----------
x : array_like
Points at which to evaluate the probability density function.
%(_mvt_doc_default_callparams)s
Returns
-------
pdf : Probability density function evaluated at `x`.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.pdf(x, loc, shape, df)
array([0.00075713])
"""
dim, loc, shape, df = self._process_parameters(loc, shape, df)
x = self._process_quantiles(x, dim)
shape_info = _PSD(shape, allow_singular=allow_singular)
logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df,
dim, shape_info.rank)
return np.exp(logpdf)
def logpdf(self, x, loc=None, shape=1, df=1):
"""Log of the multivariate t-distribution probability density function.
Parameters
----------
x : array_like
Points at which to evaluate the log of the probability density
function.
%(_mvt_doc_default_callparams)s
Returns
-------
logpdf : Log of the probability density function evaluated at `x`.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.logpdf(x, loc, shape, df)
array([-7.1859802])
See Also
--------
pdf : Probability density function.
"""
dim, loc, shape, df = self._process_parameters(loc, shape, df)
x = self._process_quantiles(x, dim)
shape_info = _PSD(shape)
return self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim,
shape_info.rank)
def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank):
"""Utility method `pdf`, `logpdf` for parameters.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability density
function.
loc : ndarray
Location of the distribution.
prec_U : ndarray
A decomposition such that `np.dot(prec_U, prec_U.T)` is the inverse
of the shape matrix.
log_pdet : float
Logarithm of the determinant of the shape matrix.
df : float
Degrees of freedom of the distribution.
dim : int
Dimension of the quantiles x.
rank : int
Rank of the shape matrix.
Notes
-----
As this function does no argument checking, it should not be called
directly; use 'logpdf' instead.
"""
if df == np.inf:
return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank)
dev = x - loc
maha = np.square(np.dot(dev, prec_U)).sum(axis=-1)
t = 0.5 * (df + dim)
A = gammaln(t)
B = gammaln(0.5 * df)
C = dim/2. * np.log(df * np.pi)
D = 0.5 * log_pdet
E = -t * np.log(1 + (1./df) * maha)
return _squeeze_output(A - B - C - D + E)
def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None):
"""Draw random samples from a multivariate t-distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `P`), where `P` is the
dimension of the random variable.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.rvs(loc, shape, df)
array([[0.93477495, 3.00408716]])
"""
# For implementation details, see equation (3):
#
        # Hofert, "On Sampling from the Multivariate t Distribution", 2013
# http://rjournal.github.io/archive/2013-2/hofert.pdf
#
dim, loc, shape, df = self._process_parameters(loc, shape, df)
if random_state is not None:
rng = check_random_state(random_state)
else:
rng = self._random_state
if np.isinf(df):
x = np.ones(size)
else:
x = rng.chisquare(df, size=size) / df
z = rng.multivariate_normal(np.zeros(dim), shape, size=size)
samples = loc + z / np.sqrt(x)[:, None]
return _squeeze_output(samples)
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _process_parameters(self, loc, shape, df):
"""
Infer dimensionality from location array and shape matrix, handle
defaults, and ensure compatible dimensions.
"""
if loc is None and shape is None:
loc = np.asarray(0, dtype=float)
shape = np.asarray(1, dtype=float)
dim = 1
elif loc is None:
shape = np.asarray(shape, dtype=float)
if shape.ndim < 2:
dim = 1
else:
dim = shape.shape[0]
loc = np.zeros(dim)
elif shape is None:
loc = np.asarray(loc, dtype=float)
dim = loc.size
shape = np.eye(dim)
else:
shape = np.asarray(shape, dtype=float)
loc = np.asarray(loc, dtype=float)
dim = loc.size
if dim == 1:
loc.shape = (1,)
shape.shape = (1, 1)
if loc.ndim != 1 or loc.shape[0] != dim:
raise ValueError("Array 'loc' must be a vector of length %d." %
dim)
if shape.ndim == 0:
shape = shape * np.eye(dim)
elif shape.ndim == 1:
shape = np.diag(shape)
elif shape.ndim == 2 and shape.shape != (dim, dim):
rows, cols = shape.shape
if rows != cols:
                msg = ("Array 'shape' must be square if it is two dimensional,"
                       " but shape.shape = %s." % str(shape.shape))
            else:
                msg = ("Dimension mismatch: array 'shape' is of shape %s,"
                       " but 'loc' is a vector of length %d.")
                msg = msg % (str(shape.shape), len(loc))
raise ValueError(msg)
elif shape.ndim > 2:
            raise ValueError("Array 'shape' must be at most two-dimensional,"
                             " but shape.ndim = %d" % shape.ndim)
# Process degrees of freedom.
if df is None:
df = 1
elif df <= 0:
raise ValueError("'df' must be greater than zero.")
elif np.isnan(df):
raise ValueError("'df' is 'nan' but must be greater than zero or 'np.inf'.")
return dim, loc, shape, df
class multivariate_t_frozen(multi_rv_frozen):
def __init__(self, loc=None, shape=1, df=1, allow_singular=False,
seed=None):
"""Create a frozen multivariate t distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
Examples
--------
>>> loc = np.zeros(3)
>>> shape = np.eye(3)
>>> df = 10
>>> dist = multivariate_t(loc, shape, df)
>>> dist.rvs()
array([[ 0.81412036, -1.53612361, 0.42199647]])
>>> dist.pdf([1, 1, 1])
array([0.01237803])
"""
self._dist = multivariate_t_gen(seed)
dim, loc, shape, df = self._dist._process_parameters(loc, shape, df)
self.dim, self.loc, self.shape, self.df = dim, loc, shape, df
self.shape_info = _PSD(shape, allow_singular=allow_singular)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
U = self.shape_info.U
log_pdet = self.shape_info.log_pdet
return self._dist._logpdf(x, self.loc, U, log_pdet, self.df, self.dim,
self.shape_info.rank)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(loc=self.loc,
shape=self.shape,
df=self.df,
size=size,
random_state=random_state)
multivariate_t = multivariate_t_gen()
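# A sketch of the limiting behaviour noted in the parameter docs above: for a
# very large df the multivariate t log-density is close to the multivariate
# normal one. The test point, loc and shape values are arbitrary.
def _multivariate_t_limit_sketch():
    x = [0.4, 5.0]
    loc = [0.0, 1.0]
    shape = [[1.0, 0.1], [0.1, 1.0]]
    lp_t = multivariate_t.logpdf(x, loc, shape, df=1e9)
    lp_norm = multivariate_normal.logpdf(x, mean=loc, cov=shape)
    return np.isclose(lp_t, lp_norm)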
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_t_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_t_gen.__dict__[name]
method_frozen = multivariate_t_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
mvt_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvt_docdict_params)
_mhg_doc_default_callparams = """\
m : array_like
The number of each type of object in the population.
That is, :math:`m[i]` is the number of objects of
type :math:`i`.
n : array_like
The number of samples taken from the population.
"""
_mhg_doc_callparams_note = """\
`m` must be an array of positive integers. If the quantile
:math:`i` contains values out of the range :math:`[0, m_i]`
where :math:`m_i` is the number of objects of type :math:`i`
in the population or if the parameters are inconsistent with one
another (e.g. ``x.sum() != n``), methods return the appropriate
value (e.g. ``0`` for ``pmf``). If `m` or `n` contain negative
values, the result will contain ``nan`` there.
"""
_mhg_doc_frozen_callparams = ""
_mhg_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mhg_docdict_params = {
'_doc_default_callparams': _mhg_doc_default_callparams,
'_doc_callparams_note': _mhg_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mhg_docdict_noparams = {
'_doc_default_callparams': _mhg_doc_frozen_callparams,
'_doc_callparams_note': _mhg_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_hypergeom_gen(multi_rv_generic):
r"""A multivariate hypergeometric random variable.
Methods
-------
``pmf(x, m, n)``
Probability mass function.
``logpmf(x, m, n)``
Log of the probability mass function.
``rvs(m, n, size=1, random_state=None)``
Draw random samples from a multivariate hypergeometric
distribution.
``mean(m, n)``
Mean of the multivariate hypergeometric distribution.
``var(m, n)``
Variance of the multivariate hypergeometric distribution.
``cov(m, n)``
Compute the covariance matrix of the multivariate
hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
The probability mass function for `multivariate_hypergeom` is
.. math::
P(X_1 = x_1, X_2 = x_2, \ldots, X_k = x_k) = \frac{\binom{m_1}{x_1}
\binom{m_2}{x_2} \cdots \binom{m_k}{x_k}}{\binom{M}{n}}, \\ \quad
(x_1, x_2, \ldots, x_k) \in \mathbb{N}^k \text{ with }
\sum_{i=1}^k x_i = n
where :math:`m_i` are the number of objects of type :math:`i`, :math:`M`
is the total number of objects in the population (sum of all the
:math:`m_i`), and :math:`n` is the size of the sample to be taken
from the population.
.. versionadded:: 1.6.0
Examples
--------
To evaluate the probability mass function of the multivariate
hypergeometric distribution, with a dichotomous population of size
:math:`10` and :math:`20`, at a sample of size :math:`12` with
:math:`8` objects of the first type and :math:`4` objects of the
second type, use:
>>> from scipy.stats import multivariate_hypergeom
>>> multivariate_hypergeom.pmf(x=[8, 4], m=[10, 20], n=12)
0.0025207176631464523
The `multivariate_hypergeom` distribution is identical to the
corresponding `hypergeom` distribution (tiny numerical differences
notwithstanding) when only two types (good and bad) of objects
are present in the population as in the example above. Consider
another example for a comparison with the hypergeometric distribution:
>>> from scipy.stats import hypergeom
>>> multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4)
0.4395604395604395
>>> hypergeom.pmf(k=3, M=15, n=4, N=10)
0.43956043956044005
The functions ``pmf``, ``logpmf``, ``mean``, ``var``, ``cov``, and ``rvs``
support broadcasting, under the convention that the vector parameters
(``x``, ``m``, and ``n``) are interpreted as if each row along the last
axis is a single object. For instance, we can combine the previous two
calls to `multivariate_hypergeom` as
>>> multivariate_hypergeom.pmf(x=[[8, 4], [3, 1]], m=[[10, 20], [10, 5]],
... n=[12, 4])
array([0.00252072, 0.43956044])
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``m.shape[-1]``. For example:
>>> multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12])
array([[[ 1.05, -1.05],
[-1.05, 1.05]],
[[ 1.56, -1.56],
[-1.56, 1.56]]])
That is, ``result[0]`` is equal to
``multivariate_hypergeom.cov(m=[7, 9], n=8)`` and ``result[1]`` is equal
to ``multivariate_hypergeom.cov(m=[10, 15], n=12)``.
Alternatively, the object may be called (as a function) to fix the `m`
and `n` parameters, returning a "frozen" multivariate hypergeometric
random variable.
>>> rv = multivariate_hypergeom(m=[10, 20], n=12)
>>> rv.pmf(x=[8, 4])
0.0025207176631464523
See Also
--------
scipy.stats.hypergeom : The hypergeometric distribution.
scipy.stats.multinomial : The multinomial distribution.
References
----------
.. [1] The Multivariate Hypergeometric Distribution,
http://www.randomservices.org/random/urn/MultiHypergeometric.html
.. [2] Thomas J. Sargent and John Stachurski, 2020,
Multivariate Hypergeometric Distribution
https://python.quantecon.org/_downloads/pdf/multi_hyper.pdf
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mhg_docdict_params)
def __call__(self, m, n, seed=None):
"""Create a frozen multivariate_hypergeom distribution.
See `multivariate_hypergeom_frozen` for more information.
"""
return multivariate_hypergeom_frozen(m, n, seed=seed)
def _process_parameters(self, m, n):
m = np.asarray(m)
n = np.asarray(n)
if m.size == 0:
m = m.astype(int)
if n.size == 0:
n = n.astype(int)
if not np.issubdtype(m.dtype, np.integer):
            raise TypeError("'m' must be an array of integers.")
if not np.issubdtype(n.dtype, np.integer):
            raise TypeError("'n' must be an array of integers.")
if m.ndim == 0:
raise ValueError("'m' must be an array with"
" at least one dimension.")
# check for empty arrays
if m.size != 0:
n = n[..., np.newaxis]
m, n = np.broadcast_arrays(m, n)
# check for empty arrays
if m.size != 0:
n = n[..., 0]
mcond = m < 0
M = m.sum(axis=-1)
ncond = (n < 0) | (n > M)
return M, m, n, mcond, ncond, np.any(mcond, axis=-1) | ncond
def _process_quantiles(self, x, M, m, n):
x = np.asarray(x)
if not np.issubdtype(x.dtype, np.integer):
            raise TypeError("'x' must be an array of integers.")
if x.ndim == 0:
raise ValueError("'x' must be an array with"
" at least one dimension.")
if not x.shape[-1] == m.shape[-1]:
raise ValueError(f"Size of each quantile must be size of 'm': "
f"received {x.shape[-1]}, "
f"but expected {m.shape[-1]}.")
# check for empty arrays
if m.size != 0:
n = n[..., np.newaxis]
M = M[..., np.newaxis]
x, m, n, M = np.broadcast_arrays(x, m, n, M)
# check for empty arrays
if m.size != 0:
n, M = n[..., 0], M[..., 0]
xcond = (x < 0) | (x > m)
return (x, M, m, n, xcond,
np.any(xcond, axis=-1) | (x.sum(axis=-1) != n))
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
return bad_value
if result.ndim == 0:
return result[()]
return result
def _logpmf(self, x, M, m, n, mxcond, ncond):
        # This expression for the pmf uses the identity
        # comb(n, r) = beta(n+1, 1) / beta(r+1, n-r+1)
num = np.zeros_like(m, dtype=np.float_)
den = np.zeros_like(n, dtype=np.float_)
m, x = m[~mxcond], x[~mxcond]
M, n = M[~ncond], n[~ncond]
num[~mxcond] = (betaln(m+1, 1) - betaln(x+1, m-x+1))
den[~ncond] = (betaln(M+1, 1) - betaln(n+1, M-n+1))
num[mxcond] = np.nan
den[ncond] = np.nan
num = num.sum(axis=-1)
return num - den
def logpmf(self, x, m, n):
"""Log of the multivariate hypergeometric probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
M, m, n, mcond, ncond, mncond = self._process_parameters(m, n)
(x, M, m, n, xcond,
xcond_reduced) = self._process_quantiles(x, M, m, n)
mxcond = mcond | xcond
ncond = ncond | np.zeros(n.shape, dtype=np.bool_)
result = self._logpmf(x, M, m, n, mxcond, ncond)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond_reduced | np.zeros(mncond.shape, dtype=np.bool_)
result = self._checkresult(result, xcond_, np.NINF)
# replace values bad for n or m; broadcast
# mncond to the right shape
mncond_ = mncond | np.zeros(xcond_reduced.shape, dtype=np.bool_)
return self._checkresult(result, mncond_, np.nan)
def pmf(self, x, m, n):
"""Multivariate hypergeometric probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
            Probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
out = np.exp(self.logpmf(x, m, n))
return out
def mean(self, m, n):
"""Mean of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : array_like or scalar
The mean of the distribution
"""
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M, n = M[..., np.newaxis], n[..., np.newaxis]
cond = (M == 0)
M = np.ma.masked_array(M, mask=cond)
mu = n*(m/M)
if m.size != 0:
mncond = (mncond[..., np.newaxis] |
np.zeros(mu.shape, dtype=np.bool_))
return self._checkresult(mu, mncond, np.nan)
def var(self, m, n):
"""Variance of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
array_like
The variances of the components of the distribution. This is
the diagonal of the covariance matrix of the distribution
"""
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M, n = M[..., np.newaxis], n[..., np.newaxis]
cond = (M == 0) & (M-1 == 0)
M = np.ma.masked_array(M, mask=cond)
output = n * m/M * (M-m)/M * (M-n)/(M-1)
if m.size != 0:
mncond = (mncond[..., np.newaxis] |
np.zeros(output.shape, dtype=np.bool_))
return self._checkresult(output, mncond, np.nan)
def cov(self, m, n):
"""Covariance matrix of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : array_like
The covariance matrix of the distribution
"""
# see [1]_ for the formula and [2]_ for implementation
# cov( x_i,x_j ) = -n * (M-n)/(M-1) * (K_i*K_j) / (M**2)
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M = M[..., np.newaxis, np.newaxis]
n = n[..., np.newaxis, np.newaxis]
cond = (M == 0) & (M-1 == 0)
M = np.ma.masked_array(M, mask=cond)
output = (-n * (M-n)/(M-1) *
np.einsum("...i,...j->...ij", m, m) / (M**2))
# check for empty arrays
if m.size != 0:
M, n = M[..., 0, 0], n[..., 0, 0]
cond = cond[..., 0, 0]
dim = m.shape[-1]
# diagonal entries need to be computed differently
for i in range(dim):
output[..., i, i] = (n * (M-n) * m[..., i]*(M-m[..., i]))
output[..., i, i] = output[..., i, i] / (M-1)
output[..., i, i] = output[..., i, i] / (M**2)
if m.size != 0:
mncond = (mncond[..., np.newaxis, np.newaxis] |
np.zeros(output.shape, dtype=np.bool_))
return self._checkresult(output, mncond, np.nan)
def rvs(self, m, n, size=None, random_state=None):
"""Draw random samples from a multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw. Default is ``None``, in which case a
single variate is returned as an array with shape ``m.shape``.
%(_doc_random_state)s
Returns
-------
rvs : array_like
Random variates of shape ``size`` or ``m.shape``
(if ``size=None``).
Notes
-----
%(_doc_callparams_note)s
Also note that NumPy's `multivariate_hypergeometric` sampler is not
used as it doesn't support broadcasting.
"""
M, m, n, _, _, _ = self._process_parameters(m, n)
random_state = self._get_random_state(random_state)
if size is not None and isinstance(size, int):
size = (size, )
if size is None:
rvs = np.empty(m.shape, dtype=m.dtype)
else:
rvs = np.empty(size + (m.shape[-1], ), dtype=m.dtype)
rem = M
# This sampler has been taken from numpy gh-13794
# https://github.com/numpy/numpy/pull/13794
for c in range(m.shape[-1] - 1):
rem = rem - m[..., c]
rvs[..., c] = ((n != 0) *
random_state.hypergeometric(m[..., c], rem,
n + (n == 0),
size=size))
n = n - rvs[..., c]
rvs[..., m.shape[-1] - 1] = n
return rvs
multivariate_hypergeom = multivariate_hypergeom_gen()
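# A sketch of the consistency highlighted in the class docstring: with two
# object types the distribution reduces to the ordinary hypergeometric. The
# counts mirror the documented example; the import is local to the helper.
def _multivariate_hypergeom_sketch():
    from scipy.stats import hypergeom
    p_mv = multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4)
    p_uv = hypergeom.pmf(k=3, M=15, n=4, N=10)
    return np.isclose(p_mv, p_uv)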
class multivariate_hypergeom_frozen(multi_rv_frozen):
def __init__(self, m, n, seed=None):
self._dist = multivariate_hypergeom_gen(seed)
(self.M, self.m, self.n,
self.mcond, self.ncond,
self.mncond) = self._dist._process_parameters(m, n)
# monkey patch self._dist
def _process_parameters(m, n):
return (self.M, self.m, self.n,
self.mcond, self.ncond,
self.mncond)
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.m, self.n)
def pmf(self, x):
return self._dist.pmf(x, self.m, self.n)
def mean(self):
return self._dist.mean(self.m, self.n)
def var(self):
return self._dist.var(self.m, self.n)
def cov(self):
return self._dist.cov(self.m, self.n)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.m, self.n,
size=size,
random_state=random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_hypergeom and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'var', 'cov', 'rvs']:
method = multivariate_hypergeom_gen.__dict__[name]
method_frozen = multivariate_hypergeom_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, mhg_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
mhg_docdict_params)
| bsd-3-clause |
compston/TAP-Workshop | utilities/Gnip-Python-Search-API-Utilities/gnip_time_series.py | 1 | 28752 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#######################################################
# This script wraps simple timeseries analysis tools
# and access to the Gnip Search API into a simple tool
# to help the analyst quickly iterate on filters
# and understand time series trends and events.
#
# If you find this useful or find a bug you don't want
# to fix for yourself, please let me know at @drskippy
#######################################################
__author__="Scott Hendrickson"
import ConfigParser
import argparse
import calendar
import codecs
import csv
import datetime
import json
import logging
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import statsmodels.api as sm
import string
import sys
import time
from functools import partial
from operator import itemgetter
from scipy import signal
from search.results import *
reload(sys)
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
sys.stdin = codecs.getreader('utf-8')(sys.stdin)
# basic defaults
FROM_PICKLE = False
DEFAULT_CONFIG_FILENAME = os.path.join(".",".gnip")
DATE_FMT = "%Y%m%d%H%M"
DATE_FMT2 = "%Y-%m-%dT%H:%M:%S"
LOG_FILE_PATH = os.path.join(".","time_series.log")
# set up simple logging
logging.basicConfig(filename=LOG_FILE_PATH,level=logging.DEBUG)
logging.info("#"*70)
logging.info("################# started {} #################".format(datetime.datetime.now()))
# tunable defaults
CHAR_UPPER_CUTOFF = 20 # don't include tokens longer than CHAR_UPPER_CUTOFF
TWEET_SAMPLE = 4000 # tweets to collect for peak topics
MIN_SNR = 2.0 # signal to noise threshold for peak detection
MAX_N_PEAKS = 7 # maximum number of peaks to output
MAX_PEAK_WIDTH = 20 # max peak width in periods
MIN_PEAK_WIDTH = 1 # min peak width in periods
SEARCH_PEAK_WIDTH = 3 # min peak width in periods
N_MOVING = 4 # average over buckets
OUTLIER_FRAC = 0.8 # cut off values over 80% above or below the average
PLOTS_PREFIX = os.path.join(".","plots")
PLOT_DELTA_Y = 1.2 # spacing of y values in dotplot
logging.debug("CHAR_UPPER_CUTOFF={},TWEET_SAMPLE={},MIN_SNR={},MAX_N_PEAKS={},MAX_PEAK_WIDTH={},MIN_PEAK_WIDTH={},SEARCH_PEAK_WIDTH={},N_MOVING={},OUTLIER_FRAC={},PLOTS_PREFIX={},PLOT_DELTA_Y={}".format(
CHAR_UPPER_CUTOFF
, TWEET_SAMPLE
, MIN_SNR
, MAX_N_PEAKS
, MAX_PEAK_WIDTH
, MIN_PEAK_WIDTH
, SEARCH_PEAK_WIDTH
, N_MOVING
, OUTLIER_FRAC
, PLOTS_PREFIX
, PLOT_DELTA_Y ))
class TimeSeries():
"""Containter class for data collected from the API and associated analysis outputs"""
pass
class GnipSearchTimeseries():
def __init__(self, token_list_size=40):
"""Retrieve and analysis timesseries and associated interesting trends, spikes and tweet content."""
# default tokenizer and character limit
char_upper_cutoff = CHAR_UPPER_CUTOFF
self.token_list_size = int(token_list_size)
#############################################
# CONFIG FILE/COMMAND LINE OPTIONS PATTERN
# parse config file
config_from_file = self.config_file()
# set required fields to None. Sequence of setting is:
# (1) config file
# (2) command line
# if still none, then fail
self.user = None
self.password = None
self.stream_url = None
if config_from_file is not None:
try:
                # command line options take precedence if they exist
self.user = config_from_file.get('creds', 'un')
self.password = config_from_file.get('creds', 'pwd')
self.stream_url = config_from_file.get('endpoint', 'url')
except (ConfigParser.NoOptionError,
ConfigParser.NoSectionError) as e:
logging.warn("Error reading configuration file ({}), ignoring configuration file.".format(e))
# parse the command line options
self.options = self.args().parse_args()
self.options.filter = self.options.filter.decode("utf-8")
self.options.second_filter = self.options.second_filter.decode("utf-8")
# set up the job
        # override config file with command line args if present
if self.options.user is not None:
self.user = self.options.user
if self.options.password is not None:
self.password = self.options.password
if self.options.stream_url is not None:
self.stream_url = self.options.stream_url
# search v2 uses a different url
if "data-api.twitter.com" not in self.stream_url:
logging.error("gnipSearch timeline tools require Search V2. Exiting.")
sys.stderr.write("gnipSearch timeline tools require Search V2. Exiting.\n")
sys.exit(-1)
        # set some options that should not be changed for this analysis
self.options.paged = True
self.options.search_v2 = True
self.options.max = 500
self.options.query = False
# check paths
if self.options.output_file_path is not None:
if not os.path.exists(self.options.output_file_path):
logging.error("Path {} doesn't exist. Please create it and try again. Exiting.".format(
self.options.output_file_path))
sys.stderr.write("Path {} doesn't exist. Please create it and try again. Exiting.\n".format(
self.options.output_file_path))
sys.exit(-1)
if not os.path.exists(PLOTS_PREFIX):
logging.error("Path {} doesn't exist. Please create it and try again. Exiting.".format(
PLOTS_PREFIX))
sys.stderr.write("Path {} doesn't exist. Please create it and try again. Exiting.\n".format(
PLOTS_PREFIX))
sys.exit(-1)
# log the attributes of this class including all of the options
for v in dir(self):
# except don't log the password!
if not v.startswith('__') and not callable(getattr(self,v)) and not v.lower().startswith('password'):
tmp = str(getattr(self,v))
tmp = re.sub("password=.*,", "password=XXXXXXX,", tmp)
logging.debug(" {}={}".format(v, tmp))
def config_file(self):
"""Search for a valid config file in the standard locations."""
config = ConfigParser.ConfigParser()
        # (1) default config file name takes precedence
config.read(DEFAULT_CONFIG_FILENAME)
logging.info("attempting to read config file {}".format(DEFAULT_CONFIG_FILENAME))
if not config.has_section("creds"):
# (2) environment variable file name second
if 'GNIP_CONFIG_FILE' in os.environ:
config_filename = os.environ['GNIP_CONFIG_FILE']
logging.info("attempting to read config file {}".format(config_filename))
config.read(config_filename)
if config.has_section("creds") and config.has_section("endpoint"):
return config
else:
logging.warn("no creds or endpoint section found in config file, attempting to proceed without config info from file")
return None
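    # A sketch of the config file layout that config_file() expects; the
    # values are placeholders, and the real url comes from your Gnip console
    # (it must contain "data-api.twitter.com" to pass the Search V2 check):
    #
    #   [creds]
    #   un = your_user_name
    #   pwd = your_password
    #
    #   [endpoint]
    #   url = https://data-api.twitter.com/search/...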
def args(self):
"Set up the command line argments and the associated help strings."""
twitter_parser = argparse.ArgumentParser(
description="GnipSearch timeline tools")
twitter_parser.add_argument("-b", "--bucket", dest="count_bucket",
default="day",
help="Bucket size for counts query. Options are day, hour, minute (default is 'day').")
twitter_parser.add_argument("-e", "--end-date", dest="end",
default=None,
help="End of datetime window, format 'YYYY-mm-DDTHH:MM' (default: most recent activities)")
twitter_parser.add_argument("-f", "--filter", dest="filter",
default="from:jrmontag OR from:gnip",
help="PowerTrack filter rule (See: http://support.gnip.com/customer/portal/articles/901152-powertrack-operators)")
twitter_parser.add_argument("-g", "--second_filter", dest="second_filter",
default=None,
help="Use a second filter to show correlation plots of -f timeline vs -g timeline.")
twitter_parser.add_argument("-l", "--stream-url", dest="stream_url",
default=None,
help="Url of search endpoint. (See your Gnip console.)")
twitter_parser.add_argument("-p", "--password", dest="password", default=None,
help="Password")
twitter_parser.add_argument("-s", "--start-date", dest="start",
default=None,
help="Start of datetime window, format 'YYYY-mm-DDTHH:MM' (default: 30 days ago)")
twitter_parser.add_argument("-u", "--user-name", dest="user",
default=None,
help="User name")
twitter_parser.add_argument("-t", "--get-topics", dest="get_topics", action="store_true",
default=False,
help="Set flag to evaluate peak topics (this may take a few minutes)")
twitter_parser.add_argument("-w", "--output-file-path", dest="output_file_path",
default=None,
            help="Create files in ./OUTPUT-FILE-PATH. This path must exist and will not be created. This option is only available with the -a option. Default is no output files.")
return twitter_parser
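    # Illustrative invocation only -- the script name, filter, dates and output
    # path below are placeholders, not values required by this tool:
    #
    #   python gnip_time_series.py -f "from:gnip" -b hour \
    #       -s 2015-01-01T00:00 -e 2015-01-08T00:00 -t -w ./output
    #
    # -b sets the counts bucket, -t additionally retrieves tweets to extract
    # per-peak n-grams, and -w writes result files under ./output, which must
    # already exist (see the path checks in __init__).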
    def get_results(self):
        """Execute API calls to retrieve the time series data and the tweet data we need for analysis.
        The analysis is performed as we go because later steps often need the earlier results."""
######################
# (1) Get the timeline
######################
logging.info("retrieving timeline counts")
results_timeseries = Results( self.user
, self.password
, self.stream_url
, self.options.paged
, self.options.output_file_path
, pt_filter=self.options.filter
, max_results=int(self.options.max)
, start=self.options.start
, end=self.options.end
, count_bucket=self.options.count_bucket
, show_query=self.options.query
, search_v2=self.options.search_v2
)
# sort by date
res_timeseries = sorted(results_timeseries.get_time_series(), key = itemgetter(0))
# calculate total time interval span
time_min_date = min(res_timeseries, key = itemgetter(2))[2]
time_max_date = max(res_timeseries, key = itemgetter(2))[2]
time_min = float(calendar.timegm(time_min_date.timetuple()))
time_max = float(calendar.timegm(time_max_date.timetuple()))
time_span = time_max - time_min
logging.debug("time_min = {}, time_max = {}, time_span = {}".format(time_min, time_max, time_span))
# create a simple object to hold our data
ts = TimeSeries()
ts.dates = []
ts.x = []
ts.counts = []
# load and format data
for i in res_timeseries:
ts.dates.append(i[2])
ts.counts.append(float(i[1]))
            # create an independent variable in the interval [0.0, 1.0]
ts.x.append((calendar.timegm(datetime.datetime.strptime(i[0], DATE_FMT).timetuple()) - time_min)/time_span)
logging.info("read {} time items from search API".format(len(ts.dates)))
if len(ts.dates) < 35:
            logging.warn("peak detection with fewer than ~35 points is unreliable!")
logging.debug('dates: ' + ','.join(map(str, ts.dates[:10])) + "...")
logging.debug('counts: ' + ','.join(map(str, ts.counts[:10])) + "...")
logging.debug('indep var: ' + ','.join(map(str, ts.x[:10])) + "...")
######################
# (1.1) Get a second timeline?
######################
if self.options.second_filter is not None:
logging.info("retrieving second timeline counts")
results_timeseries = Results( self.user
, self.password
, self.stream_url
, self.options.paged
, self.options.output_file_path
, pt_filter=self.options.second_filter
, max_results=int(self.options.max)
, start=self.options.start
, end=self.options.end
, count_bucket=self.options.count_bucket
, show_query=self.options.query
, search_v2=self.options.search_v2
)
# sort by date
second_res_timeseries = sorted(results_timeseries.get_time_series(), key = itemgetter(0))
if len(second_res_timeseries) != len(res_timeseries):
logging.error("time series of different sizes not allowed")
else:
ts.second_counts = []
# load and format data
for i in second_res_timeseries:
ts.second_counts.append(float(i[1]))
logging.info("read {} time items from search API".format(len(ts.second_counts)))
logging.debug('second counts: ' + ','.join(map(str, ts.second_counts[:10])) + "...")
######################
# (2) Detrend and remove prominent period
######################
logging.info("detrending timeline counts")
no_trend = signal.detrend(np.array(ts.counts))
# determine period of data
df = (ts.dates[1] - ts.dates[0]).total_seconds()
        if df == 86400:
            # day counts, average over week
            n_buckets = 7
            bucket_key = lambda t: t.weekday()
        elif df == 3600:
            # hour counts, average over day
            n_buckets = 24
            bucket_key = lambda t: t.hour
        elif df == 60:
            # minute counts, average over day
            n_buckets = 24*60
            bucket_key = lambda t: t.hour * 60 + t.minute
        else:
            sys.stderr.write("Weird interval problem! Exiting.\n")
            logging.error("Weird interval problem! Exiting.\n")
            sys.exit()
        # bucket the detrended counts by their position in the period
        n_avgs = {i:[] for i in range(n_buckets)}
        for t,c in zip(ts.dates, no_trend):
            n_avgs[bucket_key(t)].append(c)
logging.info("averaging over periods of {} buckets".format(n_buckets))
# remove upper outliers from averages
df_avg_all = {i:np.average(n_avgs[i]) for i in range(n_buckets)}
logging.debug("bucket averages: {}".format(','.join(map(str, [df_avg_all[i] for i in df_avg_all]))))
n_avgs_remove_outliers = {i: [j for j in n_avgs[i]
if abs(j - df_avg_all[i])/df_avg_all[i] < (1. + OUTLIER_FRAC) ]
for i in range(n_buckets)}
df_avg = {i:np.average(n_avgs_remove_outliers[i]) for i in range(n_buckets)}
logging.debug("bucket averages w/o outliers: {}".format(','.join(map(str, [df_avg[i] for i in df_avg]))))
# flatten cycle
        ts.counts_no_cycle_trend = np.array([no_trend[i] - df_avg[bucket_key(ts.dates[i])] for i in range(len(ts.counts))])
logging.debug('no trend: ' + ','.join(map(str, ts.counts_no_cycle_trend[:10])) + "...")
######################
# (3) Moving average
######################
ts.moving = np.convolve(ts.counts, np.ones((N_MOVING,))/N_MOVING, mode='valid')
logging.debug('moving ({}): '.format(N_MOVING) + ','.join(map(str, ts.moving[:10])) + "...")
######################
# (4) Peak detection
######################
peakind = signal.find_peaks_cwt(ts.counts_no_cycle_trend, np.arange(MIN_PEAK_WIDTH, MAX_PEAK_WIDTH), min_snr = MIN_SNR)
n_peaks = min(MAX_N_PEAKS, len(peakind))
logging.debug('peaks ({}): '.format(n_peaks) + ','.join(map(str, peakind)))
logging.debug('peaks ({}): '.format(n_peaks) + ','.join(map(str, [ts.dates[i] for i in peakind])))
# top peaks determined by peak volume, better way?
# peak detector algorithm:
# * middle of peak (of unknown width)
# * finds peaks up to MAX_PEAK_WIDTH wide
#
        # algorithm for getting peak start, peak and end parameters:
# find max, find fwhm,
# find start, step past peak, keep track of volume and peak height,
# stop at end of period or when timeseries turns upward
peaks = []
for i in peakind:
# find the first max in the possible window
i_start = max(0, i - SEARCH_PEAK_WIDTH)
i_finish = min(len(ts.counts) - 1, i + SEARCH_PEAK_WIDTH)
p_max = max(ts.counts[i_start:i_finish])
h_max = p_max/2.
# i_max not center
i_max = i_start + ts.counts[i_start:i_finish].index(p_max)
i_start, i_finish = i_max, i_max
# start at peak, and go back and forward to find start and end
while i_start >= 1:
if (ts.counts[i_start - 1] <= h_max or
ts.counts[i_start - 1] >= ts.counts[i_start] or
i_start - 1 <= 0):
break
i_start -= 1
while i_finish < len(ts.counts) - 1:
if (ts.counts[i_finish + 1] <= h_max or
ts.counts[i_finish + 1] >= ts.counts[i_finish] or
i_finish + 1 >= len(ts.counts)):
break
i_finish += 1
# i is center of peak so balance window
delta_i = max(1, i - i_start)
if i_finish - i > delta_i:
delta_i = i_finish - i
# final est of start and finish
i_finish = min(len(ts.counts) - 1, i + delta_i)
i_start = max(0, i - delta_i)
p_volume = sum(ts.counts[i_start:i_finish])
peaks.append([ i , p_volume , (i, i_start, i_max, i_finish
, h_max , p_max, p_volume
, ts.dates[i_start], ts.dates[i_max], ts.dates[i_finish])])
# top n_peaks by volume
top_peaks = sorted(peaks, key = itemgetter(1))[-n_peaks:]
# re-sort peaks by date
ts.top_peaks = sorted(top_peaks, key = itemgetter(0))
logging.debug('top peaks ({}): '.format(len(ts.top_peaks)) + ','.join(map(str, ts.top_peaks[:4])) + "...")
######################
# (5) high/low frequency
######################
ts.cycle, ts.trend = sm.tsa.filters.hpfilter(np.array(ts.counts))
logging.debug('cycle: ' + ','.join(map(str, ts.cycle[:10])) + "...")
logging.debug('trend: ' + ','.join(map(str, ts.trend[:10])) + "...")
######################
# (6) n-grams for top peaks
######################
ts.topics = []
if self.options.get_topics:
logging.info("retrieving tweets for peak topics")
for a in ts.top_peaks:
# start at peak
ds = datetime.datetime.strftime(a[2][8], DATE_FMT2)
# estimate how long to get TWEET_SAMPLE tweets
                # a[2][5] is max tweets per period
if a[2][5] > 0:
est_periods = float(TWEET_SAMPLE)/a[2][5]
else:
logging.warn("peak with zero max tweets ({}), setting est_periods to 1".format(a))
est_periods = 1
# df comes from above, in seconds
# time resolution is hours
est_time = max(int(est_periods * df), 60)
logging.debug("est_periods={}, est_time={}".format(est_periods, est_time))
#
if a[2][8] + datetime.timedelta(seconds=est_time) < a[2][9]:
de = datetime.datetime.strftime(a[2][8] + datetime.timedelta(seconds=est_time), DATE_FMT2)
elif a[2][8] < a[2][9]:
de = datetime.datetime.strftime(a[2][9], DATE_FMT2)
else:
de = datetime.datetime.strftime(a[2][8] + datetime.timedelta(seconds=60), DATE_FMT2)
                logging.info("retrieve data for peak index={} in date range [{},{}]".format(a[0], ds, de))
res = Results(
self.user
, self.password
, self.stream_url
, self.options.paged
, self.options.output_file_path
, pt_filter=self.options.filter
, max_results=int(self.options.max)
, start=ds
, end=de
, count_bucket=None
, show_query=self.options.query
, search_v2=self.options.search_v2
, hard_max = TWEET_SAMPLE
)
logging.info("retrieved {} records".format(len(res)))
n_grams_counts = list(res.get_top_grams(n=self.token_list_size))
ts.topics.append(n_grams_counts)
logging.debug('n_grams for peak index={}: '.format(a[0]) + ','.join(
map(str, [i[4].encode("utf-8","ignore") for i in n_grams_counts][:10])) + "...")
return ts
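    # Note: the TimeSeries object returned above is a plain attribute bag; after
    # get_results() it carries dates, x, counts, counts_no_cycle_trend, moving,
    # cycle, trend, top_peaks, topics and, when a second filter was given,
    # second_counts.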
def dotplot(self, x, labels, path = "dotplot.png"):
"""Makeshift dotplots in matplotlib. This is not completely general and encodes labels and
parameter selections that are particular to n-gram dotplots."""
logging.info("dotplot called, writing image to path={}".format(path))
if len(x) <= 1 or len(labels) <= 1:
raise ValueError("cannot make a dot plot with only 1 point")
# split n_gram_counts into 2 data sets
n = len(labels)/2
x1, x2 = x[:n], x[n:]
labels1, labels2 = labels[:n], labels[n:]
# create enough equally spaced y values for the horizontal lines
ys = [r*PLOT_DELTA_Y for r in range(1,len(labels2)+1)]
# give ourselves a little extra room on the plot
maxx = max(x)*1.05
maxy = max(ys)*1.05
# set up plots to be a factor taller than the default size
# make factor proportional to the number of n-grams plotted
size = plt.gcf().get_size_inches()
# factor of n/10 is empirical
scale_denom = 10
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1,figsize=(size[0], size[1]*n/scale_denom))
logging.debug("plotting top {} terms".format(n))
logging.debug("plot size=({},{})".format(size[0], size[1]*n/scale_denom))
# first plot 1-grams
ax1.set_xlim(0,maxx)
ax1.set_ylim(0,maxy)
ticks = ax1.yaxis.set_ticks(ys)
text = ax1.yaxis.set_ticklabels(labels1)
for ct, item in enumerate(labels1):
ax1.hlines(ys[ct], 0, maxx, linestyle='dashed', color='0.9')
ax1.plot(x1, ys, 'ko')
ax1.set_title("1-grams")
# second plot 2-grams
ax2.set_xlim(0,maxx)
ax2.set_ylim(0,maxy)
ticks = ax2.yaxis.set_ticks(ys)
text = ax2.yaxis.set_ticklabels(labels2)
for ct, item in enumerate(labels2):
ax2.hlines(ys[ct], 0, maxx, linestyle='dashed', color='0.9')
ax2.plot(x2, ys, 'ko')
ax2.set_title("2-grams")
ax2.set_xlabel("Fraction of Mentions")
#
plt.tight_layout()
plt.savefig(path)
plt.close("all")
    def plots(self, ts, out_type="png"):
        """Basic choice of plots for the analysis. If you wish to extend this class,
        override this method."""
        # create a valid file name; in this case an additional requirement is no spaces
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
filter_prefix_name = ''.join(c for c in self.options.filter if c in valid_chars)
filter_prefix_name = filter_prefix_name.replace(" ", "_")
if len(filter_prefix_name) > 16:
filter_prefix_name = filter_prefix_name[:16]
if self.options.second_filter is not None:
second_filter_prefix_name = ''.join(c for c in self.options.second_filter if c in valid_chars)
second_filter_prefix_name = second_filter_prefix_name.replace(" ", "_")
if len(second_filter_prefix_name) > 16:
second_filter_prefix_name = second_filter_prefix_name[:16]
######################
# timeline
######################
df0 = pd.Series(ts.counts, index=ts.dates)
df0.plot()
plt.ylabel("Counts")
plt.title(filter_prefix_name)
plt.tight_layout()
plt.savefig(os.path.join(PLOTS_PREFIX, '{}_{}.{}'.format(filter_prefix_name, "time_line", out_type)))
plt.close("all")
######################
# cycle and trend
######################
df1 = pd.DataFrame({"cycle":ts.cycle, "trend":ts.trend}, index=ts.dates)
df1.plot()
plt.ylabel("Counts")
plt.title(filter_prefix_name)
plt.tight_layout()
plt.savefig(os.path.join(PLOTS_PREFIX, '{}_{}.{}'.format(filter_prefix_name, "cycle_trend_line", out_type)))
plt.close("all")
######################
# moving avg
######################
df2 = pd.DataFrame({"moving":ts.moving}, index=ts.dates[:len(ts.moving)])
df2.plot()
plt.ylabel("Counts")
plt.title(filter_prefix_name)
plt.tight_layout()
plt.savefig(os.path.join(PLOTS_PREFIX, '{}_{}.{}'.format(filter_prefix_name, "mov_avg_line", out_type)))
plt.close("all")
######################
# timeline with peaks marked by vertical bands
######################
df3 = pd.Series(ts.counts, index=ts.dates)
df3.plot()
# peaks
for a in ts.top_peaks:
xs = a[2][7]
xp = a[2][8]
xe = a[2][9]
y = a[2][5]
# need to get x and y locs
plt.axvspan(xs, xe, ymin=0, ymax = y, linewidth=1, color='g', alpha=0.2)
plt.axvline(xp, ymin=0, ymax = y, linewidth=1, color='y')
plt.ylabel("Counts")
plt.title(filter_prefix_name)
plt.tight_layout()
plt.savefig(os.path.join(PLOTS_PREFIX, '{}_{}.{}'.format(filter_prefix_name, "time_peaks_line", out_type)))
plt.close("all")
######################
# n-grams to help determine topics of peaks
######################
for n, p in enumerate(ts.topics):
x = []
labels = []
for i in p:
x.append(i[1])
labels.append(i[4])
try:
logging.info("creating n-grams dotplot for peak {}".format(n))
path = os.path.join(PLOTS_PREFIX, "{}_{}_{}.{}".format(filter_prefix_name, "peak", n, out_type))
self.dotplot(x, labels, path)
except ValueError, e:
logging.error("{} - plot path={} skipped".format(e, path))
######################
# x vs y scatter plot for correlations
######################
if self.options.second_filter is not None:
logging.info("creating scatter for queries {} and {}".format(self.options.filter, self.options.second_filter))
df4 = pd.DataFrame({filter_prefix_name: ts.counts, second_filter_prefix_name:ts.second_counts})
df4.plot(kind='scatter', x=filter_prefix_name, y=second_filter_prefix_name)
plt.ylabel(second_filter_prefix_name)
plt.xlabel(filter_prefix_name)
plt.xlim([0, 1.05 * max(ts.counts)])
plt.ylim([0, 1.05 * max(ts.second_counts)])
plt.title("{} vs. {}".format(second_filter_prefix_name, filter_prefix_name))
plt.tight_layout()
plt.savefig(os.path.join(PLOTS_PREFIX, '{}_v_{}_{}.{}'.format(filter_prefix_name,
second_filter_prefix_name,
"scatter",
out_type)))
plt.close("all")
if __name__ == "__main__":
""" Simple command line utility."""
import pickle
g = GnipSearchTimeseries()
if FROM_PICKLE:
ts = pickle.load(open("./time_series.pickle", "rb"))
else:
ts = g.get_results()
pickle.dump(ts,open("./time_series.pickle", "wb"))
g.plots(ts)
| mit |
mpoquet/execo | src/execo_g5k/planning.py | 1 | 49114 | # Copyright 2009-2016 INRIA Rhone-Alpes, Service Experimentation et
# Developpement
#
# This file is part of Execo.
#
# Execo is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Execo is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Execo. If not, see <http://www.gnu.org/licenses/>
"""Module provides functions to help you to plan your experiment on Grid'5000.
"""
from .charter import g5k_charter_time, get_next_charter_period
from copy import deepcopy
from datetime import timedelta
from execo import logger, Host
from execo.log import style
from execo.time_utils import timedelta_to_seconds, get_seconds, \
unixts_to_datetime, get_unixts, format_date
from execo_g5k import OarSubmission, get_current_oar_jobs, get_oar_job_info, \
get_current_oargrid_jobs, get_oargrid_job_oar_jobs
from execo_g5k.api_utils import get_g5k_sites, get_g5k_clusters, \
get_cluster_site, get_site_clusters, get_resource_attributes, get_host_cluster, \
get_host_site, get_host_attributes, get_g5k_hosts, get_host_shortname, \
get_host_longname
from execo_g5k.config import g5k_configuration
from execo_g5k.utils import G5kAutoPortForwarder
from itertools import cycle
from math import ceil, floor
from operator import itemgetter
from pprint import pformat
from threading import Thread, currentThread
from time import time
from traceback import format_exc
try:
import matplotlib.pyplot as PLT
import matplotlib.dates as MD
except ImportError:
pass
try:
import psycopg2
_retrieve_method = 'PostgreSQL'
except:
_retrieve_method = 'API'
def get_job_by_name(job_name, sites=None):
""" """
logger.detail('Looking for a job named %s', style.emph(job_name))
if not sites:
sites = get_g5k_sites()
oargrid_jobs = get_current_oargrid_jobs()
if len(oargrid_jobs) > 0:
for g_job in oargrid_jobs:
for job in get_oargrid_job_oar_jobs(g_job):
info = get_oar_job_info(job[0], job[1])
if info['name'] == job_name:
logger.info('Oargridjob %s found !', style.emph(g_job))
return g_job, None
running_jobs = get_current_oar_jobs(sites)
for job in running_jobs:
info = get_oar_job_info(job[0], job[1])
if info['name'] == job_name:
logger.info('Job %s found on site %s !', style.emph(job[0]),
style.host(job[1]))
return job
return None, None
def get_planning(elements=['grid5000'], vlan=False, subnet=False, storage=False,
out_of_chart=False, starttime=None, endtime=None,
ignore_besteffort=True, queues='default'):
"""Retrieve the planning of the elements (site, cluster) and others resources.
Element planning structure is ``{'busy': [(123456,123457), ... ], 'free': [(123457,123460), ... ]}.``
:param elements: a list of Grid'5000 elements ('grid5000', <site>, <cluster>)
:param vlan: a boolean to ask for KaVLAN computation
:param subnet: a boolean to ask for subnets computation
    :param storage: a boolean to ask for storage computation
:param out_of_chart: if True, consider that days outside weekends are busy
:param starttime: start of time period for which to compute the planning, defaults to now + 1 minute
:param endtime: end of time period for which to compute the planning, defaults to 4 weeks from now
:param ignore_besteffort: True by default, to consider the resources with besteffort jobs as available
:param queues: list of oar queues for which to get the planning
    Return a dict whose keys are sites and whose values are dicts whose keys
    are clusters, 'subnets', 'vlans' or 'storage'; their values are planning
    dicts whose keys are hosts, subnet address ranges, vlan numbers or chunk
    ids respectively.
"""
if not starttime: starttime = int(time() + timedelta_to_seconds(timedelta(minutes = 1)))
starttime = int(get_unixts(starttime))
if not endtime: endtime = int(starttime + timedelta_to_seconds(timedelta(weeks = 4, minutes = 1)))
endtime = int(get_unixts(endtime))
if 'grid5000' in elements:
sites = elements = get_g5k_sites()
else:
sites = list(set([site for site in elements
if site in get_g5k_sites()] +
[get_cluster_site(cluster) for cluster in elements
if cluster in get_g5k_clusters(queues=queues)] +
[get_host_site(host) for host in elements
if host in get_g5k_hosts()
or get_host_shortname(host) in get_g5k_hosts()]))
if len(sites) == 0:
logger.error('Wrong elements given: %s' % (elements,))
return None
planning = {}
for site in sites:
planning[site] = {}
for cluster in get_site_clusters(site, queues=queues):
planning[site][cluster] = {}
for site in sites:
if vlan:
planning[site].update({'vlans': {}})
if subnet:
planning[site].update({'subnets': {}})
if storage:
planning[site].update({'storage': {}})
if _retrieve_method == 'API':
_get_planning_API(planning, ignore_besteffort)
elif _retrieve_method == 'PostgreSQL':
_get_planning_PGSQL(planning, ignore_besteffort)
if out_of_chart:
_add_charter_to_planning(planning, starttime, endtime)
for site_pl in planning.values():
for res_pl in site_pl.values():
for el_planning in res_pl.values():
el_planning['busy'].sort()
_merge_el_planning(el_planning['busy'])
_trunc_el_planning(el_planning['busy'], starttime, endtime)
_fill_el_planning_free(el_planning, starttime, endtime)
# cleaning
real_planning = deepcopy(planning)
for site, site_pl in planning.items():
for cl, cl_pl in site_pl.items():
if cl in ['vlans']:
continue
keep_cluster = False
for h in cl_pl:
if not (get_host_site(h) in elements or
get_host_cluster(h) in elements or
get_host_shortname(h) in elements or
h in elements):
del real_planning[site][cl][h]
else:
keep_cluster = True
if not keep_cluster:
del real_planning[site][cl]
return real_planning
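# Illustrative sketch, not part of the public execo_g5k API: one way to walk the
# nested structure returned by get_planning. The 'grid5000' element below is
# only an example argument.
def _example_walk_planning():
    """Print the free periods of every host known to the planning."""
    planning = get_planning(elements=['grid5000'])
    for site, site_planning in planning.items():
        for resource, resource_planning in site_planning.items():
            if resource in ('vlans', 'subnets', 'storage'):
                continue  # only walk the clusters here
            for host, host_planning in resource_planning.items():
                for start, stop in host_planning['free']:
                    print("%s %s %s: free from %s to %s"
                          % (site, resource, host,
                             format_date(start), format_date(stop)))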
def compute_slots(planning, walltime, excluded_elements=None):
    """Compute the slot limits and find the number of available nodes for
    each element and the given walltime.
Return the list of slots where a slot is ``[ start, stop, freehosts ]`` and
freehosts is a dict of Grid'5000 element with number of nodes available
``{'grid5000': 40, 'lyon': 20, 'reims': 10, 'stremi': 10 }``.
    WARNING: slots do not include subnets
:param planning: a dict of the resources planning, returned by ``get_planning``
    :param walltime: a duration, in a format supported by get_seconds, for which
    the resources must be available
:param excluded_elements: list of elements that will not be included in the slots
computation
"""
slots = []
walltime = get_seconds(walltime)
if excluded_elements is not None:
_remove_excluded(planning, excluded_elements)
limits = _slots_limits(planning)
    # Check whether we need to compute the vlans planning
kavlan = False
kavlan_global = False
if len(planning) > 0:
if 'vlans' in next(iter(planning.values())):
if len(planning) > 1:
kavlan_global = True
else:
kavlan = True
for limit in limits:
log = ''
free_elements = {'grid5000': 0}
if kavlan_global:
free_vlans_global = []
for site, site_planning in planning.items():
free_elements[site] = 0
for cluster, cluster_planning in site_planning.items():
if cluster in get_g5k_clusters(queues=None):
free_elements[cluster] = 0
for host, host_planning in cluster_planning.items():
host_free = False
for free_slot in host_planning['free']:
if free_slot[0] <= limit and free_slot[1] >= limit + walltime:
host_free = True
if host_free:
free_elements['grid5000'] += 1
free_elements[site] += 1
free_elements[cluster] += 1
log += ', ' + host
if kavlan:
free_vlans = 0
for vlan, vlan_planning in site_planning['vlans'].items():
if int(vlan.split('-')[1]) < 10:
kavlan_free = False
for free_slot in vlan_planning['free']:
if free_slot[0] <= limit and free_slot[1] >= limit + walltime:
kavlan_free = True
if kavlan_free:
free_vlans += 1
free_elements['kavlan'] = free_vlans
elif kavlan_global:
for vlan, vlan_planning in site_planning['vlans'].items():
if int(vlan.split('-')[1]) > 10:
kavlan_global_free = False
for free_slot in vlan_planning['free']:
if free_slot[0] <= limit and free_slot[1] >= limit + walltime:
kavlan_global_free = True
if kavlan_global_free:
free_vlans_global.append(site)
free_elements['kavlan'] = free_vlans_global
## MISSING OTHER RESOURCES COMPUTATION
logger.debug(log)
slots.append([limit, limit + walltime, free_elements])
slots.sort(key=itemgetter(0))
return slots
def max_resources(planning):
""" """
resources = {"grid5000": 0}
for site, site_pl in planning.items():
resources[site] = 0
for cl, cl_pl in site_pl.items():
if cl in ['vlans']:
continue
resources[cl] = 0
keep_cluster = False
for h in cl_pl:
resources["grid5000"] += 1
resources[cl] += 1
resources[site] +=1
return resources
def compute_coorm_slots(planning, excluded_elements=None):
""" """
slots = []
limits = _slots_limits(planning)
for start in limits:
stop = 10 ** 25
free_cores = {'grid5000': 0}
for site, site_planning in planning.items():
free_cores[site] = 0
for cluster, cluster_planning in site_planning.items():
free_cores[cluster] = 0
if cluster in get_g5k_clusters(queues=None):
for host, host_planning in cluster_planning.items():
for free_slot in host_planning['free']:
if free_slot[0] <= start and free_slot[0] < stop:
free_cores[cluster] += get_host_attributes(host)['architecture']['nb_cores']
free_cores[site] += get_host_attributes(host)['architecture']['nb_cores']
free_cores['grid5000'] += get_host_attributes(host)['architecture']['nb_cores']
if free_slot[1] < stop:
stop = free_slot[1]
slots.append((start, stop, free_cores))
return slots
def find_first_slot(slots, resources_wanted):
""" Return the first slot (a tuple start date, end date, resources) where some resources are available
:param slots: list of slots returned by ``compute_slots``
:param resources_wanted: a dict of elements that must have some free hosts
"""
for slot in slots:
vlan_free = True
if 'kavlan' in resources_wanted:
if isinstance(slot[2]['kavlan'], int):
if slot[2]['kavlan'] == 0:
vlan_free = False
elif isinstance(slot[2]['kavlan'], list):
if len(slot[2]['kavlan']) == 0:
vlan_free = False
res_nodes = sum([nodes for element, nodes in slot[2].items()
if element in resources_wanted and element != 'kavlan'])
if res_nodes > 0 and vlan_free:
return slot
return None, None, None
def find_max_slot(slots, resources_wanted):
"""Return the slot (a tuple start date, end date, resources) with the maximum nodes available for the given elements
:param slots: list of slots returned by ``compute_slots``
:param resources_wanted: a dict of elements that must be maximized"""
max_nodes = 0
max_slot = None, None, None
for slot in slots:
vlan_free = True
if 'kavlan' in resources_wanted:
if isinstance(slot[2]['kavlan'], int):
if slot[2]['kavlan'] == 0:
vlan_free = False
elif isinstance(slot[2]['kavlan'], list):
if len(slot[2]['kavlan']) == 0:
vlan_free = False
res_nodes = sum([nodes for element, nodes in slot[2].items()
if element in resources_wanted and element != 'kavlan'])
if res_nodes > max_nodes and vlan_free:
max_nodes = res_nodes
max_slot = slot
return max_slot
def find_free_slot(slots, resources_wanted):
"""Return the first slot (a tuple start date, end date, resources) with enough resources
:param slots: list of slots returned by ``compute_slots``
    :param resources_wanted: a dict describing the wanted resources
``{'grid5000': 50, 'lyon': 20, 'stremi': 10 }``"""
# We need to add the clusters nodes to the total nodes of a site
real_wanted = resources_wanted.copy()
for cluster, n_nodes in resources_wanted.items():
if cluster in get_g5k_clusters(queues=None):
site = get_cluster_site(cluster)
if site in resources_wanted:
real_wanted[site] += n_nodes
for slot in slots:
vlan_free = True
if 'kavlan' in resources_wanted:
if isinstance(slot[2]['kavlan'], int):
if slot[2]['kavlan'] == 0:
vlan_free = False
elif isinstance(slot[2]['kavlan'], list):
if len(slot[2]['kavlan']) == 0:
vlan_free = False
slot_ok = True
for element, n_nodes in slot[2].items():
if element in real_wanted and real_wanted[element] > n_nodes \
and real_wanted != 'kavlan':
slot_ok = False
if slot_ok and vlan_free:
if 'kavlan' in resources_wanted:
resources_wanted['kavlan'] = slot[2]['kavlan']
return slot
return None, None, None
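# Minimal usage sketch; the walltime and node counts below are arbitrary
# examples, not recommended values.
def _example_find_slot():
    """Return the first slot with 10 nodes free on the grid plus a KaVLAN."""
    planning = get_planning(elements=['grid5000'], vlan=True)
    slots = compute_slots(planning, '3:00:00')
    return find_free_slot(slots, {'grid5000': 10, 'kavlan': 1})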
def find_coorm_slot(slots, resources_wanted):
""" """
for start, stop, res in slots:
logger.debug("%s %s %s" % (format_date(start), format_date(stop), res))
slot_ok = True
for element, cpu in resources_wanted.items():
logger.debug("%s %s" % (element, cpu))
if res[element] < cpu * (stop - start) / 3600:
slot_ok = False
if slot_ok:
return start, stop, res
def get_hosts_jobs(hosts, walltime, out_of_chart=False):
"""Find the first slot when the hosts are available and return a
list of jobs_specs
:param hosts: list of hosts
:param walltime: duration of reservation
"""
hosts = [x.address if isinstance(x, Host) else x for x in hosts]
planning = get_planning(elements=hosts, out_of_chart=out_of_chart)
limits = _slots_limits(planning)
walltime = get_seconds(walltime)
for limit in limits:
all_host_free = True
for site_planning in planning.values():
for cluster, cluster_planning in site_planning.items():
if cluster in get_g5k_clusters(queues=None):
for host_planning in cluster_planning.values():
host_free = False
for free_slot in host_planning['free']:
if free_slot[0] <= limit and free_slot[1] >= limit + walltime:
host_free = True
if not host_free:
all_host_free = False
if all_host_free:
startdate = limit
break
jobs_specs = []
for site in planning:
site_hosts = [ get_host_longname(h) for h in hosts if get_host_site(h) == site ]
sub_res = "{host in ('" + "','".join(site_hosts) + "')}/nodes=" + str(len(site_hosts))
jobs_specs.append((OarSubmission(resources=sub_res,
reservation_date=startdate), site))
return jobs_specs
def show_resources(resources, msg='Resources', max_resources=None, queues='default'):
"""Print the resources in a fancy way"""
if not max_resources:
max_resources = {}
total_hosts = 0
log = style.log_header(msg) + '\n'
for site in get_g5k_sites():
site_added = False
if site in resources:
log += style.log_header(site).ljust(20) + ' ' + str(resources[site])
if site in max_resources:
log += '/' + str(max_resources[site])
log += ' '
site_added = True
for cluster in get_site_clusters(site, queues=queues):
if len(list(set(get_site_clusters(site)) & set(resources.keys()))) > 0 \
and not site_added:
log += style.log_header(site).ljust(20)
if site in max_resources:
log += '/' + str(max_resources[site])
log += ' '
site_added = True
if cluster in resources:
log += style.emph(cluster) + ': ' + str(resources[cluster])
if cluster in max_resources:
log += '/' + str(max_resources[cluster])
log += ' '
total_hosts += resources[cluster]
if site_added:
log += '\n'
if 'grid5000' in resources:
log += style.log_header('Grid5000').ljust(20) + str(resources['grid5000'])
if "grid5000" in max_resources:
log += '/' + str(max_resources["grid5000"])
elif total_hosts > 0:
log += style.log_header('Total ').ljust(20) + str(total_hosts)
logger.info(log)
def get_jobs_specs(resources, excluded_elements=None, name=None):
    """ Generate the job specifications from the dict of resources and
    the blacklisted elements
:param resources: a dict, whose keys are Grid'5000 element and values the
corresponding number of n_nodes
:param excluded_elements: a list of elements that won't be used
    :param name: the name that will be given to the jobs
"""
jobs_specs = []
if excluded_elements == None:
excluded_elements = []
# Creating the list of sites used
sites = []
real_resources = resources.copy()
for resource in resources:
if resource in get_g5k_sites() and resource not in sites:
sites.append(resource)
if resource in get_g5k_clusters(queues=None):
if resource not in excluded_elements:
site = get_cluster_site(resource)
if site not in sites:
sites.append(site)
if site not in real_resources:
real_resources[site] = 0
# Checking if we need a Kavlan, a KaVLAN global or none
get_kavlan = 'kavlan' in resources
if get_kavlan:
kavlan = 'kavlan'
n_sites = 0
for resource in real_resources:
if resource in sites:
n_sites += 1
if n_sites > 1:
kavlan += '-global'
break
blacklisted_hosts = {}
for element in excluded_elements:
if element not in get_g5k_clusters(queues=None) + get_g5k_sites():
site = get_host_site(element)
if not 'site' in blacklisted_hosts:
blacklisted_hosts[site] = [element]
else:
blacklisted_hosts[site].append(element)
for site in sites:
sub_resources = ''
# Adding a KaVLAN if needed
if get_kavlan:
if not 'global' in kavlan:
sub_resources = "{type='" + kavlan + "'}/vlan=1+"
get_kavlan = False
elif site in resources['kavlan']:
sub_resources = "{type='" + kavlan + "'}/vlan=1+"
get_kavlan = False
base_sql = '{'
end_sql = '}/'
# Creating blacklist SQL string for hosts
host_blacklist = False
str_hosts = ''
if site in blacklisted_hosts and len(blacklisted_hosts[site]) > 0:
str_hosts = ''.join(["host not in ('" + get_host_longname(host) + "') and "
for host in blacklisted_hosts[site]])
host_blacklist = True
#Adding the clusters blacklist
str_clusters = str_hosts if host_blacklist else ''
cl_blacklist = False
clusters_nodes = 0
for cluster in get_site_clusters(site, queues=None):
if cluster in resources and resources[cluster] > 0:
if str_hosts == '':
sub_resources += "{cluster='" + cluster + "'}"
else:
sub_resources += base_sql + str_hosts + "cluster='" + \
cluster + "'" + end_sql
sub_resources += "/nodes=" + str(resources[cluster]) + '+'
clusters_nodes += resources[cluster]
if cluster in excluded_elements:
str_clusters += "cluster not in ('" + cluster + "') and "
cl_blacklist = True
# Generating the site blacklist string from host and cluster blacklist
str_site = ''
if host_blacklist or cl_blacklist:
str_site += base_sql
if not cl_blacklist:
str_site += str_hosts[:-4]
else:
str_site += str_clusters[:-4]
str_site = str_site + end_sql
if real_resources[site] > 0:
sub_resources += str_site + "nodes=" + str(real_resources[site]) +\
'+'
if sub_resources != '':
jobs_specs.append((OarSubmission(resources=sub_resources[:-1],
name=name), site))
return jobs_specs
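# Possible end-to-end sketch (node count, walltime and job name are
# placeholders): from the planning to OAR job specifications that could then be
# submitted with execo_g5k.oarsub.
def _example_reservation_specs():
    planning = get_planning(elements=['grid5000'])
    slots = compute_slots(planning, '2:00:00')
    startdate, _enddate, resources = find_free_slot(slots, {'grid5000': 20})
    if startdate is None:
        return None, []
    wanted = distribute_hosts(resources, {'grid5000': 20})
    jobs_specs = get_jobs_specs(wanted, name='my_experiment')
    # each element of jobs_specs is an (OarSubmission, site) pair
    return startdate, jobs_specs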
def distribute_hosts(resources_available, resources_wanted,
                     excluded_elements=None, ratio=None):
    """ Distribute the resources on the different sites and clusters
:param resources_available: a dict defining the resources available
    :param resources_wanted: a dict defining the resources you really want
:param excluded_elements: a list of elements that won't be used
:param ratio: if not None (the default), a float between 0 and 1,
to actually only use a fraction of the resources."""
if excluded_elements == None: excluded_elements = []
resources = {}
#Defining the cluster you want
clusters_wanted = {}
for element, n_nodes in resources_wanted.items():
if element in get_g5k_clusters(queues=None):
clusters_wanted[element] = n_nodes
for cluster, n_nodes in clusters_wanted.items():
nodes = n_nodes if n_nodes > 0 else resources_available[cluster]
resources_available[get_cluster_site(cluster)] -= nodes
resources[cluster] = nodes
# Blacklisting clusters
for element in excluded_elements:
if element in get_g5k_clusters(queues=None) and element in resources_available:
resources_available['grid5000'] -= resources_available[element]
resources_available[get_cluster_site(element)] -= resources_available[element]
resources_available[element] = 0
#Defining the sites you want
sites_wanted = {}
for element, n_nodes in resources_wanted.items():
if element in get_g5k_sites() and element not in excluded_elements:
sites_wanted[element] = n_nodes
for site, n_nodes in sites_wanted.items():
resources[site] = n_nodes if n_nodes > 0 else resources_available[site]
# Blacklisting sites
for element in excluded_elements:
if element in get_g5k_sites() and element in resources_available:
resources_available['grid5000'] -= resources_available[element]
resources_available[element] = 0
#Distributing hosts on grid5000 elements
logger.debug(pformat(resources_wanted))
if 'grid5000' in resources_wanted:
g5k_nodes = resources_wanted['grid5000'] if resources_wanted['grid5000'] > 0 else resources_available['grid5000']
total_nodes = 0
sites = [element for element in resources_available if element in get_g5k_sites() ]
iter_sites = cycle(sites)
while total_nodes < g5k_nodes:
site = next(iter_sites)
if resources_available[site] == 0:
sites.remove(site)
iter_sites = cycle(sites)
else:
resources_available[site] -= 1
if site in resources:
resources[site] += 1
else:
resources[site] = 1
total_nodes += 1
logger.debug(pformat(resources))
if 'kavlan' in resources_wanted:
resources['kavlan'] = resources_available['kavlan']
# apply optional ratio
if ratio != None:
resources.update((x, int(floor(y * ratio))) for x, y in resources.items())
return resources
def _fix_job(start_time, end_time):
return int(start_time), int(end_time) + 120
def _get_vlans_API(site):
"""Retrieve the list of VLAN of a site from the 3.0 Grid'5000 API"""
equips = get_resource_attributes('/sites/'+site+'/network_equipments/')
vlans = []
for equip in equips['items']:
if 'vlans' in equip and len(equip['vlans']) >2:
for params in equip['vlans'].values():
if type( params ) == type({}) and 'name' in params \
and int(params['name'].split('-')[1])>3:
# > 3 because vlans 1, 2, 3 are not routed
vlans.append(params['name'])
return vlans
def _get_job_link_attr_API(p):
try:
currentThread().attr = get_resource_attributes(p)
except Exception as e:
currentThread().broken = True
currentThread().ex = e
def _get_site_planning_API(site, site_planning, ignore_besteffort):
try:
alive_nodes = set([str(node['network_address'])
for node in get_resource_attributes('/sites/'+site+'/internal/oarapi/resources/details.json?limit=2^30')['items']
if node['type'] == 'default' and node['state'] != 'Dead' and node['maintenance'] != 'YES'])
for host in alive_nodes:
host_cluster = get_host_cluster(str(host))
if host_cluster in site_planning:
site_planning[host_cluster].update({host: {'busy': [], 'free': []}})
if 'vlans' in site_planning:
site_planning['vlans'] = {}
for vlan in _get_vlans_API(site):
site_planning['vlans'][vlan] = {'busy': [], 'free': []}
# STORAGE AND SUBNETS MISSING
# Retrieving jobs
site_jobs = get_resource_attributes('/sites/'+site+'/jobs?limit=1073741824&state=waiting,launching,running')['items']
jobs_links = [ link['href'] for job in site_jobs for link in job['links'] \
if link['rel'] == 'self' and (ignore_besteffort == False or job['queue'] != 'besteffort') ]
threads = []
for link in jobs_links:
t = Thread(target = _get_job_link_attr_API, args = ('/'+str(link).split('/', 2)[2], ))
t.broken = False
t.attr = None
t.ex = None
threads.append(t)
t.start()
for t in threads:
t.join()
if t.broken:
raise t.ex
attr = t.attr
try:
start_time = attr['started_at'] if attr['started_at'] != 0 else attr['scheduled_at']
end_time = start_time + attr['walltime']
except:
continue
start_time, end_time = _fix_job(start_time, end_time)
nodes = attr['assigned_nodes']
for node in nodes:
cluster = node.split('.',1)[0].split('-')[0]
if cluster in site_planning and node in site_planning[cluster]:
site_planning[cluster][node]['busy'].append( (start_time, end_time))
if 'vlans' in site_planning and 'vlans' in attr['resources_by_type'] \
and int(attr['resources_by_type']['vlans'][0]) > 3:
kavname ='kavlan-'+str(attr['resources_by_type']['vlans'][0])
site_planning['vlans'][kavname]['busy'].append( (start_time, end_time))
if 'subnets' in site_planning and 'subnets' in attr['resources_by_type']:
for subnet in attr['resources_by_type']['subnets']:
if subnet not in site_planning['subnets']:
site_planning['subnets'][subnet] = {'busy': [], 'free': []}
site_planning['subnets'][subnet]['busy'].append( (start_time, end_time))
# STORAGE IS MISSING
except Exception as e:
        logger.warn('error retrieving planning from the API for ' + site)
logger.detail("exception:\n" + format_exc())
currentThread().broken = True
def _get_planning_API(planning, ignore_besteffort):
"""Retrieve the planning using the 3.0 Grid'5000 API """
broken_sites = []
threads = {}
for site in planning:
t = Thread(target = _get_site_planning_API, args = (site, planning[site], ignore_besteffort))
threads[site] = t
t.broken = False
t.start()
for site, t in threads.items():
t.join()
if t.broken:
broken_sites.append(site)
# Removing sites not reachable
for site in broken_sites:
del planning[site]
def _get_site_planning_PGSQL(site, site_planning, ignore_besteffort):
try:
with G5kAutoPortForwarder(site,
'oardb.' + site + '.grid5000.fr',
g5k_configuration['oar_pgsql_ro_port']) as (host, port):
conn = psycopg2.connect(host=host, port=port,
user=g5k_configuration['oar_pgsql_ro_user'],
password=g5k_configuration['oar_pgsql_ro_password'],
database=g5k_configuration['oar_pgsql_ro_db']
)
try:
cur = conn.cursor()
# Retrieving alive resources
sql = """SELECT DISTINCT R.type, R.network_address, R.vlan, R.subnet_address
FROM resources R
WHERE state <> 'Dead' AND R.maintenance <> 'YES';"""
cur.execute(sql)
for data in cur.fetchall():
if data[0] == "default":
cluster = get_host_cluster(data[1])
if cluster in site_planning:
site_planning[cluster][data[1]] = {'busy': [],
'free': []}
if data[0] in ['kavlan', 'kavlan-global'] \
and 'vlans' in site_planning:
site_planning['vlans']['kavlan-' + data[2]] = {'busy': [],
'free': []}
                    if data[0] == "subnet" and 'subnets' in site_planning:
site_planning['subnets'][data[3]] = {'busy': [],
'free': []}
sql = ("""SELECT J.job_id, J.state, GJP.start_time AS start_time,
GJP.start_time+MJD.moldable_walltime,
array_agg(DISTINCT R.network_address) AS hosts,
array_agg(DISTINCT R.vlan) AS vlan,
array_agg(DISTINCT R.subnet_address) AS subnets
FROM jobs J
LEFT JOIN moldable_job_descriptions MJD
ON MJD.moldable_job_id=J.job_id
LEFT JOIN gantt_jobs_predictions GJP
ON GJP.moldable_job_id=MJD.moldable_id
INNER JOIN gantt_jobs_resources AR
ON AR.moldable_job_id=MJD.moldable_id
LEFT JOIN resources R
ON AR.resource_id=R.resource_id
WHERE ( J.state='Launching' OR J.state='Running' OR J.state='Waiting')
""" +
(""" AND queue_name<>'besteffort'""" if ignore_besteffort else """""") +
"""GROUP BY J.job_id, GJP.start_time, MJD.moldable_walltime
ORDER BY J.start_time""")
# CONVERT(SUBSTRING_INDEX(SUBSTRING_INDEX(R.network_address,'.',1),'-',-1), SIGNED)"""
cur.execute(sql)
for job in cur.fetchall():
start_time = job[2]
end_time = job[3]
start_time, end_time = _fix_job(start_time, end_time)
if len(job[4]) > 0:
for host in job[4]:
if host != '':
cluster = get_host_cluster(host)
if cluster in site_planning:
if host in site_planning[cluster]:
site_planning[cluster][host]['busy'].append((start_time, end_time))
if job[5][0] and 'vlans' in site_planning:
for vlan in job[5]:
if isinstance(vlan, str) and int(vlan) > 3:
# only routed vlan
site_planning['vlans']['kavlan-' + vlan]['busy'].append((start_time, end_time))
                    if len(job[6]) > 0 and 'subnets' in site_planning:
for subnet in job[6]:
site_planning['subnets'][subnet]['busy'].append((start_time, end_time))
finally:
conn.close()
except Exception as e:
logger.warn('error connecting to oar database / getting planning from ' + site)
logger.detail("exception:\n" + format_exc())
currentThread().broken = True
def _get_planning_PGSQL(planning, ignore_besteffort):
"""Retrieve the planning using the oar2 database"""
broken_sites = []
threads = {}
for site in planning:
t = Thread(target = _get_site_planning_PGSQL, args = (site, planning[site], ignore_besteffort))
threads[site] = t
t.broken = False
t.start()
for site, t in threads.items():
t.join()
if t.broken:
broken_sites.append(site)
# Removing sites not reachable
for site in broken_sites:
del planning[site]
def _remove_excluded(planning, excluded_resources):
    """This function removes elements from the planning"""
# first removing the site
for element in excluded_resources:
if element in get_g5k_sites() and element in planning:
del planning[element]
# then removing specific clusters
for site_pl in planning.values():
for res in list(site_pl):
if res in excluded_resources:
del site_pl[res]
continue
for element in list(site_pl[res]):
if element in excluded_resources:
del site_pl[res][element]
def _merge_el_planning(el_planning):
"""An internal function to merge the busy or free planning of an element"""
if len(el_planning) > 1:
for i in range(len(el_planning)):
j = i+1
if j == len(el_planning)-1:
break
while True:
condition = el_planning[i][1] >= el_planning[j][0]
if condition:
if el_planning[j][1] > el_planning[i][1]:
el_planning[i]=(el_planning[i][0], el_planning[j][1])
el_planning.pop(j)
if j == len(el_planning) - 1:
break
else:
break
if j == len(el_planning) - 1:
break
def _trunc_el_planning(el_planning, starttime, endtime):
    """Clip or remove (start, stop) tuples that are not within the (starttime, endtime) interval"""
if len(el_planning) > 0:
el_planning.sort()
# Truncating jobs that end before starttime
i = 0
while True:
if i == len(el_planning):
break
start, stop = el_planning[i]
if stop < starttime or start > endtime:
el_planning.remove( (start, stop ))
else:
if start < starttime:
if stop < endtime:
el_planning.remove( (start, stop ) )
el_planning.append( (starttime, stop) )
else:
el_planning.remove( (start, stop ) )
el_planning.append( (starttime, endtime) )
elif stop > endtime:
el_planning.remove( (start, stop ) )
el_planning.append( (start, endtime) )
else:
i += 1
if i == len(el_planning):
break
el_planning.sort()
def _fill_el_planning_free(el_planning, starttime, endtime):
"""An internal function to compute the planning free of all elements"""
if len(el_planning['busy']) > 0:
if el_planning['busy'][0][0] > starttime:
el_planning['free'].append((starttime, el_planning['busy'][0][0]))
for i in range(0, len(el_planning['busy'])-1):
el_planning['free'].append((el_planning['busy'][i][1], el_planning['busy'][i+1][0]))
if el_planning['busy'][len(el_planning['busy'])-1][1] < endtime:
el_planning['free'].append((el_planning['busy'][len(el_planning['busy'])-1][1], endtime))
else:
el_planning['free'].append((starttime, endtime))
def _slots_limits(planning):
"""Return the limits of slots, defined by a resource state change."""
limits = set()
for site in planning.values():
for res_pl in site.values():
for el_planning in res_pl.values():
for start, stop in el_planning['busy']:
limits.add(start)
limits.add(stop)
for start, stop in el_planning['free']:
limits.add(start)
limits.add(stop)
limits = sorted(limits)
if len(limits) > 0:
limits.pop()
return limits
def _add_charter_to_planning(planning, starttime, endtime):
charter_el_planning = get_charter_el_planning(starttime, endtime)
for site in planning.values():
for res_pl in site.values():
for el_planning in res_pl.values():
el_planning['busy'] += charter_el_planning
el_planning['busy'].sort()
def get_charter_el_planning(start_time, end_time):
"""Returns the list of tuples (start, end) of g5k charter time periods between start_time and end_time.
:param start_time: a date in one of the types supported by
`execo.time_utils.get_unixts`
:param end_time: a date in one of the types supported by
`execo.time_utils.get_unixts`
"""
start_time = unixts_to_datetime(get_unixts(start_time))
end_time = unixts_to_datetime(get_unixts(end_time))
el_planning = []
while True:
charter_start, charter_end = get_next_charter_period(start_time, end_time)
if charter_start == None: break
el_planning.append((int(charter_start), int(charter_end)))
start_time = charter_end
return el_planning
"""Functions to draw the Gantt chart, the slots available, and other plots """
def _set_colors():
colors = {}
colors['busy'] = '#666666'
rgb_colors = [(x[0]/255., x[1]/255., x[2]/255.) for x in \
[(255., 122., 122.), (255., 204., 122.), (255., 255., 122.), (255., 246., 153.), (204., 255., 122.),
(122., 255., 122.), (122., 255., 255.), (122., 204., 255.), (204., 188., 255.), (255., 188., 255.)]]
i_site = 0
for site in sorted(get_g5k_sites()):
colors[site] = rgb_colors[i_site]
i_cluster = 0
for cluster in sorted(get_site_clusters(site, queues=None)):
min_index = colors[site].index(min(colors[site]))
color = [0., 0., 0.]
for i in range(3):
color[i] = min(colors[site][i], 1.)
if i == min_index:
color[i] += i_cluster * 0.12
colors[cluster] = tuple(color)
i_cluster += 1
i_site += 1
return colors
def draw_gantt(planning, colors = None, show = False, save = True, outfile = None):
    """ Draw the hosts planning for the requested elements (requires Matplotlib)
    :param planning: the planning dict of the elements, as returned by ``get_planning``
:param colors: a dict to define element coloring ``{'element': (255., 122., 122.)}``
:param show: display the Gantt diagram
:param save: save the Gantt diagram to outfile
:param outfile: specify the output file"""
if colors is None:
colors = _set_colors()
n_sites = len(planning)
startstamp = None
endstamp = None
for clusters_hosts in planning.values():
for hosts_kinds in clusters_hosts.values():
for kinds_slots in hosts_kinds.values():
for slots in kinds_slots.values():
for slot in slots:
if startstamp == None or slot[0] < startstamp:
startstamp = slot[0]
if endstamp == None or slot[1] > endstamp:
endstamp = slot[1]
if startstamp and endstamp: break
if startstamp and endstamp: break
for slot in slots:
if slot[0] < startstamp:
startstamp = slot[0]
if slot[1] > endstamp:
endstamp = slot[1]
if outfile is None:
outfile = 'gantt_' + "_".join([site for site in planning]) \
+ '_' + format_date(startstamp)
logger.info('Saving Gantt chart to %s', style.emph(outfile))
n_col = 2 if n_sites > 1 else 1
n_row = int(ceil(float(n_sites) / float(n_col)))
x_major_locator = MD.AutoDateLocator()
xfmt = MD.DateFormatter('%d %b, %H:%M ')
PLT.ioff()
fig = PLT.figure(figsize=(15, 5 * n_row), dpi=80)
i_site = 1
for site, clusters in planning.items():
n_hosts = 0
for hosts in clusters.values():
n_hosts += len(hosts)
if n_hosts == 0: continue
ax = fig.add_subplot(n_row, n_col, i_site, title=site.title())
ax.title.set_fontsize(18)
ax.xaxis_date()
ax.set_xlim(unixts_to_datetime(startstamp), unixts_to_datetime(endstamp))
ax.xaxis.set_major_formatter(xfmt)
ax.xaxis.set_major_locator(x_major_locator)
ax.xaxis.grid(color='black', linestyle='dashed')
PLT.xticks(rotation=15)
ax.set_ylim(0, 1)
ax.get_yaxis().set_ticks([])
ax.yaxis.label.set_fontsize(16)
pos = 0.0
inc = 1.0 / n_hosts
ylabel = ''
for cluster, hosts in clusters.items():
ylabel += cluster + ' '
i_host = 0
for key in sorted(list(hosts), key = lambda name: (name.split('.',1)[0].split('-')[0],
int( name.split('.',1)[0].split('-')[1] ))):
slots = hosts[key]
i_host +=1
cl_colors = {'free': colors[cluster], 'busy': colors['busy']}
for kind in cl_colors:
for freeslot in slots[kind]:
edate, bdate = [MD.date2num(item) for item in
(unixts_to_datetime(freeslot[1]), unixts_to_datetime(freeslot[0]))]
ax.barh(pos, edate - bdate , 1, left = bdate,
color = cl_colors[kind], edgecolor = 'none' )
pos += inc
if i_host == len(hosts):
ax.axhline(y = pos, color = cl_colors['busy'], linestyle ='-', linewidth = 1)
ax.set_ylabel(ylabel)
i_site += 1
fig.tight_layout()
if show:
PLT.show()
if save:
logger.debug('Saving file %s ...', outfile)
PLT.savefig (outfile, dpi=300)
def draw_slots(slots, colors=None, show=False, save=True, outfile=None):
"""Draw the number of nodes available for the clusters (requires Matplotlib >= 1.2.0)
:param slots: a list of slot, as returned by ``compute_slots``
:param colors: a dict to define element coloring ``{'element': (255., 122., 122.)}``
:param show: display the slots versus time
:param save: save the plot to outfile
:param outfile: specify the output file"""
startstamp = slots[0][0]
endstamp = slots[-1][1]
if outfile is None:
outfile = 'slots_' + format_date(startstamp)
logger.info('Saving slots diagram to %s', style.emph(outfile))
if colors is None:
colors = _set_colors()
xfmt = MD.DateFormatter('%d %b, %H:%M ')
if endstamp - startstamp <= timedelta_to_seconds(timedelta(days=7)):
x_major_locator = MD.HourLocator(byhour=[9, 19])
elif endstamp - startstamp <= timedelta_to_seconds(timedelta(days=17)):
x_major_locator = MD.HourLocator(byhour=[9])
else:
x_major_locator = MD.AutoDateLocator()
max_nodes = {}
total_nodes = 0
slot_limits = []
total_list = []
i_slot = 0
for slot in slots:
slot_limits.append(slot[0])
if i_slot + 1 < len(slots):
slot_limits.append(slots[i_slot + 1][0])
i_slot += 1
for element, n_nodes in slot[2].items():
if element in get_g5k_clusters(queues=None):
if not element in max_nodes:
max_nodes[element] = []
max_nodes[element].append(n_nodes)
max_nodes[element].append(n_nodes)
if element == 'grid5000':
total_list.append(n_nodes)
total_list.append(n_nodes)
if n_nodes > total_nodes:
total_nodes = n_nodes
slot_limits.append(endstamp)
slot_limits.sort()
dates = [unixts_to_datetime(ts) for ts in slot_limits]
datenums = MD.date2num(dates)
fig = PLT.figure(figsize=(15,10), dpi=80)
ax = PLT.subplot(111)
ax.xaxis_date()
box = ax.get_position()
ax.set_position([box.x0-0.07, box.y0, box.width, box.height])
ax.set_xlim(unixts_to_datetime(startstamp), unixts_to_datetime(endstamp))
ax.set_xlabel('Time')
ax.set_ylabel('Nodes available')
ax.set_ylim(0, total_nodes*1.1)
ax.axhline(y = total_nodes, color = '#000000', linestyle ='-', linewidth = 2, label = 'ABSOLUTE MAXIMUM')
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.xaxis.set_major_formatter(xfmt)
ax.xaxis.set_major_locator(x_major_locator )
PLT.xticks(rotation = 15)
max_nodes_list = []
p_legend = []
p_rects = []
p_colors = []
for key, value in sorted(max_nodes.items()):
if key != 'grid5000':
max_nodes_list.append(value)
p_legend.append(key)
p_rects.append(PLT.Rectangle((0, 0), 1, 1, fc = colors[key]))
p_colors.append(colors[key])
plots = PLT.stackplot(datenums, max_nodes_list, colors = p_colors)
PLT.legend(p_rects, p_legend, loc='center right', ncol = 1, shadow = True, bbox_to_anchor=(1.2, 0.5))
if show:
PLT.show()
if save:
logger.debug('Saving file %s ...', outfile)
PLT.savefig (outfile, dpi=300)
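# Sketch tying the drawing helpers to the planning computation; the element
# list, walltime and file names are examples only.
def _example_draw():
    planning = get_planning(elements=['grid5000'])
    draw_gantt(planning, outfile='gantt.png')
    slots = compute_slots(planning, '1:00:00')
    draw_slots(slots, outfile='slots.png')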
| gpl-3.0 |
numenta-archive/htmresearch | projects/capybara/anomaly_detection/run_models.py | 6 | 4879 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Groups together code used for creating a NuPIC model and dealing with IO.
"""
import importlib
import csv
import os
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.model_factory import ModelFactory
import nupic_anomaly_output
from settings import METRICS, PATIENT_IDS, SENSORS, CONVERTED_DATA_DIR, MODEL_PARAMS_DIR, MODEL_RESULTS_DIR
def createModel(modelParams):
"""
Given a model params dictionary, create a CLA Model. Automatically enables
inference for metric_value.
:param modelParams: Model params dict
:return: OPF Model object
"""
model = ModelFactory.create(modelParams)
model.enableInference({"predictedField": "metric_value"})
return model
def getModelParamsFromName(csvName):
"""
Given a csv name, assumes a matching model params python module exists within
the model_params directory and attempts to import it.
:param csvName: CSV name, used to guess the model params module name.
:return: OPF Model params dictionary
"""
print "Creating model from %s..." % csvName
importName = "%s.%s" % (MODEL_PARAMS_DIR, csvName.replace(" ", "_"))
print "Importing model params from %s" % importName
try:
importedModelParams = importlib.import_module(importName).MODEL_PARAMS
except ImportError:
raise Exception("No model params exist for '%s'. "
"Run trajectory_converter.py first!"
% csvName)
return importedModelParams
def runIoThroughNupic(inputData, model, metric, sensor, patientId, plot):
"""
Handles looping over the input data and passing each row into the given model
object, as well as extracting the result object and passing it into an output
handler.
:param inputData: file path to input data CSV
:param model: OPF Model object
  :param metric: metric name, used to build the output CSV name
  :param sensor: sensor name, used to build the output CSV name
  :param patientId: patient id, used to build the output CSV name
:param plot: Whether to use matplotlib or not. If false, uses file output.
"""
inputFile = open(inputData, "rb")
csvReader = csv.reader(inputFile.read().splitlines())
# skip header rows
csvReader.next()
csvReader.next()
csvReader.next()
csvName = "%s_%s_%s" % (metric, sensor, patientId)
print "running model with model_params '%s'" % csvName
shifter = InferenceShifter()
if plot:
output = nupic_anomaly_output.NuPICPlotOutput(csvName)
else:
if not os.path.exists(MODEL_RESULTS_DIR):
os.makedirs(MODEL_RESULTS_DIR)
output = nupic_anomaly_output.NuPICFileOutput("%s/%s" % (MODEL_RESULTS_DIR,
csvName))
counter = 0
for row in csvReader:
counter += 1
if (counter % 100 == 0):
print "Read %i lines..." % counter
metric_value = float(row[0])
result = model.run({
"metric_value": metric_value
})
if plot:
result = shifter.shift(result)
prediction = result.inferences["multiStepBestPredictions"][0]
anomalyScore = result.inferences["anomalyScore"]
output.write(counter, metric_value, prediction, anomalyScore)
output.close()
inputFile.close()
def runModel(metric, sensor, patientId, plot=False):
"""
Assumes the CSV Name corresponds to both a like-named model_params file in the
model_params directory, and that the data exists in a like-named CSV file in
the current directory.
  :param metric: metric name; combined with sensor and patientId to build the
                 CSV name used to find model params and the input CSV file
  :param sensor: sensor name
  :param patientId: patient identifier
:param plot: Plot in matplotlib? Don't use this unless matplotlib is
installed.
"""
csvName = "%s_%s_%s" % (metric, sensor, patientId)
model = createModel(getModelParamsFromName(csvName))
inputData = "%s/%s.csv" % (CONVERTED_DATA_DIR, csvName)
runIoThroughNupic(inputData, model, metric, sensor, patientId, plot)
if __name__ == "__main__":
for sensor in SENSORS:
for patientId in PATIENT_IDS:
for metric in METRICS:
runModel(metric, sensor, patientId)
| agpl-3.0 |
jsseb/gcode_parser | test.py | 1 | 2801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Generate G-code for a Lissajous curve.
Future revisions will generate G-code for other curves.
Test range : [1,1 + np.pi,0.1]
Test delta : 0
'''
import numpy as np
import argparse
try:
import matplotlib.pyplot as plt
except ImportError:
pass
def lissajous(a,b,rng,delta=None):
X = []
Y = []
if delta == None:
delta = ((b-1)/b) * np.pi/2
N = (rng[1]-rng[0])/rng[2]
for t in np.linspace(rng[0], rng[1], num=N):
#X = a*sin(a*t + delta)
#Y = b*sin(b*t)
X.append(a*np.sin(a*t + delta))
Y.append(b*np.sin(b*t))
curve = [X,Y]
return curve
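# Illustrative usage sketch: the a/b values are arbitrary and the range mirrors
# the test range noted in the module docstring; defined here but never called.
def _example_lissajous():
    curve = lissajous(3, 2, [1, 1 + np.pi, 0.1], delta=0)
    print_data(curve, filename='lissa_example.txt')
    return curve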
def squares(a,b,n,d,change):
X = []
Y = []
x = a
y = b
l = d
for i in range(n):
X.append(x)
Y.append(y)
X.append(x+l)
Y.append(y)
X.append(x+l)
Y.append(y-l)
X.append(x)
Y.append(y-l)
X.append(x)
Y.append(y)
x = x+change
y = y-change
l = l-2*change
return [X,Y]
def lines(x,y,n,d,change):
X = []
Y = []
for i in range(n):
X.append(x+2*i)
Y.append(y)
X.append(x+2*i)
Y.append(y+d+i*change)
return [X,Y]
def print_data(curve,filename=None):
n = [[x,y] for x,y in zip(curve[0],curve[1])]
if filename is None:
f = open('lissa.txt','w')
else:
f = open(filename,'w')
n = str(n)
n = n.replace('[','')
n = n.replace(',','')
n = n.replace(']','\n')
n = n.replace('\'','')
f.write("{}".format(n))
f.close()
def plot_data(curve):
plt.plot(curve[0],curve[1],'-')
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Prints and Plots the Lissajous Knot')
parser.add_argument('--x',dest='x',required=True)
parser.add_argument('--y',dest='y',required=True)
parser.add_argument('--c',dest='c',required=False)
parser.add_argument('--d',dest='d',required=False)
parser.add_argument('--n',dest='n',required=False)
parser.add_argument('--delta',dest='delta',required=False)
parser.add_argument('--precission',dest='precission',required=False)
parser.add_argument('--o',dest='option',required=True)
parser.add_argument('--plot',dest='plot',required=False)
parser.add_argument('--print',dest='file',required=False)
parser.add_argument('--output',dest='filename',required=False)
args = parser.parse_args()
if args.precission is None:
precission = 0.01
else:
precission = float(args.precission)
if args.x is not None and args.y is not None:
if args.option == 'squares':
points = squares(int(args.x),int(args.y),int(args.n),int(args.d),int(args.c))
if args.option == 'lines':
points = lines(int(args.x),int(args.y),int(args.n),int(args.d),int(args.c))
if args.option == 'lissa':
points = lissajous(int(args.x),int(args.y),[0,2*np.pi, precission],delta = args.delta)
if args.file is not None:
print_data(points,filename=args.filename)
if args.plot is not None:
plot_data(points) | mit |
shikhardb/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the behavior "rich getting richer" of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
loli/medpy | doc/numpydoc/numpydoc/docscrape_sphinx.py | 1 | 9399 |
import sys, re, inspect, textwrap, pydoc
import sphinx
import collections
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: str(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
self.load_config(config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self):
out = []
if self['Returns']:
out += self._str_field_list('Returns')
out += ['']
for param, param_type, desc in self['Returns']:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (isinstance(param_obj, collections.Callable)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::']
if self.class_members_toctree:
out += [' :toctree:']
out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in list(idx.items()):
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns()
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.load_config(config)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
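# Illustrative usage sketch (numpy.take is an assumed example target; any object
# with a numpydoc-style docstring works):
#
#     import numpy as np
#     print(get_doc_object(np.take, config={'use_plots': False}))
#
# SphinxDocString.__str__ renders the parsed sections as Sphinx-flavoured reST.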
| gpl-3.0 |
Habasari/sms-tools | lectures/03-Fourier-properties/plots-code/convolution-2.py | 24 | 1259 | import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft, fftshift
plt.figure(1, figsize=(9.5, 7))
M = 64
N = 64
x1 = np.hanning(M)
x2 = np.cos(2*np.pi*2/M*np.arange(M))
y1 = x1*x2
mY1 = 20 * np.log10(np.abs(fftshift(fft(y1, N))))
plt.subplot(3,2,1)
plt.title('x1 (hanning)')
plt.plot(np.arange(-M/2, M/2), x1, 'b', lw=1.5)
plt.axis([-M/2,M/2,0,1])
plt.subplot(3,2,2)
plt.title('x2 (cosine)')
plt.plot(np.arange(-M/2, M/2),x2, 'b', lw=1.5)
plt.axis([-M/2,M/2,-1,1])
mX1 = 20 * np.log10(np.abs(fftshift(fft(x1, M)))/M)
plt.subplot(3,2,3)
plt.title('X1')
plt.plot(np.arange(-N/2, N/2),mX1, 'r', lw=1.5)
plt.axis([-N/2,N/2,-80,max(mX1)])
mX2 = 20 * np.log10(np.abs(fftshift(fft(x2, M)))/M)
plt.subplot(3,2,4)
plt.title('X2')
plt.plot(np.arange(-N/2, N/2),mX2, 'r', lw=1.5)
plt.axis([-N/2,N/2,-80,max(mX2)])
plt.subplot(3,2,5)
plt.title('DFT(x1 x x2)')
plt.plot(np.arange(-N/2, N/2),mY1, 'r', lw=1.5)
plt.axis([-N/2,N/2,-80,max(mY1)])
Y2 = np.convolve(fftshift(fft(x1, M)), fftshift(fft(x2, M)))
mY2 = 20 * np.log10(np.abs(Y2)) - 40
plt.subplot(3,2,6)
plt.title('X1 * X2')
plt.plot(np.arange(-N/2, N/2),mY2[M/2:M+M/2], 'r', lw=1.5)
plt.axis([-N/2,N/2,-80,max(mY2)])
plt.tight_layout()
plt.savefig('convolution-2.png')
plt.show()
| agpl-3.0 |
maciejkula/spotlight | examples/bloom_embeddings/performance.py | 1 | 5801 | import os
import pickle
import time
import numpy as np
import torch
from spotlight.layers import BloomEmbedding, ScaledEmbedding
from spotlight.factorization.implicit import ImplicitFactorizationModel
from spotlight.factorization.representations import BilinearNet
from spotlight.sequence.implicit import ImplicitSequenceModel
from spotlight.sequence.representations import LSTMNet
from spotlight.datasets.movielens import get_movielens_dataset
CUDA = torch.cuda.is_available()
EMBEDDING_DIM = 64
N_ITER = 2
NUM_HASH_FUNCTIONS = 4
def time_fitting(model, data, repetitions=2):
timings = []
# Warm-up epoch
model.fit(data)
for _ in range(repetitions):
start_time = time.time()
model.fit(data)
timings.append(time.time() - start_time)
print(min(timings))
return min(timings)
def factorization_model(num_embeddings, bloom):
if bloom:
user_embeddings = BloomEmbedding(num_embeddings, EMBEDDING_DIM,
num_hash_functions=NUM_HASH_FUNCTIONS)
item_embeddings = BloomEmbedding(num_embeddings, EMBEDDING_DIM,
num_hash_functions=NUM_HASH_FUNCTIONS)
else:
user_embeddings = ScaledEmbedding(num_embeddings, EMBEDDING_DIM)
item_embeddings = ScaledEmbedding(num_embeddings, EMBEDDING_DIM)
network = BilinearNet(num_embeddings,
num_embeddings,
user_embedding_layer=user_embeddings,
item_embedding_layer=item_embeddings)
model = ImplicitFactorizationModel(loss='adaptive_hinge',
n_iter=N_ITER,
embedding_dim=EMBEDDING_DIM,
batch_size=2048,
learning_rate=1e-2,
l2=1e-6,
representation=network,
use_cuda=CUDA)
return model
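# Illustrative usage sketch (the embedding count is an arbitrary assumption):
#
#     bloomed = factorization_model(int(1e5), bloom=True)    # hashed embeddings
#     baseline = factorization_model(int(1e5), bloom=False)  # full embeddings
#
# Both are then fitted on the Interactions object returned by
# get_factorization_data() via time_fitting() above.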
def sequence_model(num_embeddings, bloom):
if bloom:
item_embeddings = BloomEmbedding(num_embeddings, EMBEDDING_DIM,
num_hash_functions=NUM_HASH_FUNCTIONS)
else:
item_embeddings = ScaledEmbedding(num_embeddings, EMBEDDING_DIM)
network = LSTMNet(num_embeddings, EMBEDDING_DIM,
item_embedding_layer=item_embeddings)
model = ImplicitSequenceModel(loss='adaptive_hinge',
n_iter=N_ITER,
batch_size=512,
learning_rate=1e-3,
l2=1e-2,
representation=network,
use_cuda=CUDA)
return model
def get_sequence_data():
dataset = get_movielens_dataset('1M')
max_sequence_length = 200
min_sequence_length = 20
data = dataset.to_sequence(max_sequence_length=max_sequence_length,
min_sequence_length=min_sequence_length,
step_size=max_sequence_length)
print(data.sequences.shape)
return data
def get_factorization_data():
dataset = get_movielens_dataset('1M')
return dataset
def embedding_size_scalability():
sequence_data = get_sequence_data()
factorization_data = get_factorization_data()
embedding_dims = (1e4,
1e4 * 5,
1e5,
1e5 * 5,
1e6,
1e6 * 5)
bloom_sequence = np.array([time_fitting(sequence_model(int(dim), True),
sequence_data)
for dim in embedding_dims])
baseline_sequence = np.array([time_fitting(sequence_model(int(dim), False),
sequence_data)
for dim in embedding_dims])
sequence_ratio = bloom_sequence / baseline_sequence
print('Sequence ratio {}'.format(sequence_ratio))
bloom_factorization = np.array([time_fitting(factorization_model(int(dim), True),
factorization_data)
for dim in embedding_dims])
baseline_factorization = np.array([time_fitting(factorization_model(int(dim), False),
factorization_data)
for dim in embedding_dims])
factorization_ratio = bloom_factorization / baseline_factorization
print('Factorization ratio {}'.format(factorization_ratio))
return np.array(embedding_dims), sequence_ratio, factorization_ratio
def plot(dims, sequence, factorization):
import matplotlib
matplotlib.use('Agg') # NOQA
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
plt.ylabel("Speed improvement")
plt.xlabel("Size of embedding layers")
plt.title("Fitting speed (1.0 = no change)")
plt.xscale('log')
plt.plot(dims,
1.0 / sequence,
label='Sequence model')
plt.plot(dims,
1.0 / factorization,
label='Factorization model')
plt.legend(loc='lower right')
plt.savefig('speed.png')
plt.close()
if __name__ == '__main__':
fname = 'performance.pickle'
if not os.path.exists(fname):
dims, sequence, factorization = embedding_size_scalability()
with open(fname, 'wb') as fle:
pickle.dump((dims, sequence, factorization), fle)
with open(fname, 'rb') as fle:
(dims, sequence, factorization) = pickle.load(fle)
plot(dims, sequence, factorization)
| mit |
huiyi1990/RosbagPandas | rosbag_pandas.py | 1 | 8453 | #!/usr/bin/env python
import warnings
import re
import subprocess
import types
import yaml
import pandas as pd
import numpy as np
import rosbag
import rospy
from roslib.message import get_message_class
def bag_to_dataframe(bag_name, include=None, exclude=None, parse_header=False, seconds=False):
'''
Read in a rosbag file and create a pandas data frame that
is indexed by the time the message was recorded in the bag.
:bag_name: String name for the bag file
:include: None, String, or List Topics to include in the dataframe
if None all topics added, if string it is used as regular
expression, if list that list is used.
:exclude: None, String, or List Topics to be removed from those added
using the include option using set difference. If None no topics
removed. If String it is treated as a regular expression. A list
removes those in the list.
:seconds: time index is in seconds
:returns: a pandas dataframe object
'''
# get list of topics to parse
yaml_info = get_bag_info(bag_name)
bag_topics = get_topics(yaml_info)
bag_topics = prune_topics(bag_topics, include, exclude)
length = get_length(bag_topics, yaml_info)
msgs_to_read, msg_type = get_msg_info(yaml_info, bag_topics, parse_header)
bag = rosbag.Bag(bag_name)
dmap = create_data_map(msgs_to_read)
# create datastore
datastore = {}
for topic in dmap.keys():
for f, key in dmap[topic].items():
t = msg_type[topic][f]
if isinstance(t, int) or isinstance(t, float):
arr = np.empty(length)
arr.fill(np.NAN)
else:
arr = np.array([None] * length)
datastore[key] = arr
# create the index
index = np.empty(length)
index.fill(np.NAN)
# all of the data is loaded
idx = 0
for topic, msg, mt in bag.read_messages(topics=bag_topics):
try:
if seconds:
index[idx] = msg.header.stamp.to_sec()
else:
index[idx] = msg.header.stamp.to_nsec()
except:
if seconds:
index[idx] = mt.to_sec()
else:
index[idx] = mt.to_nsec()
fields = dmap[topic]
for f, key in fields.items():
try:
d = get_message_data(msg, f)
datastore[key][idx] = d
except:
pass
idx = idx + 1
bag.close()
# convert the index
if not seconds:
index = pd.to_datetime(index, unit='ns')
# now we have read all of the messages its time to assemble the dataframe
return pd.DataFrame(data=datastore, index=index)
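# Illustrative usage sketch (the bag name and topic are assumed examples, not
# part of this module); the helper is defined here but never called.
def _example_bag_to_dataframe():
    '''Load one topic of a bag into a time-indexed pandas DataFrame.'''
    df = bag_to_dataframe('example.bag', include=['/odom'], seconds=True)
    # Column names follow get_key_name(): '/odom' becomes the prefix 'odom__',
    # and '.' in message field paths is replaced by '_'.
    return df.filter(regex='^odom__')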
def get_length(topics, yaml_info):
'''
Find the length (# of rows) in the created dataframe
'''
total = 0
info = yaml_info['topics']
for topic in topics:
for t in info:
if t['topic'] == topic:
total = total + t['messages']
break
return total
def create_data_map(msgs_to_read):
'''
Create a data map for usage when parsing the bag
'''
dmap = {}
for topic in msgs_to_read.keys():
base_name = get_key_name(topic) + '__'
fields = {}
for f in msgs_to_read[topic]:
key = (base_name + f).replace('.', '_')
fields[f] = key
dmap[topic] = fields
return dmap
def prune_topics(bag_topics, include, exclude):
    '''Prune the topics. If include is None, add all topics to the set of
    topics to use; if include is a string, keep the topics matching that
    regex; if it is a list, use that list.
    If exclude is None, do nothing; if it is a string, remove the topics
    matching that regex; if it is a list, remove those topics.'''
topics_to_use = set()
# add all of the topics
if include is None:
for t in bag_topics:
topics_to_use.add(t)
elif isinstance(include, basestring):
check = re.compile(include)
for t in bag_topics:
if re.match(check, t) is not None:
topics_to_use.add(t)
else:
try:
# add all of the includes if it is in the topic
for topic in include:
if topic in bag_topics:
topics_to_use.add(topic)
except:
warnings.warn('Error in topic selection Using All!')
topics_to_use = set()
for t in bag_topics:
topics_to_use.add(t)
to_remove = set()
# now exclude the exclusions
if exclude is None:
pass
elif isinstance(exclude, basestring):
check = re.compile(exclude)
for t in list(topics_to_use):
if re.match(check, t) is not None:
to_remove.add(t)
else:
for remove in exclude:
            # check against the topics actually in use; testing membership in
            # `exclude` itself (as originally written) is always true
            if remove in topics_to_use:
to_remove.add(remove)
# final set stuff to get topics to use
topics_to_use = topics_to_use - to_remove
# return a list for the results
return list(topics_to_use)
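# Illustrative examples of the include/exclude semantics above (topic names are
# assumed; the returned order is not guaranteed because a set is used):
#
#     prune_topics(['/odom', '/imu', '/tf'], None, None) -> all three
#     prune_topics(['/odom', '/imu', '/tf'], '/odom|/imu', None) -> odom and imu
#     prune_topics(['/odom', '/imu', '/tf'], None, ['/tf']) -> odom and imu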
def get_msg_info(yaml_info, topics, parse_header=True):
'''
Get info from all of the messages about what they contain
and will be added to the dataframe
'''
topic_info = yaml_info['topics']
msgs = {}
classes = {}
for topic in topics:
base_key = get_key_name(topic)
msg_paths = []
msg_types = {}
for info in topic_info:
if info['topic'] == topic:
msg_class = get_message_class(info['type'])
if msg_class is None:
warnings.warn(
                    'Could not find types for ' + topic + ', skipping')
else:
(msg_paths, msg_types) = get_base_fields(msg_class(), "",
parse_header)
msgs[topic] = msg_paths
classes[topic] = msg_types
return (msgs, classes)
def get_bag_info(bag_file):
    '''Get the yaml dict of the bag information
    by calling the rosbag subprocess -- used to create correctly sized
    arrays'''
# Get the info on the bag
bag_info = yaml.load(subprocess.Popen(
['rosbag', 'info', '--yaml', bag_file],
stdout=subprocess.PIPE).communicate()[0])
return bag_info
def get_topics(yaml_info):
    ''' Returns the names of all of the topics in the bag
'''
# Pull out the topic info
names = []
# Store all of the topics in a dictionary
topics = yaml_info['topics']
for topic in topics:
names.append(topic['topic'])
return names
def get_base_fields(msg, prefix='', parse_header=True):
'''function to get the full names of every message field in the message'''
slots = msg.__slots__
ret_val = []
msg_types = dict()
for i in slots:
slot_msg = getattr(msg, i)
if not parse_header and i == 'header':
continue
if hasattr(slot_msg, '__slots__'):
(subs, type_map) = get_base_fields(
slot_msg, prefix=prefix + i + '.',
parse_header=parse_header,
)
for i in subs:
ret_val.append(i)
for k, v in type_map.items():
msg_types[k] = v
else:
ret_val.append(prefix + i)
msg_types[prefix + i] = slot_msg
return (ret_val, msg_types)
def get_message_data(msg, key):
'''get the datapoint from the dot delimited message field key
    e.g. translation.x looks up translation then x and returns the value found
in x'''
data = msg
paths = key.split('.')
for i in paths:
data = getattr(data, i)
return data
def get_key_name(name):
'''fix up topic to key names to make them a little prettier'''
if name[0] == '/':
name = name[1:]
name = name.replace('/', '.')
return name
def clean_for_export(df):
new_df = pd.DataFrame()
for c, t in df.dtypes.iteritems():
if t.kind in 'OSUV':
s = df[c].dropna().apply(func=str)
s = s.str.replace('\n', '')
s = s.str.replace('\r', '')
s = s.str.replace(',','\t')
new_df[c] = s
else:
new_df[c] = df[c]
return new_df
if __name__ == '__main__':
print 'hello'
| apache-2.0 |
jlandmann/oggm | oggm/workflow.py | 2 | 6116 | """Wrappers for the single tasks, multi processor handling."""
from __future__ import division
# Built ins
import logging
import os
from shutil import rmtree
import collections
# External libs
import pandas as pd
import multiprocessing as mp
# Locals
import oggm
from oggm import cfg, tasks, utils
# MPI
try:
import oggm.mpi as ogmpi
_have_ogmpi = True
except ImportError:
_have_ogmpi = False
# Module logger
log = logging.getLogger(__name__)
# Multiprocessing Pool
_mp_pool = None
def _init_pool_globals(_cfg_contents, global_lock):
cfg.unpack_config(_cfg_contents)
utils.lock = global_lock
def init_mp_pool(reset=False):
"""Necessary because at import time, cfg might be uninitialized"""
global _mp_pool
if _mp_pool and not reset:
return _mp_pool
cfg_contents = cfg.pack_config()
global_lock = mp.Manager().Lock()
mpp = cfg.PARAMS['mp_processes']
if mpp == -1:
try:
mpp = int(os.environ['SLURM_JOB_CPUS_PER_NODE'])
log.info('Multiprocessing: using slurm allocated '
'processors (N={})'.format(mpp))
except KeyError:
mpp = mp.cpu_count()
log.info('Multiprocessing: using all available '
'processors (N={})'.format(mpp))
else:
log.info('Multiprocessing: using the requested number of '
'processors (N={})'.format(mpp))
_mp_pool = mp.Pool(mpp, initializer=_init_pool_globals,
initargs=(cfg_contents, global_lock))
return _mp_pool
def _merge_dicts(*dicts):
r = {}
for d in dicts:
r.update(d)
return r
class _pickle_copier(object):
"""Pickleable alternative to functools.partial,
    which is not pickleable in Python 2 and thus doesn't work
    with multiprocessing."""
def __init__(self, func, kwargs):
self.call_func = func
self.out_kwargs = kwargs
def __call__(self, gdir):
try:
if isinstance(gdir, collections.Sequence):
gdir, gdir_kwargs = gdir
gdir_kwargs = _merge_dicts(self.out_kwargs, gdir_kwargs)
return self.call_func(gdir, **gdir_kwargs)
else:
return self.call_func(gdir, **self.out_kwargs)
except Exception as e:
try:
                err_msg = '{0}: exception occurred while processing task ' \
'{1}'.format(gdir.rgi_id, self.call_func.__name__)
raise RuntimeError(err_msg) from e
except AttributeError:
pass
raise
def reset_multiprocessing():
"""Reset multiprocessing state
Call this if you changed configuration parameters mid-run and need them to
be re-propagated to child processes.
"""
global _mp_pool
if _mp_pool:
_mp_pool.terminate()
_mp_pool = None
def execute_entity_task(task, gdirs, **kwargs):
"""Execute a task on gdirs.
If you asked for multiprocessing, it will do it.
Parameters
----------
task : function
the entity task to apply
gdirs : list
the list of oggm.GlacierDirectory to process.
Optionally, each list element can be a tuple, with the first element
being the ``oggm.GlacierDirectory``, and the second element a dict that
will be passed to the task function as ``**kwargs``.
"""
if task.__dict__.get('global_task', False):
return task(gdirs, **kwargs)
pc = _pickle_copier(task, kwargs)
if _have_ogmpi:
if ogmpi.OGGM_MPI_COMM is not None:
ogmpi.mpi_master_spin_tasks(pc, gdirs)
return
if cfg.PARAMS['use_multiprocessing']:
mppool = init_mp_pool()
mppool.map(pc, gdirs, chunksize=1)
else:
for gdir in gdirs:
pc(gdir)
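# Illustrative usage sketch (not part of the OGGM API); `entities` is an assumed
# iterable of RGI entities and the helper is defined here but never called.
def _example_execute_entity_task(gdirs, entities):
    """Run one plain entity task, then one with per-glacier keyword arguments."""
    execute_entity_task(tasks.glacier_masks, gdirs)
    # Per-glacier kwargs are passed as (gdir, kwargs) tuples, exactly as
    # init_glacier_regions() does below.
    execute_entity_task(tasks.define_glacier_region,
                        [(gd, dict(entity=ent)) for gd, ent
                         in zip(gdirs, entities)])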
def init_glacier_regions(rgidf, reset=False, force=False):
"""Very first task to do (always).
Set reset=True in order to delete the content of the directories.
"""
if reset and not force:
reset = utils.query_yes_no('Delete all glacier directories?')
# if reset delete also the log directory
if reset:
fpath = os.path.join(cfg.PATHS['working_dir'], 'log')
if os.path.exists(fpath):
rmtree(fpath)
gdirs = []
new_gdirs = []
for _, entity in rgidf.iterrows():
gdir = oggm.GlacierDirectory(entity, reset=reset)
if not os.path.exists(gdir.get_filepath('dem')):
new_gdirs.append((gdir, dict(entity=entity)))
gdirs.append(gdir)
execute_entity_task(tasks.define_glacier_region, new_gdirs)
return gdirs
def gis_prepro_tasks(gdirs):
"""Helper function: run all flowlines tasks."""
task_list = [
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.compute_downstream_lines,
tasks.initialize_flowlines,
tasks.compute_downstream_bedshape,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction
]
for task in task_list:
execute_entity_task(task, gdirs)
def climate_tasks(gdirs):
"""Helper function: run all climate tasks."""
# I don't know where this logic is best placed...
if ('climate_file' in cfg.PATHS) and \
os.path.exists(cfg.PATHS['climate_file']):
_process_task = tasks.process_custom_climate_data
else:
# OK, so use the default CRU "high-resolution" method
_process_task = tasks.process_cru_data
execute_entity_task(_process_task, gdirs)
# Then, only global tasks
tasks.compute_ref_t_stars(gdirs)
tasks.distribute_t_stars(gdirs)
def inversion_tasks(gdirs):
"""Helper function: run all bed inversion tasks."""
# Init
execute_entity_task(tasks.prepare_for_inversion, gdirs)
# Global task
tasks.optimize_inversion_params(gdirs)
# Inversion for all glaciers
execute_entity_task(tasks.volume_inversion, gdirs)
# Filter
execute_entity_task(tasks.filter_inversion_output, gdirs)
| gpl-3.0 |
dwettstein/pattern-recognition-2016 | mlp/model_selection/exceptions.py | 35 | 4329 | """
The :mod:`sklearn.exceptions` module includes all custom warnings and error
classes used across scikit-learn.
"""
__all__ = ['NotFittedError',
'ChangedBehaviorWarning',
'ConvergenceWarning',
'DataConversionWarning',
'DataDimensionalityWarning',
'EfficiencyWarning',
'FitFailedWarning',
'NonBLASDotWarning',
'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples
--------
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
"""
class ChangedBehaviorWarning(UserWarning):
"""Warning class used to notify the user of any change in the behavior."""
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataConversionWarning(UserWarning):
"""Warning used to notify implicit data conversions happening in the code.
This warning occurs when some input data needs to be converted or
interpreted in a way that may not match the user's expectations.
For example, this warning may occur when the user
- passes an integer array to a function which expects float input and
will convert the input
- requests a non-copying operation, but a copy is required to meet the
implementation's data-type expectations;
- passes an input whose shape can be interpreted ambiguously.
"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality.
For example, in random projection, this warning is raised when the
number of components, which quantifies the dimensionality of the target
projection space, is higher than the number of features, which quantifies
the dimensionality of the original source space, to imply that the
dimensionality of the problem will not be reduced.
"""
class EfficiencyWarning(UserWarning):
"""Warning used to notify the user of inefficient computation.
This warning notifies the user that the efficiency may not be optimal due
to some reason which may be included as a part of the warning message.
This may be subclassed into a more specific Warning class.
"""
class FitFailedWarning(RuntimeWarning):
"""Warning class used if there is an error while fitting the estimator.
This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV
and the cross-validation helper function cross_val_score to warn when there
is an error while fitting the estimator.
Examples
--------
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import FitFailedWarning
>>> import warnings
>>> warnings.simplefilter('always', FitFailedWarning)
>>> gs = GridSearchCV(LinearSVC(), {'C': [-1, -2]}, error_score=0)
>>> X, y = [[1, 2], [3, 4], [5, 6], [7, 8], [8, 9]], [0, 0, 0, 1, 1]
>>> with warnings.catch_warnings(record=True) as w:
... try:
... gs.fit(X, y) # This will raise a ValueError since C is < 0
... except ValueError:
... pass
... print(repr(w[-1].message))
... # doctest: +NORMALIZE_WHITESPACE
FitFailedWarning("Classifier fit failed. The score on this train-test
partition for these parameters will be set to 0.000000. Details:
\\nValueError('Penalty term must be positive; got (C=-2)',)",)
"""
class NonBLASDotWarning(EfficiencyWarning):
"""Warning used when the dot operation does not use BLAS.
This warning is used to notify the user that BLAS was not used for dot
operation and hence the efficiency may be affected.
"""
class UndefinedMetricWarning(UserWarning):
"""Warning used when the metric is invalid"""
| mit |
numenta/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/offsetbox.py | 69 | 17728 | """
The OffsetBox is a simple container artist. The child artists are meant
to be drawn at a relative position to their parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer classes automatically adjust the relative positions of
their children, which should be instances of the OffsetBox. This is used
to align similar artists together, e.g., in legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.patches import bbox_artist as mbbox_artist
DEBUG=False
# for debugging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
    Given a list of (width, xdescent) for each box, calculate the
    total width and the x-offset position of each item according to
    *mode*. xdescent is analogous to the usual descent, but along the
    x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = zip(*wd_list)
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
sep = (total - sum(w_list))/(len(w_list)-1.)
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh+sep)*len(w_list)
else:
sep = float(total)/(len(w_list)) - maxh
offsets = np.array([(maxh+sep)*i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
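# Worked example for _get_packed_offsets(): widths (10, 20, 30) with sep=5,
# total=None and mode="fixed" give offsets [0, 15, 40] and a total of 70; with
# mode="expand" and total=80, sep becomes (80 - 60) / 2 = 10 and the offsets
# are [0, 20, 50].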
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
    Given a list of (height, descent) for each box, align the boxes
    with *align* and calculate the total height, the overall descent,
    and the y-offset of each box.
    *hd_list* : list of (height, descent) of boxes to be aligned.
    *height* : intended total height. None if not used.
    *align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h-d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left","top"]:
descent=0.
offsets = [d for h, d in hd_list]
elif align in ["right","bottom"]:
descent=0.
offsets = [height-h+d for h, d in hd_list]
elif align == "center":
descent=0.
offsets = [(height-h)*.5+d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
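# Worked example for _get_aligned_offsets(): hd_list = [(10, 2), (16, 4)] with
# height=20 and align="center" gives descent 0 and offsets
# [(20-10)*.5 + 2, (20-16)*.5 + 4] = [7.0, 6.0]; with align="baseline" the
# height is recomputed as max(h-d) + max(d) = 12 + 4 = 16 and every offset is 0.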
class OffsetBox(martist.Artist):
"""
    The OffsetBox is a simple container artist. The child artists are meant
    to be drawn at a relative position to their parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
self._children = []
self._offset = (0, 0)
def set_figure(self, fig):
"""
Set the figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
def set_offset(self, xy):
"""
Set the offset
accepts x, y, tuple, or a callable object.
"""
self._offset = xy
def get_offset(self, width, height, xdescent, ydescent):
"""
Get the offset
accepts extent of the box
"""
if callable(self._offset):
return self._offset(width, height, xdescent, ydescent)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
raise Exception("")
def get_extent(self, renderer):
"""
        Return width, height, xdescent, ydescent of the box
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd)
return mtransforms.Bbox.from_bounds(px-xd, py-yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent)
for c, (ox, oy) in zip(self.get_children(), offsets):
c.set_offset((px+ox, py+oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
    The VPacker has its children packed vertically. It automatically
    adjusts the relative positions of its children at drawing time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
        Update the offsets of the children and return the extent of the box.
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
whd_list = [(w, h, xd, (h-yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w,h,xd,yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
self.sep, self.mode)
yoffsets = yoffsets_ + [yd for w,h,xd,yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent+self.pad, ydescent+self.pad, \
zip(xoffsets, yoffsets)
class HPacker(PackerBase):
"""
    The HPacker has its children packed horizontally. It automatically
    adjusts the relative positions of its children at drawing time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
        Update the offsets of the children and return the extent of the box.
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
if self.height is None:
height_descent = max([h-yd for w,h,xd,yd in whd_list])
ydescent = max([yd for w,h,xd,yd in whd_list])
height = height_descent + ydescent
else:
            height = self.height - 2*self.pad  # height w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w,h,xd,yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
self.sep, self.mode)
xoffsets = xoffsets_ + [xd for w,h,xd,yd in whd_list]
xdescent=whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent + self.pad, ydescent + self.pad, \
zip(xoffsets, yoffsets)
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=True):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accepts a tuple of x, y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
"""
        Return width, height, xdescent, ydescent of the box
"""
return self.width, self.height, self.xdescent, self.ydescent
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
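# Illustrative usage sketch (Circle is an assumed import from matplotlib.patches,
# not made by this module):
#
#     da = DrawingArea(30, 30, xdescent=0., ydescent=0.)
#     da.add_artist(Circle((15, 15), 10, fc="r"))
#
# The circle lives in the DrawingArea's own 30x30 coordinate system and the
# whole area is shifted by whatever parent container sets its offset.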
class TextArea(OffsetBox):
"""
    The TextArea contains a single Text instance. The text is
    placed at (0,0) with baseline+left alignment. The width and height
    of the TextArea instance are the width and height of its child
    text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
*s* : a string to be displayed.
*textprops* : property dictionary for the text
*multilinebaseline* : If True, baseline for multiline text is
                           adjusted so that it is (approximately)
center-aligned with singleline text.
*minimumdescent* : If True, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if not textprops.has_key("va"):
textprops["va"]="baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform+self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_multilinebaseline(self, t):
"""
Set multilinebaseline .
If True, baseline for multiline text is
        adjusted so that it is (approximately) center-aligned with
singleline text.
"""
self._multilinebaseline = t
def get_multilinebaseline(self):
"""
get multilinebaseline .
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
Set minimumdescent .
If True, extent of the single line text is adjusted so that
it has minimum descent of "p"
"""
self._minimumdescent = t
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accepts a tuple of x, y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[0][0] # first line
_, hh, dd = renderer.get_text_width_height_descent(
clean_line, self._text._fontproperties, ismath=ismath)
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline: # multi line
d = h-(hh-dd) # the baseline of the first line
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h-dd)
if self.get_minimumdescent():
## to have a minimum descent, #i.e., "l" and "p" have same
## descents.
d = max(dd, d_)
else:
d = dd
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
| agpl-3.0 |
hsuantien/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
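# Illustrative invocation (the output file name is an assumed example): run the
# GUI and, on exit, dump the hand-drawn points in svmlight format:
#
#     python svm_gui.py --output points.svmlight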
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
saiwing-yeung/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large
dataset by chunks. We load one image at a time and extract 50 patches
at random from it. Once we have accumulated 500 of these patches (using
10 images), we run the `partial_fit` method of the online KMeans object,
MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to `partial_fit`.
This happens when the number of patches a cluster represents becomes too
low, and it is better to choose a new random cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 6 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
allenlavoie/tensorflow | tensorflow/contrib/learn/python/learn/estimators/kmeans.py | 15 | 11087 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of k-means clustering on top of `Estimator` API (deprecated).
This module is deprecated. Please use
@{tf.contrib.factorization.KMeansClustering} instead of
@{tf.contrib.learn.KMeansClustering}. It has a similar interface, but uses the
@{tf.estimator.Estimator} API instead of @{tf.contrib.learn.Estimator}.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.util.deprecation import deprecated
_USE_TF_CONTRIB_FACTORIZATION = (
'Please use tf.contrib.factorization.KMeansClustering instead of'
' tf.contrib.learn.KMeansClustering. It has a similar interface, but uses'
' the tf.estimator.Estimator API instead of tf.contrib.learn.Estimator.')
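# A rough migration sketch; the constructor argument and method names used
# below (num_clusters, use_mini_batch, train, cluster_centers,
# predict_cluster_index) are assumptions about the tf.contrib.factorization /
# tf.estimator-style API and should be checked against its documentation:
#
#   kmeans = tf.contrib.factorization.KMeansClustering(
#       num_clusters=num_clusters, use_mini_batch=True)
#   kmeans.train(input_fn)                          # instead of fit()
#   centers = kmeans.cluster_centers()              # instead of clusters()
#   for idx in kmeans.predict_cluster_index(input_fn):
#       pass                                        # instead of predict_cluster_idx()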
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, tolerance):
"""Initializes _LossRelativeChangeHook.
Args:
tolerance: A relative tolerance of change between iterations.
"""
self._tolerance = tolerance
self._prev_loss = None
def begin(self):
self._loss_tensor = ops.get_default_graph().get_tensor_by_name(
KMeansClustering.LOSS_OP_NAME + ':0')
assert self._loss_tensor is not None
def before_run(self, run_context):
del run_context
return SessionRunArgs(
fetches={KMeansClustering.LOSS_OP_NAME: self._loss_tensor})
def after_run(self, run_context, run_values):
loss = run_values.results[KMeansClustering.LOSS_OP_NAME]
assert loss is not None
if self._prev_loss is not None:
relative_change = (abs(loss - self._prev_loss) /
(1 + abs(self._prev_loss)))
if relative_change < self._tolerance:
run_context.request_stop()
self._prev_loss = loss
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes clusters or waits for cluster initialization."""
def __init__(self, init_op, is_initialized_op, is_chief):
self._init_op = init_op
self._is_chief = is_chief
self._is_initialized_op = is_initialized_op
def after_create_session(self, session, _):
assert self._init_op.graph == ops.get_default_graph()
assert self._is_initialized_op.graph == self._init_op.graph
while True:
try:
if session.run(self._is_initialized_op):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
def _parse_tensor_or_dict(features):
"""Helper function to parse features."""
if isinstance(features, dict):
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
features = array_ops.concat([features[k] for k in keys], 1)
return features
def _kmeans_clustering_model_fn(features, labels, mode, params, config):
"""Model function for KMeansClustering estimator."""
assert labels is None, labels
(all_scores, model_predictions, losses,
is_initialized, init_op, training_op) = clustering_ops.KMeans(
_parse_tensor_or_dict(features),
params.get('num_clusters'),
initial_clusters=params.get('training_initial_clusters'),
distance_metric=params.get('distance_metric'),
use_mini_batch=params.get('use_mini_batch'),
mini_batch_steps_per_iteration=params.get(
'mini_batch_steps_per_iteration'),
random_seed=params.get('random_seed'),
kmeans_plus_plus_num_retries=params.get(
'kmeans_plus_plus_num_retries')).training_graph()
incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
loss = math_ops.reduce_sum(losses, name=KMeansClustering.LOSS_OP_NAME)
summary.scalar('loss/raw', loss)
training_op = with_dependencies([training_op, incr_step], loss)
predictions = {
KMeansClustering.ALL_SCORES: all_scores[0],
KMeansClustering.CLUSTER_IDX: model_predictions[0],
}
eval_metric_ops = {KMeansClustering.SCORES: loss}
training_hooks = [_InitializeClustersHook(
init_op, is_initialized, config.is_chief)]
relative_tolerance = params.get('relative_tolerance')
if relative_tolerance is not None:
training_hooks.append(_LossRelativeChangeHook(relative_tolerance))
return ModelFnOps(
mode=mode,
predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss,
train_op=training_op,
training_hooks=training_hooks)
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
"""An Estimator for K-Means clustering.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
SCORES = 'scores'
CLUSTER_IDX = 'cluster_idx'
CLUSTERS = 'clusters'
ALL_SCORES = 'all_scores'
LOSS_OP_NAME = 'kmeans_loss'
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
mini_batch_steps_per_iteration=1,
kmeans_plus_plus_num_retries=2,
relative_tolerance=None,
config=None):
"""Creates a model for running KMeans training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
initial_clusters: specifies how to initialize the clusters for training.
See clustering_ops.kmeans for the possible values.
distance_metric: the distance metric used for clustering.
See clustering_ops.kmeans for the possible values.
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: number of steps after which the updated
cluster centers are synced back to a master copy. See clustering_ops.py
for more details.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
Note that this may not work correctly if use_mini_batch=True.
config: See Estimator
"""
params = {}
params['num_clusters'] = num_clusters
params['training_initial_clusters'] = initial_clusters
params['distance_metric'] = distance_metric
params['random_seed'] = random_seed
params['use_mini_batch'] = use_mini_batch
params['mini_batch_steps_per_iteration'] = mini_batch_steps_per_iteration
params['kmeans_plus_plus_num_retries'] = kmeans_plus_plus_num_retries
params['relative_tolerance'] = relative_tolerance
super(KMeansClustering, self).__init__(
model_fn=_kmeans_clustering_model_fn,
params=params,
model_dir=model_dir,
config=config)
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def predict_cluster_idx(self, input_fn=None):
"""Yields predicted cluster indices."""
key = KMeansClustering.CLUSTER_IDX
results = super(KMeansClustering, self).predict(
input_fn=input_fn, outputs=[key])
for result in results:
yield result[key]
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def score(self, input_fn=None, steps=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
input_fn: see predict.
steps: see predict.
Returns:
Total sum of distances to nearest clusters.
"""
return np.sum(
self.evaluate(
input_fn=input_fn, steps=steps)[KMeansClustering.SCORES])
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def transform(self, input_fn=None, as_iterable=False):
"""Transforms each element to distances to cluster centers.
Note that this function is different from the corresponding one in sklearn.
For SQUARED_EUCLIDEAN distance metric, sklearn transform returns the
EUCLIDEAN distance, while this function returns the SQUARED_EUCLIDEAN
distance.
Args:
input_fn: see predict.
as_iterable: see predict
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
key = KMeansClustering.ALL_SCORES
results = super(KMeansClustering, self).predict(
input_fn=input_fn,
outputs=[key],
as_iterable=as_iterable)
if not as_iterable:
return results[key]
else:
return results
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def clusters(self):
"""Returns cluster centers."""
return super(KMeansClustering, self).get_variable_value(self.CLUSTERS)
| apache-2.0 |
tardis-sn/tardis | tardis/plasma/properties/general.py | 1 | 4003 | import logging
import numpy as np
import pandas as pd
from astropy import units as u
from tardis import constants as const
from tardis.plasma.properties.base import ProcessingPlasmaProperty
logger = logging.getLogger(__name__)
__all__ = [
"BetaRadiation",
"GElectron",
"NumberDensity",
"SelectedAtoms",
"ElectronTemperature",
"BetaElectron",
"LuminosityInner",
"TimeSimulation",
"ThermalGElectron",
]
class BetaRadiation(ProcessingPlasmaProperty):
"""
Attributes
----------
beta_rad : Numpy Array, dtype float
"""
outputs = ("beta_rad",)
latex_name = (r"\beta_{\textrm{rad}}",)
latex_formula = (r"\dfrac{1}{k_{B} T_{\textrm{rad}}}",)
def __init__(self, plasma_parent):
super(BetaRadiation, self).__init__(plasma_parent)
self.k_B_cgs = const.k_B.cgs.value
def calculate(self, t_rad):
return 1 / (self.k_B_cgs * t_rad)
class GElectron(ProcessingPlasmaProperty):
"""
Attributes
----------
g_electron : Numpy Array, dtype float
"""
outputs = ("g_electron",)
latex_name = (r"g_{\textrm{electron}}",)
latex_formula = (
r"\Big(\dfrac{2\pi m_{e}/\beta_{\textrm{rad}}}{h^2}\Big)^{3/2}",
)
def calculate(self, beta_rad):
return (
(2 * np.pi * const.m_e.cgs.value / beta_rad)
/ (const.h.cgs.value ** 2)
) ** 1.5
class ThermalGElectron(GElectron):
"""
Attributes
----------
thermal_g_electron : Numpy Array, dtype float
"""
outputs = ("thermal_g_electron",)
latex_name = (r"g_{\textrm{electron_thermal}}",)
latex_formula = (
r"\Big(\dfrac{2\pi m_{e}/\beta_{\textrm{electron}}}{h^2}\Big)^{3/2}",
)
def calculate(self, beta_electron):
return super(ThermalGElectron, self).calculate(beta_electron)
class NumberDensity(ProcessingPlasmaProperty):
"""
Attributes
----------
number_density : Pandas DataFrame, dtype float
Indexed by atomic number, columns corresponding to zones
"""
outputs = ("number_density",)
latex_name = ("N_{i}",)
@staticmethod
def calculate(atomic_mass, abundance, density):
number_densities = abundance * density
return number_densities.div(atomic_mass.loc[abundance.index], axis=0)
class SelectedAtoms(ProcessingPlasmaProperty):
"""
Attributes
----------
selected_atoms : Pandas Int64Index, dtype int
Atomic numbers of elements required for particular simulation
"""
outputs = ("selected_atoms",)
def calculate(self, abundance):
return abundance.index
class ElectronTemperature(ProcessingPlasmaProperty):
"""
Attributes
----------
    t_electrons : Numpy Array, dtype float
"""
outputs = ("t_electrons",)
latex_name = (r"T_{\textrm{electron}}",)
latex_formula = (r"\textrm{const.}\times T_{\textrm{rad}}",)
def calculate(self, t_rad, link_t_rad_t_electron):
return t_rad * link_t_rad_t_electron
class BetaElectron(ProcessingPlasmaProperty):
"""
Attributes
----------
beta_electron : Numpy Array, dtype float
"""
outputs = ("beta_electron",)
latex_name = (r"\beta_{\textrm{electron}}",)
    latex_formula = (r"\dfrac{1}{k_{B} T_{\textrm{electron}}}",)
def __init__(self, plasma_parent):
super(BetaElectron, self).__init__(plasma_parent)
self.k_B_cgs = const.k_B.cgs.value
def calculate(self, t_electrons):
return 1 / (self.k_B_cgs * t_electrons)
class LuminosityInner(ProcessingPlasmaProperty):
outputs = ("luminosity_inner",)
@staticmethod
def calculate(r_inner, t_inner):
return (
4 * np.pi * const.sigma_sb.cgs * r_inner[0] ** 2 * t_inner ** 4
).to("erg/s")
class TimeSimulation(ProcessingPlasmaProperty):
outputs = ("time_simulation",)
@staticmethod
def calculate(luminosity_inner):
return 1.0 * u.erg / luminosity_inner
| bsd-3-clause |
chrhartm/SORN | common/sorn_stats.py | 2 | 74077 | from __future__ import division
from pylab import *
import utils
utils.backup(__file__)
from stats import AbstractStat
from stats import HistoryStat
from stats import _getvar
from common.sources import TrialSource
from utils.lstsq_reg import lstsq_reg
import cPickle as pickle
import gzip
def load_source(name,c):
try:
filename = c.logfilepath+name+".pickle"
sourcefile = gzip.open(filename,"r")
except IOError: # Cluster
filename = c.logfilepath+\
name+"_%s_%.3f.pickle"\
%(c.cluster.vary_param,\
c.cluster.current_param)
sourcefile = gzip.open(filename,"r")
source = pickle.load(sourcefile)
if isinstance(source,TrialSource):
source = source.source
return source
class CounterStat(AbstractStat):
def __init__(self):
self.name = 'num_steps'
self.collection = "reduce"
def start(self,c,obj):
c[self.name] = 0.0 # Everything needs to be a float :-/
def add(self,c,obj):
c[self.name] += 1
def report(self,c,obj):
return array(c[self.name]) # And an array :-/
# By making CounterStat a little longer we can make ClearCounterStat a
# lot shorter
class ClearCounterStat(CounterStat):
def __init__(self):
self.name = 'counter'
self.collection = "ignore"
(self.clear,self.start) = (self.start,self.clear)
class PopulationVariance(AbstractStat):
def __init__(self):
self.name = 'pop_var'
self.collection = 'reduce'
def clear(self,c,obj):
N = obj.c.N_e
c.pop_var = zeros(N+1)
def add(self,c,obj):
n = sum(obj.x)
c.pop_var[n] += 1.0
def report(self,c,obj):
return c.pop_var
class ActivityStat(AbstractStat):
"""
    Gathers the mean excitatory activity (fraction of active units) at
    each step.
    If the parameter only_last is set, only a subsampled trace of the
    whole run plus the last only_last steps are collected
"""
def __init__(self):
self.name = 'activity'
self.collection = 'gather'
def clear(self,c,sorn):
if sorn.c.stats.has_key('only_last'):
c.activity = zeros(sorn.c.stats.only_last\
+sorn.c.stats.only_last)
else:
c.activity = zeros(sorn.c.N_steps)
self.step = 0
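    # Buffer layout sketch with only_last = K (derived from clear() above
    # and add() below, assuming N_steps is a multiple of K): activity[:K]
    # holds every (N_steps//K)-th step of the whole run, activity[K:] holds
    # the last K steps densely; without only_last, one entry per step.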
def add(self,c,sorn):
if sorn.c.stats.has_key('only_last'):
new_step = self.step - (sorn.c.N_steps\
-sorn.c.stats.only_last)
if new_step >= 0:
c.activity[new_step+sorn.c.stats.only_last] \
= sum(sorn.x)/sorn.c.N_e
elif self.step % (sorn.c.N_steps\
//sorn.c.stats.only_last) == 0:
c.activity[self.step//(sorn.c.N_steps\
//sorn.c.stats.only_last)] = sum(sorn.x)/sorn.c.N_e
else:
c.activity[self.step] = sum(sorn.x)/sorn.c.N_e
self.step += 1
def report(self,c,sorn):
return c.activity
class InputIndexStat(AbstractStat):
"""
Gathers the index of the input at each step
"""
def __init__(self):
self.name = 'InputIndex'
self.collection = 'gather'
def clear(self,c,sorn):
if sorn.c.stats.has_key('only_last'):
c.inputindex = zeros(sorn.c.stats.only_last\
+sorn.c.stats.only_last)
else:
c.inputindex = zeros(sorn.c.N_steps)
self.step = 0
def add(self,c,sorn):
if sorn.c.stats.has_key('only_last'):
new_step = self.step - (sorn.c.N_steps\
-sorn.c.stats.only_last)
if new_step >= 0:
c.inputindex[new_step+sorn.c.stats.only_last] \
= sorn.source.global_index()
elif self.step % (sorn.c.N_steps\
//sorn.c.stats.only_last) == 0:
c.inputindex[self.step//(sorn.c.N_steps\
//sorn.c.stats.only_last)] = sorn.source.global_index()
else:
c.inputindex[self.step] = sorn.source.global_index()
self.step += 1
def report(self,c,sorn):
return c.inputindex
class WordListStat(AbstractStat):
# OLD! use pickle of source instead!
def __init__(self):
self.name = 'WordList'
self.collection = 'gather'
def report(self,c,sorn):
return sorn.c.words
class InputUnitsStat(AbstractStat):
def __init__(self):
self.name = 'InputUnits'
self.collection = 'gather'
def report(self,c,sorn):
input_units = where(sum(sorn.W_eu.get_synapses(),1)>0)[0]
# to make them equal in size
tmp = array([z in input_units for z in arange(sorn.c.N_e)])
return tmp+0 # cast as double
class NormLastStat(AbstractStat):
'''
This is a helper Stat that computes the normalized last spikes
and input indices
'''
def __init__(self):
self.name = 'NormLast'
self.collection = 'gather'
def report(self,c,sorn):
steps_plastic = sorn.c.steps_plastic
steps_noplastic_train = sorn.c.steps_noplastic_train
steps_noplastic_test = sorn.c.steps_noplastic_test
plastic_train = steps_plastic+steps_noplastic_train
input_spikes = c.spikes[:,steps_plastic:plastic_train]
input_index = c.inputindex[steps_plastic:plastic_train]
# Filter out empty states
input_spikes = input_spikes[:,input_index != -1]
input_index = input_index[input_index != -1]
if sorn.c.stats.has_key('only_last'):
N_comparison = sorn.c.stats.only_last
else:
N_comparison = 2500
assert(N_comparison > 0)
assert(N_comparison <= steps_noplastic_test \
and N_comparison <= steps_noplastic_train)
maxindex = int(max(input_index))
        # Only use spikes that occurred at the end of learning and spont
last_input_spikes = input_spikes[:,-N_comparison:]
last_input_index = input_index[-N_comparison:]
        # Get the minimal occurrence of an index in the last steps
min_letter_count = inf
for i in range(maxindex+1):
tmp = sum(last_input_index == i)
if min_letter_count > tmp:
min_letter_count = tmp
# For each index, take the same number of states from the
# end phase of learning to avoid a bias in comparing states
norm_last_input_spikes = np.zeros((shape(last_input_spikes)[0],\
min_letter_count*(maxindex+1)))
norm_last_input_index = np.zeros(min_letter_count*(maxindex+1))
for i in range(maxindex+1):
indices = find(last_input_index == i)
norm_last_input_spikes[:,min_letter_count*i\
: min_letter_count*(i+1)]\
= last_input_spikes[:, indices[-min_letter_count:]]
norm_last_input_index[min_letter_count*i\
: min_letter_count*(i+1)]\
= last_input_index[indices[-min_letter_count:]]
# Shuffle to avoid argmin-problem of selecting only first match
indices = arange(shape(norm_last_input_index)[0])
shuffle(indices)
norm_last_input_index = norm_last_input_index[indices]
norm_last_input_spikes = norm_last_input_spikes[:,indices]
c.norm_last_input_index = norm_last_input_index
c.norm_last_input_spikes = norm_last_input_spikes
c.maxindex = maxindex
c.N_comparison = N_comparison
to_return = array([float(N_comparison)])
return to_return
class SpontPatternStat(AbstractStat):
"""
Computes the frequency of each pattern in the spontaneous activity
"""
def __init__(self):
self.name = 'SpontPattern'
self.collection = 'gather'
def report(self,c,sorn):
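        # Return sketch (derived from the code below): pattern_freqs has
        # shape (2, maxindex+1) (plus a -1 marker column if gatherv).
        # Row 0 counts, per input index, how many spontaneous states are
        # closest in Hamming distance to evoked states of that index;
        # row 1 counts occurrences of the full forward and reversed word
        # index sequences.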
source_plastic = load_source("source_plastic",sorn.c)
steps_noplastic_test = sorn.c.steps_noplastic_test
spont_spikes = c.spikes[:,-steps_noplastic_test:]
norm_last_input_index = c.norm_last_input_index
norm_last_input_spikes = c.norm_last_input_spikes
maxindex = c.maxindex
N_comparison = c.N_comparison
last_spont_spikes = spont_spikes[:,-N_comparison:]
# Remove silent periods from spontspikes
last_spont_spikes = last_spont_spikes[:,sum(last_spont_spikes,0)>0]
N_comp_spont = shape(last_spont_spikes)[1]
# Find for each spontaneous state the evoked state with the
# smallest hamming distance and store the corresponding index
similar_input = zeros(N_comp_spont)
for i in xrange(N_comp_spont):
most_similar = argmin(sum(abs(norm_last_input_spikes.T\
-last_spont_spikes[:,i]),axis=1))
similar_input[i] = norm_last_input_index[most_similar]
# Count the number of spontaneous states for each index and plot
index = range(maxindex+1)
if self.collection == 'gatherv':
adding = 2
else:
adding = 1
pattern_freqs = zeros((2,maxindex+adding))
barcolor = []
for i in index:
pattern_freqs[0,i] = sum(similar_input==index[i])
# Compare patterns
# Forward patterns ([0,1,2,3],[4,5,6,7],...)
patterns = array([arange(len(w))+source_plastic.glob_ind[i] \
for (i,w) in enumerate(source_plastic.words)])
rev_patterns = array([x[::-1] for x in patterns])
maxlen = max([len(x) for x in patterns])
# Also get the reversed patterns
if maxlen>1: # Single letters can't be reversed
allpatterns = array(patterns.tolist()+rev_patterns.tolist())
else:
allpatterns = array(patterns.tolist())
for (i,p) in enumerate(allpatterns):
patternlen = len(p)
for j in xrange(N_comp_spont-maxlen):
if all(similar_input[j:j+patternlen] == p):
pattern_freqs[1,i] += 1
# Marker for end of freqs
if self.collection == 'gatherv':
pattern_freqs[:,-1] = -1
c.similar_input = similar_input
return(pattern_freqs)
class SpontTransitionStat(AbstractStat):
def __init__(self):
self.name = 'SpontTransition'
self.collection = 'gather'
def report(self,c,sorn):
similar_input = c.similar_input # from SpontPatternStat
maxindex = c.maxindex
transitions = np.zeros((maxindex+1,maxindex+1))
for (i_from, i_to) in zip(similar_input[:-1],similar_input[1:]):
transitions[i_to,i_from] += 1
return transitions
class SpontIndexStat(AbstractStat):
def __init__(self):
self.name = 'SpontIndex'
self.collection = 'gather'
def report (self,c,sorn):
return c.similar_input
class BayesStat(AbstractStat):
def __init__(self,pred_pos = 0):
self.name = 'Bayes'
self.collection = 'gather'
self.pred_pos = pred_pos # steps before M/N
def clear(self,c,sorn):
pass
# If raw_prediction is input to M/N neurons, this is needed
#~ self.M_neurons = where(sorn.W_eu.W[:,
#~ sorn.source.source.lookup['M']]==1)[0]
#~ self.N_neurons = where(sorn.W_eu.W[:,
#~ sorn.source.source.lookup['N']]==1)[0]
def report(self,c,sorn):
### Prepare spike train matrices for training and testing
# Separate training and test data according to steps
source_plastic = load_source("source_plastic",sorn.c)
steps_plastic = sorn.c.steps_plastic
N_train_steps = sorn.c.steps_noplastic_train
N_inputtrain_steps = steps_plastic + N_train_steps
N_test_steps = sorn.c.steps_noplastic_test
burnin = 3000
# Transpose because this is the way they are in test_bayes.py
Xtrain = c.spikes[:,steps_plastic+burnin:N_inputtrain_steps].T
Xtest = c.spikes[:,N_inputtrain_steps:].T
assert(shape(Xtest)[0] == N_test_steps)
inputi_train = c.inputindex[steps_plastic+burnin
:N_inputtrain_steps]
assert(shape(Xtrain)[0] == shape(inputi_train)[0])
inputi_test = c.inputindex[N_inputtrain_steps:]
assert(shape(inputi_test)[0]== N_test_steps)
N_fracs = len(sorn.c.frac_A)
# Filter out empty states
if isinstance(sorn.source,TrialSource): # if TrialSource
source = sorn.source.source
else:
source = sorn.source
Xtrain = Xtrain[inputi_train != -1,:]
inputi_train = inputi_train[inputi_train != -1]
Xtest = Xtest[inputi_test != -1,:]
inputi_test = inputi_test[inputi_test != -1]
        # Following snippet modified from sorn_stats spont_stat
        # Get the minimal occurrence of an index in the last steps
maxindex = int(max(inputi_train))
min_letter_count = inf
for i in range(maxindex+1):
tmp = sum(inputi_train == i)
if min_letter_count > tmp:
min_letter_count = tmp
# For each index, take the same number of states from the
# end phase of learning to avoid a bias in comparing states
norm_Xtrain = np.zeros((min_letter_count*(maxindex+1),
shape(Xtrain)[1]))
norm_inputi_train = np.zeros(min_letter_count*(maxindex+1))
for i in range(maxindex+1):
indices = find(inputi_train == i)
norm_Xtrain[min_letter_count*i
: min_letter_count*(i+1), :]\
= Xtrain[indices[-min_letter_count:],:]
norm_inputi_train[min_letter_count*i
: min_letter_count*(i+1)]\
= inputi_train[indices[-min_letter_count:]]
Xtrain = norm_Xtrain
inputi_train = norm_inputi_train
noinput_units = where(sum(sorn.W_eu.W,1)==0)[0]
if sorn.c.stats.bayes_noinput:
Xtrain_noinput = Xtrain[:,noinput_units]
Xtest_noinput = Xtest[:,noinput_units]
else:
Xtrain_noinput = Xtrain
Xtest_noinput = Xtest
assert(source_plastic.words[0][0]=="A" and
source_plastic.words[1][0]=="B")
A_index = source_plastic.glob_ind[0] # start of first word
B_index = source_plastic.glob_ind[1] # start of second word
# position from which to predict end of word
pred_pos = len(source_plastic.words[0])-1-self.pred_pos
assert(pred_pos>=0
and pred_pos <= source_plastic.global_range())
R = np.zeros((2,shape(inputi_train)[0]))
R[0,:] = inputi_train == A_index+pred_pos
R[1,:] = inputi_train == B_index+pred_pos
if sorn.c.stats.relevant_readout:
Xtrain_relevant = Xtrain_noinput[((inputi_train ==
A_index+pred_pos) +
(inputi_train == B_index+pred_pos))>0,:]
R_relevant = R[:,((inputi_train == A_index+pred_pos) +
(inputi_train == B_index+pred_pos))>0]
classifier = lstsq_reg(Xtrain_relevant,R_relevant.T,
sorn.c.stats.lstsq_mue)
else:
classifier = lstsq_reg(Xtrain_noinput,R.T,
sorn.c.stats.lstsq_mue)
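        # Readout sketch: lstsq_reg presumably solves a ridge-style
        # regularized least-squares problem (an assumption based on its
        # name and the lstsq_mue argument), roughly
        #   classifier = argmin_W ||X.dot(W) - R.T||^2 + mue*||W||^2,
        # so Xtest_noinput.dot(classifier) below gives graded output gains
        # for "A predicted" (column 0) and "B predicted" (column 1).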
#~ # No real difference between LogReg, BayesRidge and my thing
#~ # If you do this, comment out raw_predictions further down
#~ from sklearn import linear_model
#~ clf0 = linear_model.LogisticRegression(C=1)#BayesianRidge()
#~ clf1 = linear_model.LogisticRegression(C=1)#BayesianRidge()
#~ clf0.fit(Xtrain_noinput,R.T[:,0])
#~ clf1.fit(Xtrain_noinput,R.T[:,1])
#~ raw_predictions = vstack((clf0.predict_proba(Xtest_noinput)[:,1]
#~ ,clf1.predict_proba(Xtest_noinput)[:,1])).T
# predict
#~ raw_predictions = Xtest.dot(classifier)
#~ # comment this out if you use sklearn
raw_predictions = Xtest_noinput.dot(classifier)
#~ # Historical stuff
#~ # Raw predictions = total synaptic input to M/N neurons
#~ raw_predictions[1:,0] = sum((sorn.W_ee*Xtest[:-1].T)[
#~ self.M_neurons],0)
#~ raw_predictions[1:,1] = sum((sorn.W_ee*Xtest[:-1].T)[
#~ self.N_neurons],0)
#~ # Raw predictions = total activation of M/N neurons
#~ raw_predictions[:,0] = sum(Xtest.T[self.M_neurons],0)
#~ raw_predictions[:,1] = sum(Xtest.T[self.N_neurons],0)
#~ # for testing: sum(raw_predictions[indices,0])>indices+-1,2,3
letters_for_frac = ['B']
# Because alphabet is sorted alphabetically, this list will
# have the letters corresponding to the list frac_A
for l in source.alphabet:
if not ((l=='A') or (l=='B') or (l=='M') or (l=='N')
or (l=='X') or (l=='_')):
letters_for_frac.append(l)
letters_for_frac.append('A')
output_drive = np.zeros((N_fracs,2))
output_std = np.zeros((N_fracs,2))
decisions = np.zeros((N_fracs,2))
denom = np.zeros(N_fracs)
for (s_word,s_index) in zip(source.words,source.glob_ind):
i = ''.join(letters_for_frac).find(s_word[0])
indices = find(inputi_test==s_index+pred_pos)
# A predicted
output_drive[i,0] += mean(raw_predictions[indices,0])
# B predicted
output_drive[i,1] += mean(raw_predictions[indices,1])
decisions[i,0] += mean(raw_predictions[indices,0]>\
raw_predictions[indices,1])
decisions[i,1] += mean(raw_predictions[indices,1]>=\
raw_predictions[indices,0])
output_std[i,0] += std(raw_predictions[indices,0])
output_std[i,1] += std(raw_predictions[indices,1])
denom[i] += 1
# Some words occur more than once
output_drive[:,0] /= denom
output_drive[:,1] /= denom
output_std[:,0] /= denom
output_std[:,1] /= denom
decisions[:,0] /= denom
decisions[:,1] /= denom
# for other stats (e.g. SpontBayesStat)
c.pred_pos = pred_pos
c.Xtest = Xtest
c.raw_predictions = raw_predictions
c.inputi_test = inputi_test
c.letters_for_frac = letters_for_frac
c.classifier = classifier
c.noinput_units = noinput_units
to_return = hstack((output_drive,output_std,decisions))
return to_return
class AttractorDynamicsStat(AbstractStat):
"""
This stat tracks the distance between output gains during the
input presentation to determine whether the decision is based on
attractor dynamics
"""
def __init__(self):
self.name = 'AttractorDynamics'
self.collection = 'gather'
def report(self,c,sorn):
# Read stuff in
letters_for_frac = c.letters_for_frac
if isinstance(sorn.source,TrialSource): # if TrialSource
source = sorn.source.source
else:
source = sorn.source
word_length = min([len(x) for x in source.words])
N_words = len(source.words)
N_fracs = len(sorn.c.frac_A)
bayes_stat = None
for stat in sorn.stats.methods:
            if stat.name == 'Bayes':
bayes_stat = stat
break
assert(bayes_stat is not None)
pred_pos_old = bayes_stat.pred_pos
#output_dist = np.zeros((word_length-1,N_fracs))
output_dist = np.zeros((word_length,N_fracs))
min_trials = inf
for i in range(int(max(c.inputi_test))+1):
tmp = sum(c.inputi_test == i)
if min_trials > tmp:
min_trials = tmp
decisions = np.zeros((N_words,word_length,min_trials),\
dtype=np.bool)
seq_count = np.zeros((N_words,4))
for (p,pp) in enumerate(arange(0,word_length)):
bayes_stat.pred_pos = pp
bayes_stat.report(c,sorn)
pred_pos = c.pred_pos
raw_predictions = c.raw_predictions
inputi_test = c.inputi_test
#~ summed = abs(raw_predictions[:,0])+abs(raw_predictions[:,1])
#~ summed[summed<1e-10] = 1 # if predicted 0, leave at 0
#~ raw_predictions[:,0] /= summed
#~ raw_predictions[:,1] /= summed
denom = np.zeros((N_fracs))
for (w,(s_word,s_index)) in enumerate(zip(source.words,
source.glob_ind)):
i = ''.join(letters_for_frac).find(s_word[0])
indices = find(inputi_test==s_index+pred_pos)
tmp = abs(raw_predictions[indices,0]-
raw_predictions[indices,1])
output_dist[p,i] += mean(tmp)
decisions[w,p,:] = raw_predictions[
indices[-min_trials:],0]>\
raw_predictions[indices[-min_trials:],1]
denom[i] += 1
output_dist[p,:] /= denom
for i in range(N_words):
# Full-length 1s to be expected
seq_count[i,0] = ((sum(decisions[i])/(1.*min_trials*
word_length))**(word_length))*min_trials
# Actual 1-series
seq_count[i,1] = sum(sum(decisions[i],0)==word_length)
# Same for 0-series
seq_count[i,2] = ((1-(sum(decisions[i])/(1.*min_trials*
word_length)))**(word_length))*min_trials
seq_count[i,3] = sum(sum(decisions[i],0)==0)
bayes_stat.pred_pos = pred_pos_old
bayes_stat.report(c,sorn)
return output_dist
class OutputDistStat(AbstractStat):
"""
This stat reports the distance between output gains as an indicator
for whether the decision is based on chance or on attractor dynamics
"""
def __init__(self):
self.name = 'OutputDist'
self.collection = 'gather'
def report(self,c,sorn):
# Read stuff in
letters_for_frac = c.letters_for_frac
raw_predictions = c.raw_predictions
inputi_test = c.inputi_test
pred_pos = c.pred_pos
if isinstance(sorn.source,TrialSource): # if TrialSource
source = sorn.source.source
else:
source = sorn.source
N_fracs = len(sorn.c.frac_A)
summed = abs(raw_predictions[:,0])+abs(raw_predictions[:,1])
summed[summed<1e-10] = 1 # if predicted 0, leave at 0
raw_predictions[:,0] /= summed
raw_predictions[:,1] /= summed
output_dist = np.zeros((N_fracs))
output_std = np.zeros((N_fracs))
denom = np.zeros((N_fracs))
for (s_word,s_index) in zip(source.words,source.glob_ind):
i = ''.join(letters_for_frac).find(s_word[0])
indices = find(inputi_test==s_index+pred_pos)
tmp = abs(raw_predictions[indices,0]-
raw_predictions[indices,1])
output_dist[i] += mean(tmp)
output_std[i] += std(tmp)
denom[i] += 1
output_dist /= denom
output_std /= denom
to_return = vstack((output_dist,output_std))
return to_return
class TrialBayesStat(AbstractStat):
"""
    This stat looks at the interaction of spontaneous activity before
    stimulus onset with the final prediction.
    The word used for prediction is chosen internally as the middle word
    of the source (via its global index).
"""
def __init__(self):
self.name = 'TrialBayes'
self.collection = 'gather'
def report(self,c,sorn):
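        # Return sketch (derived from the code below): a (2, STA_window)
        # array. Row 0 is the fraction of held-out trials on which a
        # readout trained on a shuffled baseline agrees with the evoked
        # A/B decision; row 1 is the same for a readout trained on the
        # actual pre-stimulus activity. An array of -1 is returned if only
        # one decision class occurs in the training trials.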
# Read stuff in
STA_window = 50
pred_pos = c.pred_pos
classifier_old = c.classifier
noinput_units = c.noinput_units
steps_plastic = sorn.c.steps_plastic
N_train_steps = sorn.c.steps_noplastic_train
N_inputtrain_steps = steps_plastic + N_train_steps
N_test_steps = sorn.c.steps_noplastic_test
# Transpose because this is the way they are in test_bayes.py
# Use all neurons because we're predicting from spont activity
Xtest = c.spikes[:,N_inputtrain_steps:].T
inputi_test = c.inputindex[N_inputtrain_steps:]
N_exc = shape(Xtest)[1]
if isinstance(sorn.source,TrialSource): # if TrialSource
source = sorn.source.source
else:
raise NotImplementedError
# select middle word
index = source.glob_ind[1+(shape(source.glob_ind)[0]-3)//2]
forward_pred = sorn.c.stats.forward_pred
start_indices = find(inputi_test==index)
# * is element-wise AND
start_indices = start_indices[(start_indices>STA_window) *
((start_indices+pred_pos+forward_pred)<shape(inputi_test)[0])]
N_samples = shape(start_indices)[0]
pred_indices = find(inputi_test==(index+pred_pos))
pred_indices = pred_indices[(pred_indices>=start_indices[0])*
((pred_indices+forward_pred)<shape(inputi_test)[0])]
assert(N_samples == shape(pred_indices)[0])
if sorn.c.stats.bayes_noinput:
raw_predictions = Xtest[:,noinput_units].dot(classifier_old)
else:
raw_predictions = Xtest.dot(classifier_old)
predictions = raw_predictions[pred_indices,:]
# Two different baselines
#~ test_base = ones((shape(Xtest)[0],1))
test_base = Xtest.copy()
shuffle(test_base) # without shuffle, identical predictions
test_base = hstack((test_base,ones((shape(Xtest)[0],1))))
        # Add bias term to exclude effects of variability
N_exc += 1
Xtest = hstack((Xtest,ones((shape(Xtest)[0],1))))
# Divide into train and test set
predictions_train = predictions[:N_samples//2]
predictions_test = predictions[N_samples//2:]
train_A = predictions_train[:,0]>predictions_train[:,1]
train_B = train_A==False
train_A = find(train_A==True)
train_B = find(train_B==True)
# This case is filtered out during plotting
if not(shape(train_A)[0]>0 and shape(train_B)[0]>0):
return np.ones((2,STA_window))*-1
agreement_lstsq = np.zeros(STA_window)
agreement_base = np.zeros(STA_window)
# This maps 0/1 spikes to -1/1 spikes for later * comparison
predtrain_lstsq = (predictions_train[:,0]>\
predictions_train[:,1])*2-1
predtest_lstsq = (predictions_test[:,0]>\
predictions_test[:,1])*2-1
# Prediction with spontaneous activity
for i in range(-STA_window,0):
classifier_lstsq = lstsq_reg(Xtest[\
start_indices[:N_samples//2]+i+forward_pred,:],\
predtrain_lstsq,sorn.c.stats.lstsq_mue)
predictions_lstsq = (Xtest[start_indices[N_samples//2:]+i\
+forward_pred,:]).dot(classifier_lstsq)
# this is where the -1/1 comes in
agreement_lstsq[i] = sum((predictions_lstsq*predtest_lstsq)\
>0)/(1.*N_samples//2)
        # Baseline prediction (the loop is not strictly necessary; kept
        # for symmetry with the loop above)
for i in range(-STA_window,0):
classifier_base = lstsq_reg(test_base[\
start_indices[:N_samples//2]+i+forward_pred,:],\
predtrain_lstsq,sorn.c.stats.lstsq_mue)
predictions_base = (test_base[start_indices[N_samples//2:]+i\
+forward_pred,:]).dot(classifier_base)
agreement_base[i] = sum((predictions_base*predtest_lstsq)\
>0)/(1.*N_samples//2)
# STA - not used
trials = np.zeros((N_samples,STA_window,N_exc))
for i in range(N_samples):
trials[i,:,:] = Xtest[start_indices[i]-STA_window\
+forward_pred:start_indices[i]+forward_pred,:]
STA_A = mean(trials[train_A,:,:],0)
STA_B = mean(trials[train_B,:,:],0)
N_test = N_samples-N_samples//2
overlap_A = np.zeros((N_test,STA_window,N_exc))
overlap_B = np.zeros((N_test,STA_window,N_exc))
for i in range(N_samples//2,N_samples):
overlap_A[i-N_samples//2] = trials[i]*STA_A
overlap_B[i-N_samples//2] = trials[i]*STA_B
agreement = np.zeros(STA_window)
pred_gain_A = predictions_test[:,0]>predictions_test[:,1]
for i in range(STA_window):
pred_STA_A = sum(overlap_A[:,i,:],1)>sum(overlap_B[:,i,:],1)
agreement[i] = sum(pred_gain_A == pred_STA_A)
agreement /= float(shape(pred_gain_A)[0])
return vstack((agreement_base, agreement_lstsq))
class SpontBayesStat(AbstractStat):
def __init__(self):
self.name = 'SpontBayes'
self.collection = 'gather'
def report(self,c,sorn):
# Read stuff in
pred_pos = c.pred_pos
inputi_test = c.inputi_test
raw_predictions = c.raw_predictions
Xtest = c.Xtest
# Filter out empty states
if isinstance(sorn.source,TrialSource): # if TrialSource
source = sorn.source.source
else:
source = sorn.source
Xtest = Xtest[inputi_test != -1,:]
inputi_test = inputi_test[inputi_test != -1]
letters_for_frac = c.letters_for_frac
# Results will first be saved in dict for simplicity and later
# subsampled to an array
cue_act = {}
pred_gain = {}
minlen = inf
for (s_word,s_index) in zip(source.words,source.glob_ind):
i = ''.join(letters_for_frac).find(s_word[0])
# Indices that point to the presentation of the cue relative
# to the readout
cue_indices = find(inputi_test==s_index)
pred_indices = cue_indices+pred_pos
pred_indices = pred_indices[pred_indices
<shape(inputi_test)[0]]
# Get x-states at cue_indices and figure out the number of
# active input units for A and B
tmp_cue = Xtest[cue_indices]
tmp_cue = vstack((
sum(tmp_cue[:,1==sorn.W_eu.W[:,
source.lookup['A']]],1),
sum(tmp_cue[:,1==sorn.W_eu.W[:,
source.lookup['B']]],1))).T
tmp_gain = raw_predictions[pred_indices,:]
if cue_act.has_key(i):
cue_act[i] = np.append(cue_act[i],tmp_cue,axis=0)
pred_gain[i] = np.append(pred_gain[i],tmp_gain,axis=0)
else:
cue_act[i] = tmp_cue
pred_gain[i] = tmp_gain
if shape(cue_act[i])[0]<minlen:
minlen = shape(cue_act[i])[0]
# TODO super ugly - try to make prettier
minlen = 18 # hack for cluster - otherwise variable minlen
# subsample to make suitable for array
n_conditions = max(cue_act.keys())+1
to_return = np.zeros((n_conditions,minlen,4))
for i in range(n_conditions):
to_return[i,:,:2] = cue_act[i][-minlen:]
to_return[i,:,2:] = pred_gain[i][-minlen:]
return to_return
class EvokedPredStat(AbstractStat):
"""
This stat predicts evoked activity from spontaneous activity
traintimes is an interval of training data
testtimes is an interval of testing data
"""
def __init__(self,traintimes,testtimes,traintest):
self.name = 'EvokedPred'
self.collection = 'gather'
self.traintimes = traintimes
self.testtimes = testtimes
self.traintest = traintest
def report(self,c,sorn):
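        # Return sketch (derived from the code below): correlations has
        # shape (N_words, pred_window, 2); [..., 0] is the prediction from
        # the network state one step before word onset, [..., 1] a shuffled
        # baseline. Both are scored with Pearson correlation, or with the
        # fraction of matching spikes if sorn.c.stats.match is set.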
# Read data
traintimes = self.traintimes
testtimes = self.testtimes
Xtrain = c.spikes[:,traintimes[0]:traintimes[1]].T
Xtest = c.spikes[:,testtimes[0]:testtimes[1]].T
inputi_train = c.inputindex[traintimes[0]:traintimes[1]]
inputi_test = c.inputindex[testtimes[0]:testtimes[1]]
# Determine word length
source = load_source("source_%s"%self.traintest,sorn.c)
N_words = len(source.words)
max_word_length = int(max([len(x) for x in source.words]))
max_spont_length = int(sorn.c['wait_min_%s'%self.traintest]
+sorn.c['wait_var_%s'%self.traintest])
pred_window = max_word_length + max_spont_length+max_word_length
correlations = zeros((N_words,pred_window,2))
import scipy.stats as stats
# Convert 0/1 spike trains to -1/1 spike trains if needed
if sorn.c.stats.match:
Xtrain *= 2
Xtrain -= 1
Xtest *= 2
Xtest -= 1
word_length = 0
for (w,word) in enumerate(source.words):
word_starts_train = find(inputi_train==(word_length))
word_starts_train = word_starts_train[(word_starts_train>0)\
*(word_starts_train<(shape(Xtrain)[0]-pred_window))]
word_starts_test = find(inputi_test==(word_length))
word_starts_test = word_starts_test[word_starts_test<\
(shape(Xtest)[0]-pred_window)]
bias_train = ones((shape(word_starts_train)[0],1))
bias_test = ones((shape(word_starts_test)[0],1))
base_train = Xtrain[word_starts_train-1,:].copy()
base_test = Xtest[word_starts_test-1,:].copy()
shuffle(base_train)
shuffle(base_test)
base_train = hstack((bias_train,base_train))
base_test = hstack((bias_test,base_test))
sp_train = hstack((bias_train,Xtrain[word_starts_train-1,:]))
sp_test = hstack((bias_test,Xtest[word_starts_test-1,:]))
#~ sp_train = bias_train <-- this is a STA!
#~ sp_test = bias_test
for t in range(pred_window):
# First do a least-squares fit
Xt_train = Xtrain[word_starts_train+t,:]
Xt_test = Xtest[word_starts_test+t,:]
# regularize with mue to avoid problems when #samples <
# #neurons
classifier = lstsq_reg(sp_train,Xt_train,
sorn.c.stats.lstsq_mue)
classifier_base = lstsq_reg(base_train,Xt_train,
sorn.c.stats.lstsq_mue)
Xt_pred = sp_test.dot(classifier)
                base_pred = base_test.dot(classifier_base)
# Baseline = STA
#~ base = mean(Xt_train,0)
#~ base_pred = array([base,]*shape(Xt_test)[0])
# Don't use this because the paper uses correlation
# Don't use this because of lower bound for zeros
# instead of pearsonr - lower bound = 1-h.ip
# -> spont pred always better
def match(x,y):
assert(shape(x) == shape(y))
x = x>0
y = y>0
return sum(x==y)/(1.0*shape(x)[0])
if not sorn.c.stats.match:
correlations[w,t,0] = stats.pearsonr(
Xt_pred.flatten(),Xt_test.flatten())[0]
correlations[w,t,1] = stats.pearsonr(
base_pred.flatten(),Xt_test.flatten())[0]
else:
correlations[w,t,0] = match(Xt_pred.flatten(),
Xt_test.flatten())
correlations[w,t,1] = match(base_pred.flatten(),
Xt_test.flatten())
word_length += len(word)
# Correlations are sorted like the words:
# A B C D E ... B = 0*A C = 0.1*A, D=0.2*A ...
return correlations
class SpikesStat(AbstractStat):
def __init__(self,inhibitory = False):
if inhibitory:
self.name = 'SpikesInh'
self.sattr = 'spikes_inh'
else:
self.name = 'Spikes'
self.sattr = 'spikes'
self.collection = 'gather'
self.inh = inhibitory
def clear(self,c,sorn):
if self.inh:
self.neurons = sorn.c.N_i
else:
self.neurons = sorn.c.N_e
if sorn.c.stats.has_key('only_last'):
steps = sorn.c.stats.only_last+sorn.c.stats.only_last
c[self.sattr] = zeros((self.neurons,steps))
else:
c[self.sattr] = zeros((self.neurons,sorn.c.N_steps))
self.step = 0
def add(self,c,sorn):
if self.inh:
spikes = sorn.y
else:
spikes = sorn.x
if sorn.c.stats.has_key('only_last'):
new_step = self.step - (sorn.c.N_steps\
-sorn.c.stats.only_last)
if new_step >= 0:
c[self.sattr][:,new_step+sorn.c.stats.only_last] \
= spikes
elif self.step % (sorn.c.N_steps\
//sorn.c.stats.only_last) == 0:
c[self.sattr][:,self.step//(sorn.c.N_steps\
//sorn.c.stats.only_last)] = spikes
else:
c[self.sattr][:,self.step] = spikes
self.step += 1
def report(self,c,sorn):
if sorn.c.stats.save_spikes:
return c[self.sattr]
else:
return zeros(0)
class CondProbStat(AbstractStat):
def __init__(self):
self.name='CondProb'
self.collection='gather'
def clear(self,c,sorn):
pass
def add(self,c,sorn):
pass
def report(self,c,sorn):
        # return a matrix with M_ij = frequency of a spike in i following
        # a spike in j
# Look at test instead of training to get more diverse data
steps = sorn.c.steps_noplastic_test
spikes = c.spikes[:,-steps:]
N = shape(spikes)[0] # number of neurons
condspikes = np.zeros((N,N))
for t in xrange(1,steps):
condspikes[spikes[:,t]==1,:] += spikes[:,t-1]
spike_sum = sum(spikes,1)
for i in xrange(N):
condspikes[i,:] /= spike_sum
return condspikes
class BalancedStat(AbstractStat):
"""
This stat records the excitatory and inhibitory input and thresholds
to determine how balanced the network operates
"""
def __init__(self):
self.name='Balanced'
self.collection='gather'
def clear(self,c,sorn):
c.balanced = zeros((sorn.c.N_e*3,sorn.c.N_steps))
self.step = 0
self.N_e = sorn.c.N_e
def add(self,c,sorn):
c.balanced[:self.N_e,self.step] = sorn.W_ee*sorn.x
c.balanced[self.N_e:2*self.N_e,self.step] = sorn.W_ei*sorn.y
c.balanced[2*self.N_e:,self.step] = sorn.T_e
self.step += 1
def report(self,c,sorn):
return c.balanced
class RateStat(AbstractStat):
"""
This stat returns a matrix of firing rates of each presynaptic
neuron
"""
def __init__(self):
self.name = 'Rate'
self.collection='gather'
def clear(self,c,sorn):
pass
def add(self,c,sorn):
pass
def report(self,c,sorn):
# same interval as for condprob
steps = sorn.c.steps_noplastic_test
spikes = c.spikes[:,-steps:]
N = shape(spikes)[0] # number of neurons
rates = mean(spikes,1)
return array([rates,]*N)
class InputStat(AbstractStat):
def __init__(self):
self.name = 'Input'
self.collection = 'gather'
def clear(self,c,sorn):
c.inputs = zeros((sorn.c.N_e,sorn.c.N_steps))
self.step = 0
def add(self,c,sorn):
c.inputs[:,self.step] = sorn.W_eu*sorn.u
self.step += 1
def report(self,c,sorn):
return c.inputs
class FullEndWeightStat(AbstractStat):
def __init__(self):
self.name = 'FullEndWeight'
self.collection = 'gather'
def clear(self,c,sorn):
pass
def add(self,c,sorn):
pass
def report(self,c,sorn):
tmp1 = np.vstack((sorn.W_ee.get_synapses(),\
sorn.W_ie.get_synapses()))
tmp2 = np.vstack((sorn.W_ei.get_synapses(),\
np.zeros((sorn.c.N_i,sorn.c.N_i))))
return np.array(hstack((tmp1,tmp2)))
class EndWeightStat(AbstractStat):
def __init__(self):
self.name = 'endweight'
self.collection = 'gather'
def clear(self,c,sorn):
pass
def add(self,c,sorn):
pass
def report(self,c,sorn):
if sorn.c.W_ee.use_sparse:
return np.array(sorn.W_ee.W.todense())
else:
return sorn.W_ee.W*(sorn.W_ee.M==1)
class ISIsStat(AbstractStat):
def __init__(self,interval=[]):
self.name = 'ISIs'
self.collection = 'gather'
self.interval = interval
def clear(self,c,sorn):
self.mask = sum(sorn.W_eu.get_synapses(),1)==0
self.N_noinput = sum(self.mask)
self.ISIs = zeros((self.N_noinput,100))
self.isis = zeros(self.N_noinput)
self.step = 0
if self.interval == []:
self.interval = [0,sorn.c.N_steps]
def add(self,c,sorn):
if ((self.step > self.interval[0] and
self.step < self.interval[1]) and
((not sorn.c.stats.has_key('only_last')) \
or (self.step > sorn.c.stats.only_last))):
spikes = sorn.x[self.mask]
self.isis[spikes==0] += 1
isis_tmp = self.isis[spikes==1]
isis_tmp = isis_tmp[isis_tmp<100]
tmp = zip(where(spikes==1)[0],isis_tmp.astype(int))
for pair in tmp:
self.ISIs[pair] += 1
self.isis[spikes==1] = 0
self.step += 1
def report(self,c,sorn):
return self.ISIs
class SynapseFractionStat(AbstractStat):
def __init__(self):
self.name = 'SynapseFraction'
self.collection = 'reduce'
def report(self,c,sorn):
if sorn.c.W_ee.use_sparse:
return array(sum((sorn.W_ee.W.data>0)+0.0)\
/(sorn.c.N_e*sorn.c.N_e))
else:
return array(sum(sorn.W_ee.M)/(sorn.c.N_e*sorn.c.N_e))
class ConnectionFractionStat(AbstractStat):
def __init__(self):
self.name = 'ConnectionFraction'
self.collection = 'gather'
def clear(self,c,sorn):
self.step = 0
if sorn.c.stats.has_key('only_last'):
self.cf = zeros(sorn.c.stats.only_last\
+sorn.c.stats.only_last)
else:
self.cf = zeros(sorn.c.N_steps)
def add(self,c,sorn):
if sorn.c.stats.has_key('only_last'):
new_step = self.step \
- (sorn.c.N_steps-sorn.c.stats.only_last)
if new_step >= 0:
if sorn.c.W_ee.use_sparse:
self.cf[new_step+sorn.c.stats.only_last] = sum(\
(sorn.W_ee.W.data>0)+0)/(sorn.c.N_e*sorn.c.N_e)
else:
self.cf[new_step+sorn.c.stats.only_last] = sum(\
sorn.W_ee.M)/(sorn.c.N_e*sorn.c.N_e)
elif self.step%(sorn.c.N_steps\
//sorn.c.stats.only_last) == 0:
if sorn.c.W_ee.use_sparse:
self.cf[self.step//(sorn.c.N_steps\
//sorn.c.stats.only_last)] = sum(\
(sorn.W_ee.W.data>0)+0)/(sorn.c.N_e*sorn.c.N_e)
else:
self.cf[self.step//(sorn.c.N_steps\
//sorn.c.stats.only_last)] = sum(\
sorn.W_ee.M)/(sorn.c.N_e*sorn.c.N_e)
else:
if sorn.c.W_ee.use_sparse:
self.cf[self.step] = sum((sorn.W_ee.W.data>0)+0)\
/(sorn.c.N_e*sorn.c.N_e)
else:
self.cf[self.step] = sum(sorn.W_ee.M)\
/(sorn.c.N_e*sorn.c.N_e)
self.step += 1
def report(self,c,sorn):
return self.cf
class WeightLifetimeStat(AbstractStat):
def __init__(self):
self.name = 'WeightLifetime'
self.collection = 'gather'
def clear(self,c,sorn):
if sorn.c.W_ee.use_sparse:
self.last_M_ee = np.array(sorn.W_ee.W.todense())>0
else:
self.last_M_ee = sorn.W_ee.M.copy()
self.lifetimes = zeros((sorn.c.N_e,sorn.c.N_e))
self.diedat = np.zeros((1,0))
def add(self,c,sorn):
if sorn.c.W_ee.use_sparse:
new_M_ee = np.array(sorn.W_ee.W.todense())>0
else:
new_M_ee = sorn.W_ee.M
self.diedat = append(self.diedat, \
self.lifetimes[(new_M_ee+0-self.last_M_ee+0)==-1])
# remove dead synapses
self.lifetimes *= new_M_ee+0
#increase lifetime of existing ones
self.lifetimes += (self.lifetimes>0)+0
#add new ones
self.lifetimes += ((new_M_ee+0-self.last_M_ee+0)==1)+0
self.last_M_ee = new_M_ee.copy()
def report(self,c,sorn):
padding = (-1)*np.ones(2*sorn.c.N_steps\
+shape(self.last_M_ee)[0]**2-self.diedat.size)
return np.append(self.diedat,padding)
class WeightChangeStat(AbstractStat):
def __init__(self):
self.name = 'WeightChange'
self.collection = 'gather'
def clear(self,c,sorn):
self.step = 0
self.start = 2999
self.end = 5999
self.save_W_ee = []
self.abschange = []
self.relchange = []
self.weights = []
def add(self,c,sorn):
if(self.step == self.start):
if sorn.c.W_ee.use_sparse:
self.save_W_ee = np.array(sorn.W_ee.W.todense())
else:
self.save_W_ee = sorn.W_ee.W.copy()
if(self.step == self.end):
if sorn.c.W_ee.use_sparse:
diff = np.array(sorn.W_ee.W.todense())-self.save_W_ee
else:
diff = sorn.W_ee.W-self.save_W_ee
self.weights = self.save_W_ee[diff!=0]
self.abschange = (diff[diff!=0])
seterr(divide='ignore')
            # Some weights become 0, which triggers division by zero here;
            # catching the RuntimeWarning with try/except did not work,
            # so the warning is silenced instead
self.relchange = self.abschange/self.weights*100
seterr(divide='warn')
# append zeros to always have the same size
tmp_zeros = np.zeros(shape(self.save_W_ee)[0]**2\
-self.weights.size)
self.weights = np.append(self.weights,tmp_zeros)
self.abschange = np.append(self.abschange,tmp_zeros)
self.relchange = np.append(self.relchange,tmp_zeros)
self.step += 1
def report(self,c,sorn):
stacked = np.vstack((self.weights, self.abschange,\
self.relchange))
return stacked
class WeightChangeRumpelStat(AbstractStat):
def __init__(self):
self.name = 'WeightChangeRumpel'
self.collection = 'gather'
def clear(self,c,sorn):
self.step = 0
self.interval = 0
self.start = 50001
self.started = False
self.imaging_interval = 50000
self.N_intervals = (sorn.c.N_steps-self.start)\
//self.imaging_interval+1
self.save_W_ees = np.zeros((self.N_intervals,sorn.c.N_e,\
sorn.c.N_e))
self.constant_weights = []
self.abschange = []
self.relchange = []
self.weights = []
def add(self,c,sorn):
if(self.step%self.imaging_interval == 0 and self.started):
self.save_W_ees[self.interval,:,:] \
= sorn.W_ee.get_synapses()
self.constant_weights *= (self.save_W_ees[self.interval,\
:,:]>0)
self.interval += 1
if(self.step == self.start):
self.save_W_ees[self.interval,:,:] \
= sorn.W_ee.get_synapses()
self.constant_weights \
= (self.save_W_ees[self.interval,:,:].copy()>0)
self.interval = 1
self.started = True
self.step += 1
def report(self,c,sorn):
# compute diffs and multiply with const
diffs = self.save_W_ees[1:,:,:] - self.save_W_ees[:-1,:,:]
diffs *= self.constant_weights
self.abschange = (diffs[diffs!=0])
self.weights = self.save_W_ees[:-1,:,:][diffs!=0]
self.relchange = self.abschange/self.weights*100
# append zeros to always have the same size
tmp_zeros = np.zeros((self.N_intervals-1)\
*shape(self.save_W_ees)[1]**2-self.weights.size)
self.weights = np.append(self.weights,tmp_zeros)
self.abschange = np.append(self.abschange,tmp_zeros)
self.relchange = np.append(self.relchange,tmp_zeros)
stacked = np.vstack((self.weights, self.abschange,\
self.relchange))
return stacked
class SmallWorldStat(AbstractStat):
def __init__(self):
self.name = 'smallworld'
self.collection = 'gather'
def clear(self,c,sorn):
pass
def add(self,c,sorn):
pass
def report(self,c,sorn):
if sorn.c.stats.rand_networks <= 0:
return np.array([])
if sorn.c.W_ee.use_sparse:
weights = np.array(sorn.W_ee.W.todense())
else:
weights = sorn.W_ee.W*(sorn.W_ee.M==1)
tmp = weights>0.0+0.0
binary_connections = tmp+0.0
def all_pairs_shortest_path(graph_matrix):
# adapted Floyd-Warshall Algorithm
N = shape(graph_matrix)[0]
distances = graph_matrix.copy()
#Set missing connections to max length
distances[distances==0] += N*N
for k in range(N):
for i in range(N):
for j in range(N):
if i==j:
distances[i,j] = 0
else:
distances[i,j] = min(distances[i,j],
distances[i,k]
+distances[k,j])
return distances
def characteristic_path_length(graph_matrix):
N = shape(graph_matrix)[0]
distances = all_pairs_shortest_path(graph_matrix.T)
if any(distances == N*N):
print 'Disconnected elements in char. path len calc.'
# ignore disconnected elements
distances[distances==N*N] = 0
average_length = sum(distances[distances>0]*1.0)\
/sum(graph_matrix[distances>0]*1.0)
return average_length
def cluster_coefficient(graph_matrix):
# From Fagiolo, 2007 and Gerhard, 2011
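            # In this notation (matching the code below):
            #   C_i = [(A + A.T)^3]_ii / (2*(k_i*(k_i - 1) - 2*(A^2)_ii))
            # where A is the binary adjacency matrix, k_i the total
            # (in + out) degree and (A^2)_ii the number of reciprocal
            # connections of node i; the mean of C_i over all nodes is
            # returned.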
N = shape(graph_matrix)[0]
in_degree = sum(graph_matrix,1)
out_degree = sum(graph_matrix,0)
k = in_degree+out_degree
A = graph_matrix
A_T = A.transpose()
A_A_T = A + A_T
A_2 = np.dot(A,A)
nominator = np.dot(A_A_T,np.dot(A_A_T,A_A_T))
single_coeff = np.zeros(N)
for i in range(N):
single_coeff[i] = nominator[i,i]/(2.0*(k[i]*(k[i]-1)\
-2.0*(A_2[i,i])))
if(np.isnan(single_coeff[i])):
# if total degree <= 1, the formula divides by 0
single_coeff[i] = 0
return 1.0*sum(single_coeff)/(N*1.0)
L = characteristic_path_length(binary_connections)
C = cluster_coefficient(binary_connections)
# Average over some random networks
N = shape(binary_connections)[0]
edge_density = sum(binary_connections)/(1.0*N*N-N)
num_rand = sorn.c.stats.rand_networks
L_rand = np.zeros(num_rand)
C_rand = np.zeros(num_rand)
delete_diagonal = np.ones((N,N))
for i in range(N):
delete_diagonal[i,i] = 0
for i in range(num_rand):
sys.stdout.write('\rRand Graph No.%3i of %3i'%(i+1,\
num_rand))
sys.stdout.flush()
tmp = np.random.rand(N,N)<edge_density
rand_graph = tmp*delete_diagonal
L_rand[i] = characteristic_path_length(rand_graph)
C_rand[i] = cluster_coefficient(rand_graph)
sys.stdout.write('\rAll %i Graphs Done '%num_rand)
sys.stdout.flush()
L_r = sum(L_rand)*1.0/(num_rand*1.0)
C_r = sum(C_rand)*1.0/(num_rand*1.0)
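        # Small-world index relative to density-matched random graphs:
        # gamma = C/C_rand, lam = L/L_rand, S_w = gamma/lam; S_w well
        # above 1 indicates small-world structure (high clustering,
        # short path lengths).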
gamma = C/C_r
lam = L/L_r
S_w = gamma/lam
return np.array([gamma, lam, S_w])
class ParamTrackerStat(AbstractStat):
def __init__(self):
self.name = 'paramtracker'
self.collection = 'gather'
def clear(self,c,sorn):
pass
def add(self,c,sorn):
pass
def report(self,c,sorn):
tmp = sorn.c
for item in sorn.c.cluster.vary_param.split('.'):
tmp = tmp[item]
return np.array([tmp*1.0])
class InputWeightStat(AbstractStat):
def __init__(self):
self.name = 'InputWeight'
self.collection = 'gather'
def clear(self,c,sorn):
self.step = 0
self.weights = np.zeros((sorn.c.N_e,sorn.c.N_u_e,\
sorn.c.stats.only_last*2))
def add(self,c,sorn):
if self.step % (sorn.c.N_steps//sorn.c.stats.only_last) == 0:
self.weights[:,:,self.step//(sorn.c.N_steps\
//sorn.c.stats.only_last)] = sorn.W_eu.get_synapses()
self.step += 1
def report(self,c,sorn):
return self.weights
class SVDStat(AbstractStat):
def __init__(self,nth = 200):
self.name = 'SVD'
self.collection = 'gather'
self.nth = nth
def clear(self,c,sorn):
self.step = 0
# Quick hack - there must be a prettier solution
if sorn.c.steps_plastic % self.nth == 0:
add1 = 0
else:
add1 = 1
c.SVD_singulars = np.zeros((sorn.c.steps_plastic//self.nth+add1
,sorn.c.N_e))
c.SVD_U = np.zeros((sorn.c.steps_plastic//self.nth+add1,
sorn.c.N_e,sorn.c.N_e))
c.SVD_V = np.zeros((sorn.c.steps_plastic//self.nth+add1,
sorn.c.N_e,sorn.c.N_e))
def add(self,c,sorn):
if self.step < sorn.c.steps_plastic and self.step%self.nth == 0:
# Time intensive!
synapses = sorn.W_ee.get_synapses()
U,s,V = linalg.svd(synapses)
c.SVD_singulars[self.step//self.nth,:] = s
step = self.step//self.nth
c.SVD_U[step] = U
# this returns the real V
# see http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.svd.html
c.SVD_V[step] = V.T
# Resolve sign ambiguity
# from http://www.models.life.ku.dk/signflipsvd
# http://prod.sandia.gov/techlib/access-control.cgi/2007/076422.pdf
for i in range(sorn.c.N_e):
tmp = synapses.T.dot(c.SVD_U[step,:,i])
tmp = np.squeeze(asarray(tmp))
s_left = sum(sign(tmp)*tmp**2)
tmp = synapses.T.dot(c.SVD_V[step,:,i])
tmp = np.squeeze(asarray(tmp))
s_right = sum(sign(tmp)*tmp**2)
if s_right*s_left < 0:
if s_left < s_right:
s_left = -s_left
else:
s_right = -s_right
c.SVD_U[step,:,i] *= sign(s_left)
c.SVD_V[step,:,i] *= sign(s_right)
self.step += 1
def report(self,c,sorn):
#~ figure() # combine same submatrices!
#~ imshow(c.SVD_U[-1][:,0].dot(c.SVD_V[-1][:,0].T)\
#~ *c.SVD_singulars[-1,0], interpolation='none')
return c.SVD_singulars
class SVDStat_U(AbstractStat):
def __init__(self):
self.name = 'SVD_U'
self.collection = 'gather'
def report(self,c,sorn):
rec_steps = shape(c.SVD_U)[0]
similar_input = zeros((rec_steps,sorn.c.N_e))
N_indices = max(c.norm_last_input_index)+1
indices = [where(c.norm_last_input_index==i)[0] for i in
range(int(N_indices))]
for s in xrange(rec_steps):
for i in xrange(sorn.c.N_e):
# U transforms back to "spike space"
# Check for best similarities
# Convolution works best:
#~ overlaps = c.norm_last_input_spikes.T.dot(
#~ c.SVD_U[s,:,i])
#~ index_overlap = np.zeros(N_indices)
#~ for j in range(int(N_indices)):
#~ index_overlap[j] = mean(overlaps[indices[j]])
#~ similar_input[s,i] = argmax(index_overlap)
# No big difference to this, but probably more robust
max_overlap = argmax(c.norm_last_input_spikes.T.dot(
c.SVD_U[s,:,i]))
similar_input[s,i] = c.norm_last_input_index[
max_overlap]
c.SVD_U_sim = similar_input # for debugging
return similar_input
class SVDStat_V(AbstractStat):
def __init__(self):
self.name = 'SVD_V'
self.collection = 'gather'
def report(self,c,sorn):
rec_steps = shape(c.SVD_V)[0]
similar_input = zeros((rec_steps,sorn.c.N_e))
N_indices = max(c.norm_last_input_index)+1
indices = [where(c.norm_last_input_index==i)[0] for i in
range(int(N_indices))]
for s in xrange(rec_steps):
for i in xrange(sorn.c.N_e):
# V transforms input by taking product
# Do same here and look which spike vector works best
#~ overlaps = c.norm_last_input_spikes.T.dot(
#~ c.SVD_V[s,:,i])
#~ index_overlap = np.zeros(N_indices)
#~ for j in range(int(N_indices)):
#~ index_overlap[j] = mean(overlaps[indices[j]])
#~ similar_input[s,i] = argmax(index_overlap)
# No big difference to this, but probably more robust
max_overlap = argmax(c.norm_last_input_spikes.T.dot(
c.SVD_V[s,:,i])) # euclidean norm w/o sqrt
similar_input[s,i] = c.norm_last_input_index[
max_overlap]
'''
# For testing purposes command line
!i = 30
!similar_input[:,i]
!c.SVD_U_sim[:,i]
!figure()
!plot(c.SVD_V[-1,:,i])
!max_overlap = argmax(c.norm_last_input_spikes.T.dot(c.SVD_V[s,:,i]))
!plot(c.norm_last_input_spikes[:,max_overlap])
!figure()
!plot(c.SVD_U[-1,:,i])
!max_overlap = argmax(c.norm_last_input_spikes.T.dot(c.SVD_U[s,:,i]))
!plot(c.norm_last_input_spikes[:,max_overlap])
!show()
'''
return similar_input
class MeanActivityStat(AbstractStat):
"""
    This stat returns the mean activity for each input index
"""
def __init__(self,start,stop,N_indices,LFP=False):
self._start = start
self._stop = stop
self._N_indices = N_indices
self.name = 'meanactivity'
self.collection = 'gather'
self.LFP = LFP
self.tmp = -1
def clear(self,c,sorn):
self.means = zeros(self._N_indices)
self.counter = zeros(self._N_indices)
self.step = 0
self.index = None
def add(self,c,sorn):
if self.step > self._start and self.step < self._stop\
and self.step>0:
# for proper assignment, blank(-1)->0, 0->1...
self.index = sorn.source.global_index()+1
if self.index is not None:
if self.tmp >= 0:
self.counter[self.index] += 1.
if self.LFP:
# save input at current step, but can only compute
# input for next step!
if self.tmp >= 0:
self.means[self.index] += self.tmp+sum(sorn.W_eu
*sorn.u)
self.tmp = sum(sorn.W_ee*sorn.x)
else:
if self.tmp >= 0:
self.means[self.index] += sum(sorn.x)
self.tmp = 0 # dummy value never used
#~ # +1 due to -1 for blank trials
#~ self.index = sorn.source.global_index()+1
self.step += 1
def report(self,c,sorn):
return self.means/self.counter
class MeanPatternStat(AbstractStat):
"""
    This stat returns the mean activity pattern for each input index
"""
def __init__(self,start,stop,N_indices):
self._start = start
self._stop = stop
self._N_indices = N_indices
self.name = 'meanpattern'
self.collection = 'gather'
def clear(self,c,sorn):
self.means = zeros((self._N_indices,sorn.c.N_e))
self.counter = zeros(self._N_indices)
self.step = 0
self.index = None
def add(self,c,sorn):
if self.step > self._start and self.step < self._stop\
and self.step>0:
# for proper assignment, blank(-1)->0, 0->1...
self.index = sorn.source.global_index()+1
if self.index is not None:
self.counter[self.index] += 1.
self.means[self.index] += sorn.x
self.step += 1
def report(self,c,sorn):
return self.means/self.counter[:,None]
class PatternProbabilityStat(AbstractStat):
"""
This stat estimates the probability distribution of patterns
for different time intervals
Intervals: List of 2-entry lists
[[start1,stop1],...,[startn,stopn]]
zero_correction: Bool
Correct estimates by adding one observation to each pattern
subset: 1-D array
List of neuron indices that create the pattern
"""
def __init__(self,intervals,subset,zero_correction=True):
self.N_intervals = len(intervals)
self.intervals = intervals
self.zero_correction = zero_correction
self.N_nodes = len(subset)
self.subset = subset
self.name = 'patternprobability'
self.collection = 'gather'
self.conversion_array = [2**x for x in range(self.N_nodes)][::-1]
def convert(x):
return np.dot(x,self.conversion_array)
self.convert = convert
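        # Illustrative example: with N_nodes == 3 the conversion_array is
        # [4, 2, 1], so the spiking pattern [1, 0, 1] is counted under
        # histogram index 5.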
def clear(self,c,sorn):
self.patterns = zeros((self.N_intervals,2**self.N_nodes))
self.step = 0
def add(self,c,sorn):
for (i,(start,stop)) in enumerate(self.intervals):
if self.step > start and self.step < stop:
# Convert spiking pattern to integer by taking the
# pattern as a binary number
self.patterns[i,self.convert(sorn.x[self.subset])] += 1
self.step += 1
def report(self,c,sorn):
if self.zero_correction:
self.patterns += 1
# Normalize to probabilities
self.patterns /= self.patterns.sum(1)[:,None]
return self.patterns
class WeeFailureStat(AbstractStat):
def __init__(self):
self.name = 'weefail'
self.collection = 'gather'
def clear(self,c,sorn):
c.weefail = zeros(sorn.c.N_steps)
self.step = 0
def add(self,c,sorn):
if sorn.c.W_ee.use_sparse:
N_weights = sorn.W_ee.W.data.shape[0]
N_fail = N_weights-sum(sorn.W_ee.mask)
else:
N_weights = sum(sorn.W_ee.get_synapses()>0)
N_fail = N_weights-sum(sorn.W_ee.masked>0)
c.weefail[self.step] = N_fail/N_weights
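        # Note (added): this assumes the division yields a float fraction;
        # under plain Python 2 integer division the ratio would truncate to 0,
        # so N_fail/N_weights are presumably floats here.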
self.step += 1
def report(self,c,sorn):
return c.weefail
class WeeFailureFuncStat(AbstractStat):
def __init__(self):
self.name = 'weefailfunc'
self.collection = 'gather'
def clear(self,c,sorn):
self.x = np.linspace(0,1,1000)
self.y = sorn.W_ee.fail_f(self.x)
def add(self,c,sorn):
pass
def report(self,c,sorn):
return np.array([self.x,self.y])
# From Philip
class XClassifierStat(AbstractStat):
def __init__(self,steps=None, classify_x=True, \
classify_r=False,detailed=False,**args):
'''Steps is a list with the step sizes over which to predict.
e.g.
- a step of +1 means predict the next state
- a step of 0 means identify the current state
- a step of -1 means identify the previous state
'''
if steps is None:
steps = [0]
self.steps = steps
self.classify_x = classify_x
self.classify_r = classify_r
self.detailed = detailed
@property
def name(self):
ans = []
if self.classify_x:
ans.append('xclassifier')
if self.classify_r:
ans.append('rclassifier')
return ans
def build_classifier(self,inp,out,offset):
# Use the input to build a classifier of the output with an
# offset
N = inp.shape[0]
inp_aug = hstack([inp, ones((N,1))])
(ib,ie) = (max(-offset,0),min(N-offset,N))
(ob,oe) = (max(+offset,0),min(N+offset,N))
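        # Illustrative note: offset == +1 pairs inp[t] with out[t+1]
        # (predict the next state), offset == -1 pairs inp[t] with out[t-1].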
try:
ans = linalg.lstsq(inp_aug[ib:ie,:],out[ob:oe,:])[0]
except LinAlgError:
ans = zeros( (inp.shape[1]+1,out.shape[1]) )
return ans
def use_classifier(self,inp,classifier,offset,correct):
N = inp.shape[0]
L = classifier.shape[1]
inp_aug = hstack([inp, ones((N,1))])
(ib,ie) = (max(-offset,0),min(N-offset,N))
(ob,oe) = (max(+offset,0),min(N+offset,N))
ind = argmax(inp_aug[ib:ie,:].dot(classifier),1)
actual = argmax(correct,1)[ob:oe]
num = zeros(L)
den = zeros(L)
for l in range(L):
l_ind = actual==l
num[l] = sum(actual[l_ind]==ind[l_ind])
den[l] = sum(l_ind)
return (num,den)
def report(self,_,sorn):
c = sorn.c
#Disable plasticity when measuring network
sorn.update = False
#Don't track statistics when measuring either
self.parent.disable = True
#Build classifiers
Nr = c.test_num_train
Nt = c.test_num_test
#~ (Xr,Rr,Ur) = sorn.simulation(Nr)
dic = sorn.simulation(Nr,['X','R_x','U'])
Xr = dic['X']
Rr = dic['R_x']
Ur = dic['U']
#~ (Xt,Rt,Ut) = sorn.simulation(Nt)
dic = sorn.simulation(Nt,['X','R_x','U'])
Xt = dic['X']
Rt = dic['R_x']
Ut = dic['U']
L = Ur.shape[1]
Rr = (Rr >= 0.0)+0
Rt = (Rt >= 0.0)+0
r = []
x = []
detail_r=[]
detail_x=[]
for step in self.steps:
if self.classify_x:
classifier = self.build_classifier(Xr,Ur,step)
(num,den) = self.use_classifier(Xt,classifier,step,Ut)
ans = sum(num)/sum(den)
x.append(ans)
if self.detailed:
detail_x.append(num/(den+1e-20))
if self.classify_r:
classifier = self.build_classifier(Rr,Ur,step)
(num,den) = self.use_classifier(Rt,classifier,step,Ut)
ans = sum(num)/sum(den)
r.append(ans)
if self.detailed:
detail_r.append(num/(den+1e-20))
ans = []
if self.classify_x:
ans.append( ('xclassifier', 'reduce', array(x)) )
if self.detailed:
ans.append( ('x_detail_classifier%d'%L,'reduce',\
array(detail_x)) )
if self.classify_r:
ans.append( ('rclassifier', 'reduce', array(r)) )
if self.detailed:
ans.append( ('r_detail_classifier%d'%L,'reduce',\
array(detail_r)) )
sorn.update = True
self.parent.disable = False
return ans
# From Philip
class XTotalsStat(AbstractStat):
def __init__(self):
self.name = 'x_tot'
self.collection = 'gather'
def clear(self,c,obj):
N = obj.c.N_e
c.x_tot = zeros(N)
def add(self,c,obj):
c.x_tot += obj.x
def report(self,c,obj):
return c.x_tot
# From Philip
class YTotalsStat(AbstractStat):
def __init__(self):
self.name = 'y_tot'
self.collection = 'gather'
def clear(self,c,obj):
N = obj.c.N_i
c.y_tot = zeros(N)
def add(self,c,obj):
c.y_tot += obj.y
def report(self,c,obj):
return c.y_tot
# From Philip
class SynapticDistributionStat(AbstractStat):
def __init__(self,collection='gatherv'):
self.name = 'synaptic_strength'
self.collection = collection
def report(self,_,sorn):
W = sorn.W_ee.T
Mask = sorn.M_ee.T
# This code might be a little fragile but fast
# (note transposes rely on memory laid out in particular order)
#~ N = sorn.c.N_e
#~ M = sorn.c.lamb
#This relies on a fixed # of non-zero synapses per neuron
#~ ans = (W[Mask]).reshape(N,M).T.copy()
ans = W[Mask]
return ans
# From Philip
class SuccessiveStat(AbstractStat):
def __init__(self):
self.name = 'successive'
self.collection = 'reduce'
def clear(self,c,sorn):
N = sorn.c.N_e
c.successive = zeros( (N+1,N+1) )
c.successive_prev = sum(sorn.x)
def add(self, c, sorn):
curr = sum(sorn.x)
c.successive[c.successive_prev,curr] += 1.0
c.successive_prev = curr
def report(self,c,sorn):
return c.successive
# From Philip
class RClassifierStat(AbstractStat):
def __init__(self,select=None):
if select is None:
select = [True,True,True]
self.name = 'classifier'
self.collection = 'reduce'
self.select = select
def report(self,_,sorn):
c = sorn.c
sorn.update = False
self.parent.disable = True
#Build classifiers
N = c.test_num_train
#~ (X,R,U) = sorn.simulation(N)
dic = sorn.simulation(N,['X','R_x','U'])
X = dic['X']
R = dic['R_x']
U = dic['U']
R = hstack([R>=0,ones((N,1))])
if self.select[0]:
classifier0 = linalg.lstsq(R,U)[0]
if self.select[1]:
classifier1 = dot(linalg.pinv(R),U)
if self.select[2]:
X_aug = hstack([X, ones((N,1))])
classifier2 = linalg.lstsq(X_aug[:-1,:],U[1:,:])[0]
#Now test classifiers
N = c.test_num_test
#~ (X,R,U) = sorn.simulation(N)
dic = sorn.simulation(N,['X','R_x','U'])
X = dic['X']
R = dic['R_x']
U = dic['U']
R = hstack([R>=0,ones((N,1))])
if self.select[0]:
ind0 = argmax(dot(R,classifier0),1)
if self.select[1]:
ind1 = argmax(dot(R,classifier1),1)
if self.select[2]:
X_aug = hstack([X, ones((N,1))])
ind2 = argmax(dot(X_aug[:-1,:],classifier2),1)
actual = argmax(U,1)
ans = []
if self.select[0]:
ans.append(mean(actual==ind0))
if self.select[1]:
ans.append(mean(actual==ind1))
if self.select[2]:
ans.append(mean(actual[1:]==ind2))
sorn.update = True
self.parent.disable = False
return array(ans)
class WeightHistoryStat(HistoryStat):
def add(self,c,obj):
if not (c.history[self.counter] % self.record_every_nth):
c.history[self.name].append(np.copy(
_getvar(obj,self.var).get_synapses()))
c.history[self.counter] += 1
| mit |
AnasGhrab/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
    # Compute ROC curve and area under the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
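    # Interpolate this fold's TPR onto the common mean_fpr grid so the
    # per-fold curves can be averaged point-wise after the loop.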
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
Aasmi/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
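# Both single-tree test errors serve as baselines; they are drawn further down
# as horizontal reference lines for the boosted ensembles.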
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_examples/api/colorbar_only.py | 3 | 1941 | '''
Make a colorbar as a separate figure.
'''
from matplotlib import pyplot, mpl
# Make a figure and axes with dimensions as desired.
fig = pyplot.figure(figsize=(8,3))
ax1 = fig.add_axes([0.05, 0.65, 0.9, 0.15])
ax2 = fig.add_axes([0.05, 0.25, 0.9, 0.15])
# Set the colormap and norm to correspond to the data for which
# the colorbar will be used.
cmap = mpl.cm.cool
norm = mpl.colors.Normalize(vmin=5, vmax=10)
# ColorbarBase derives from ScalarMappable and puts a colorbar
# in a specified axes, so it has everything needed for a
# standalone colorbar. There are many more kwargs, but the
# following gives a basic continuous colorbar with ticks
# and labels.
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap,
norm=norm,
orientation='horizontal')
cb1.set_label('Some Units')
# The second example illustrates the use of a ListedColormap, a
# BoundaryNorm, and extended ends to show the "over" and "under"
# value colors.
cmap = mpl.colors.ListedColormap(['r', 'g', 'b', 'c'])
cmap.set_over('0.25')
cmap.set_under('0.75')
# If a ListedColormap is used, the length of the bounds array must be
# one greater than the length of the color list. The bounds must be
# monotonically increasing.
bounds = [1, 2, 4, 7, 8]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
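# Here len(bounds) == 5 == len(colors) + 1, satisfying the requirement above.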
cb2 = mpl.colorbar.ColorbarBase(ax2, cmap=cmap,
norm=norm,
# to use 'extend', you must
# specify two extra boundaries:
boundaries=[0]+bounds+[13],
extend='both',
ticks=bounds, # optional
spacing='proportional',
orientation='horizontal')
cb2.set_label('Discrete intervals, some other units')
pyplot.show()
| gpl-2.0 |
griffincalme/MicroDeconvolution | website/MicroDeconvolution/RandomWalkScript.py | 1 | 5032 | import numpy as np
from numpy import linalg
import matplotlib.pyplot as plt
from skimage.exposure import rescale_intensity
from skimage.segmentation import random_walker
from skimage.color import separate_stains
from skimage.color import rgb2grey
from skimage.io import imread
import time
from pyamg import *
#Color deconvolution
#Hematoxylin(0), Red(1), DAB(2)
rgb_from_hrd = np.array([[0.65, 0.70, 0.29],
[0.1, 0.95, 0.95],
[0.27, 0.57, 0.78]])
hrd_from_rgb = linalg.inv(rgb_from_hrd)
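# separate_stains() expects the stain-separation matrix, i.e. the inverse of
# the stain-to-RGB matrix defined above, hence the linalg.inv call.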
def stainspace_to_2d_array(ihc_xyz, channel):
rescale = rescale_intensity(ihc_xyz[:, :, channel], out_range=(0,1))
stain_array = np.dstack((np.zeros_like(rescale), rescale, rescale))
grey_array = rgb2grey(stain_array)
return grey_array
#Get markers for random walk
def get_markers(grey_array, bottom_thresh, top_thresh):
markers = np.zeros_like(grey_array)
markers[grey_array < bottom_thresh] = 1
markers[grey_array > top_thresh] = 2
return markers
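# Marker semantics for skimage's random_walker: 0 = unlabeled pixels to be
# filled in by the walker, 1 = background seeds (below bottom_thresh),
# 2 = positive-stain seeds (above top_thresh).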
def random_walk_segmentation(input_image, output_folder):
input_image = imread(input_image)
ihc_hrd = separate_stains(input_image, hrd_from_rgb)
DAB_Grey_Array = stainspace_to_2d_array(ihc_hrd, 2)
Hema_Gray_Array = stainspace_to_2d_array(ihc_hrd, 0)
GBIred_Gray_Array = stainspace_to_2d_array(ihc_hrd, 1)
#Perform Random Walker, fills in positive regions
DAB_segmentation = random_walker(DAB_Grey_Array, get_markers(DAB_Grey_Array, .3, .5), beta=130, mode='cg_mg')
Hema_segmentation = random_walker(Hema_Gray_Array, get_markers(Hema_Gray_Array, .2, .4), beta=130, mode='cg_mg')
GBIred_segmentation = random_walker(GBIred_Gray_Array, get_markers(GBIred_Gray_Array, .4, .5), beta=130,
mode='cg_mg')
'''Compute and Output'''
#Compute and output percentages of pixels stained by each chromagen
pic_dimensions = np.shape(DAB_segmentation) # both arrays same shape
total_pixels = pic_dimensions[0] * pic_dimensions[1]
#Change negative pixel values from 1 -> 0, positives 2 -> 1
subtrahend_array = np.ones_like(DAB_segmentation)
DAB_segmentation = np.subtract(DAB_segmentation, subtrahend_array)
Hema_segmentation = np.subtract(Hema_segmentation, subtrahend_array)
GBIred_segmentation = np.subtract(GBIred_segmentation, subtrahend_array)
#Count positive pixels
DAB_pixels = np.count_nonzero(DAB_segmentation)
Hema_pixels = np.count_nonzero(Hema_segmentation)
red_pixels = np.count_nonzero(GBIred_segmentation)
#Percent of image covered by positive staining
DAB_coverage_percent = (round((DAB_pixels / total_pixels * 100), 1))
Hema_coverage_percent = (round((Hema_pixels / total_pixels * 100), 1))
#An overlay of the DAB and Hematoxylin segmented images, for total cellular area
total_cell_array = np.add(DAB_segmentation, Hema_segmentation)
#Number of pixels covered by cellular area
total_cell_pixels = np.count_nonzero(total_cell_array)
#Percent of image covered by cellular area (DAB OR Hematoxylin)
total_cell_percent = (round((total_cell_pixels / total_pixels * 100), 1))
#The percentage of DAB/CD3+ cells out of the total number of cells
percent_pos_cells = (round((DAB_pixels / total_cell_pixels * 100), 1))
#The percentage of the image covered by cytokines
Red_coverage_percent = (round((red_pixels / total_pixels * 100), 1))
red_plus_total_array = np.add(total_cell_array, GBIred_segmentation)
red_plus_total_pixels = np.count_nonzero(red_plus_total_array)
#The percentage of the area covered by cytokines, with non-cellular regions subtracted
adjusted_red_coverage_percent = (round((red_pixels / red_plus_total_pixels * 100), 1))
# Plot images
fig, axes = plt.subplots(2, 2, figsize=(12, 11))
ax0, ax1, ax2, ax3 = axes.ravel()
ax0.imshow(input_image, cmap=plt.cm.gray, interpolation='nearest')
ax0.set_title("Original")
ax1.imshow(DAB_segmentation, cmap=plt.cm.gray, interpolation='nearest')
ax1.set_title("DAB")
ax2.imshow(GBIred_segmentation, cmap=plt.cm.gray)
ax2.set_title("GBI red")
ax3.imshow(Hema_segmentation, cmap=plt.cm.gray)
ax3.set_title("Hematoxylin")
for ax in axes.ravel():
ax.axis('off')
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
output_filename = 'output' + time.strftime("%Y-%m-%d %H:%M:%S")
plt.savefig(output_folder + output_filename)
#do a save csv here, maybe delete return statement after this comment
return output_filename#, DAB_coverage_percent, Hema_coverage_percent, total_cell_percent, percent_pos_cells, Red_coverage_percent, adjusted_red_coverage_percent
#--- Test ---
#file_path = '/home/griffin/Desktop/MicroDeconvolution/TestingScripts/SamplePics/TestImage.jpg'
#save_directory = '/home/griffin/Desktop/MicroDeconvolution/website/media/images/output/'
#random_walk_segmentation(file_path, save_directory)
| apache-2.0 |
bundgus/python-playground | matplotlib-playground/examples/pylab_examples/fonts_demo_kw.py | 1 | 2105 | """
Same as fonts_demo using kwargs. If you prefer a more pythonic, OO
style of coding, see examples/fonts_demo.py.
"""
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
import numpy as np
plt.subplot(111, axisbg='w')
alignment = {'horizontalalignment': 'center', 'verticalalignment': 'baseline'}
# Show family options
families = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']
t = plt.text(-0.8, 0.9, 'family', size='large', **alignment)
yp = [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]
for k, family in enumerate(families):
t = plt.text(-0.8, yp[k], family, family=family, **alignment)
# Show style options
styles = ['normal', 'italic', 'oblique']
t = plt.text(-0.4, 0.9, 'style', **alignment)
for k, style in enumerate(styles):
t = plt.text(-0.4, yp[k], style, family='sans-serif', style=style,
**alignment)
# Show variant options
variants = ['normal', 'small-caps']
t = plt.text(0.0, 0.9, 'variant', **alignment)
for k, variant in enumerate(variants):
t = plt.text(0.0, yp[k], variant, family='serif', variant=variant,
**alignment)
# Show weight options
weights = ['light', 'normal', 'medium', 'semibold', 'bold', 'heavy', 'black']
t = plt.text(0.4, 0.9, 'weight', **alignment)
for k, weight in enumerate(weights):
t = plt.text(0.4, yp[k], weight, weight=weight,
**alignment)
# Show size options
sizes = ['xx-small', 'x-small', 'small', 'medium', 'large',
'x-large', 'xx-large']
t = plt.text(0.8, 0.9, 'size', **alignment)
for k, size in enumerate(sizes):
t = plt.text(0.8, yp[k], size, size=size,
**alignment)
x = -0.4
# Show bold italic
t = plt.text(x, 0.1, 'bold italic', style='italic',
weight='bold', size='x-small',
**alignment)
t = plt.text(x, 0.2, 'bold italic',
style='italic', weight='bold', size='medium',
**alignment)
t = plt.text(x, 0.3, 'bold italic',
style='italic', weight='bold', size='x-large',
**alignment)
plt.axis([-1, 1, 0, 1])
plt.show()
| mit |
mne-tools/mne-tools.github.io | 0.21/_downloads/c62fefd70047b3cfac89143cb5c9badf/plot_read_events.py | 5 | 2492 | """
.. _ex-read-events:
=====================
Reading an event file
=====================
Read events from a file. For a more detailed discussion of events in
MNE-Python, see :ref:`tut-events-vs-annotations` and :ref:`tut-event-arrays`.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Chris Holdgraf <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
###############################################################################
# Reading events
# --------------
#
# Below we'll read in an events file. We suggest that this file end in
# ``-eve.fif``. Note that we can read in the entire events file, or only
# events corresponding to particular event types with the ``include`` and
# ``exclude`` parameters.
events_1 = mne.read_events(fname, include=1)
events_1_2 = mne.read_events(fname, include=[1, 2])
events_not_4_32 = mne.read_events(fname, exclude=[4, 32])
###############################################################################
# Events objects are essentially numpy arrays with three columns:
# ``event_sample | previous_event_id | event_id``
print(events_1[:5], '\n\n---\n\n', events_1_2[:5], '\n\n')
for ind, before, after in events_1[:5]:
print("At sample %d stim channel went from %d to %d"
% (ind, before, after))
###############################################################################
# Plotting events
# ---------------
#
# We can also plot events in order to visualize how events occur over the
# course of our recording session. Below we'll plot our three event types
# to see which ones were included.
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
mne.viz.plot_events(events_1, axes=axs[0], show=False)
axs[0].set(title="restricted to event 1")
mne.viz.plot_events(events_1_2, axes=axs[1], show=False)
axs[1].set(title="restricted to event 1 or 2")
mne.viz.plot_events(events_not_4_32, axes=axs[2], show=False)
axs[2].set(title="keep all but 4 and 32")
plt.setp([ax.get_xticklabels() for ax in axs], rotation=45)
plt.tight_layout()
plt.show()
###############################################################################
# Writing events
# --------------
#
# Finally, we can write events to disk. Remember to use the naming convention
# ``-eve.fif`` for your file.
mne.write_events('example-eve.fif', events_1)
| bsd-3-clause |
CrazyGuo/bokeh | examples/compat/mpl/lc_offsets.py | 34 | 1096 | from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
# Simulate a series of ocean current profiles, successively
# offset by 0.1 m/s so that they form what is sometimes called
# a "waterfall" plot or a "stagger" plot.
nverts = 60
ncurves = 20
offs = (0.1, 0.0)
rs = np.random.RandomState([12345678])
yy = np.linspace(0, 2 * np.pi, nverts)
ym = np.amax(yy)
xx = (0.2 + (ym - yy) / ym) ** 2 * np.cos(yy - 0.4) * 0.5
segs = []
for i in range(ncurves):
xxx = xx + 0.02 * rs.randn(nverts)
curve = list(zip(xxx, yy * 100))
segs.append(curve)
colors = [(1.0, 0.0, 0.0, 1.0), (0.0, 0.5, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0),
(0.0, 0.75, 0.75, 1.0), (0.75, 0.75, 0, 1.0), (0.75, 0, 0.75, 1.0),
(0.0, 0.0, 0.0, 1.0)]
col = LineCollection(segs, linewidth=5, offsets=offs)
ax = plt.axes()
ax.add_collection(col, autolim=True)
col.set_color(colors)
ax.set_title('Successive data offsets')
fig = plt.gcf()
output_file("lc_offsets.html")
show(mpl.to_bokeh())
| bsd-3-clause |
UCL-InfoSec/loopix | setup.py | 1 | 1187 | #!/usr/bin/env python
from setuptools import setup
import loopix
setup(name='loopix',
version=loopix.VERSION,
description='The Loopix mix system.',
author='Ania Piotrowska (UCL Information Security)',
author_email='[email protected]',
url=r'https://pypi.python.org/pypi/loopix/',
packages=['loopix'],
license="2-clause BSD",
long_description="""The Loopix mix system for anonymous communications.""",
# setup_requires=["pytest >= 2.6.4"],
setup_requires=['pytest-runner', "pytest"],
tests_require=[
"pytest",
"future >= 0.14.3",
"pytest >= 3.0.0",
"msgpack-python >= 0.4.6",
"petlib >= 0.0.38",
],
install_requires=[
#"future >= 0.14.3",
"numpy >= 1.9.1",
"pytest >= 2.6.4",
"twisted >= 15.5.0",
"msgpack-python >= 0.4.6",
"petlib >= 0.0.34",
"sphinxmix>=0.0.6",
"fabric>=1.12.0",
"boto3>=1.4.0",
"matplotlib>=1.4.2",
"scipy>=0.16.1",
"scapy>=2.3.3",
"pybloom>=1.1"
],
zip_safe=False,
)
| bsd-2-clause |
JackKelly/neuralnilm_prototype | scripts/e445.py | 2 | 38141 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
444: just testing new DimshuffleLayer.
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 5000
N_SEQ_PER_BATCH = 64
SEQ_LENGTH = 1024
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['washer dryer', 'washing machine'],
'hair straighteners'
# 'television',
# 'dish washer',
# ['fridge freezer', 'fridge', 'freezer']
],
max_appliance_powers=[2400, 500, 200, 2500, 200],
# max_input_power=200,
max_diff=200,
on_power_thresholds=[5] * 5,
min_on_durations=[1800, 60, 60, 1800, 60],
min_off_durations=[600, 12, 12, 1800, 12],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.75,
skip_probability_for_first_appliance=0.2,
one_target_per_seq=False,
n_seq_per_batch=N_SEQ_PER_BATCH,
# subsample_target=4,
include_diff=False,
include_power=True,
clip_appliance_power=False,
target_is_prediction=False,
# independently_center_inputs=True,
standardise_input=True,
standardise_targets=True,
# unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False,
# two_pass=True,
# clock_type='ramp',
# clock_period=SEQ_LENGTH
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
2000: 1e-3,
10000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=32)
)
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_b(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': Conv1DLayer,
'num_filters': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_c(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_d(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)',
'b': None
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
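# Note on exp_d and the variants below: passing W as the string
# 'ref:conv0.W.dimshuffle(1, 0, 2)' appears to tie the deconvolution filters
# to the (channel-transposed) conv0 filters via neuralnilm's parameter
# referencing, i.e. a tied-weights convolutional autoencoder; this reading is
# an editorial assumption, not confirmed by the library docs.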
def exp_e(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_f(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_g(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_h(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_i(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'label': 'dense0',
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
},
{
'type': SharedWeightsDenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None,
'W': 'ref:dense0.W.T'
}
]
net = Net(**net_dict_copy)
return net
def exp_j(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_k(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_l(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_m(name):
# avg valid cost = 0.0016604423
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_n(name):
# i but with no biases
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'label': 'dense0',
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None,
'b': None
},
{
'type': SharedWeightsDenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None,
'W': 'ref:dense0.W.T',
'b': None
}
]
net = Net(**net_dict_copy)
return net
def exp_o(name):
# tied biases (forwards)
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'b': 'ref:conv0.b.T',
'shared_weights': True,
'shared_biases': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_p(name):
# tied biases (backwards)
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'b': 'ref:conv0.b.T[::-1]',
'shared_weights': True,
'shared_biases': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_q(name):
# separate biases
# avg valid cost = 0.0012744671
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_r(name):
# no biases
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid',
'b': None
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'b': None,
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_s(name):
# separate biases
# q but don't reverse W
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)',
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
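# Note on the weight tying above: exp_q hands conv0's filters to the
# DeConv1DLayer flipped along the time axis ('[:, :, ::-1]'), which under the
# usual cross-correlation convention makes the decoder the transpose of the
# encoder, whereas exp_s ("q but don't reverse W") deliberately keeps the
# filters un-flipped to test whether the reversal matters.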
def exp_t(name):
# separate biases
# based on s
# but with dense layers in between
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
NUM_FILTERS = 4
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 1021 * NUM_FILTERS,
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
'shape': (N_SEQ_PER_BATCH, 1021, NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
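# In exp_t above, 1021 appears to be the length of conv0's output under
# 'valid' border mode (sequence length - filter_length + 1, presumably
# 1024 - 4 + 1), so the DenseLayer produces one value per time step and
# filter and the ReshapeLayer restores the (batch, time, features) layout
# that the deconvolution expects.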
def exp_u(name):
# separate biases
# based on s
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
NUM_FILTERS = 4
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
# {
# 'type': DimshuffleLayer,
# 'pattern': (0, 2, 1) # back to (batch, time, features)
# },
# {
# 'type': DenseLayer,
# 'num_units': 1021 * NUM_FILTERS,
# 'nonlinearity': rectify
# },
# {
# 'type': ReshapeLayer,
# 'shape': (N_SEQ_PER_BATCH, 1021, NUM_FILTERS)
# },
# {
# 'type': DimshuffleLayer,
# 'pattern': (0, 2, 1) # (batch, features, time)
# },
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_v(name):
# separate biases
# based on s
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
learning_rate=1e-1,
learning_rate_changes_by_iteration={}
))
NUM_FILTERS = 4
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 1021 * NUM_FILTERS,
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
'shape': (N_SEQ_PER_BATCH, 1021, NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_w(name):
# separate biases
# based on s
# like v but with a Dense linear layer
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
learning_rate=1e-2,
learning_rate_changes_by_iteration={}
))
NUM_FILTERS = 4
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 1021 * NUM_FILTERS,
'nonlinearity': None
},
{
'type': ReshapeLayer,
'shape': (N_SEQ_PER_BATCH, 1021, NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_x(name):
# separate biases
# based on s
# like v but with a Dense linear layer
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
learning_rate=1e-2,
learning_rate_changes_by_iteration={}
))
NUM_FILTERS = 4
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'label': 'dense0',
'type': DenseLayer,
'num_units': 1021,
'nonlinearity': rectify
},
{
'label': 'dense1',
'type': DenseLayer,
'num_units': 256,
'nonlinearity': rectify
},
{
'label': 'dense2',
'type': DenseLayer,
'num_units': 32,
'nonlinearity': rectify
},
{
'type': SharedWeightsDenseLayer,
'num_units': 256,
'nonlinearity': rectify,
'W': 'ref:dense2.W.T'
},
{
'type': SharedWeightsDenseLayer,
'num_units': 1021,
'nonlinearity': rectify,
'W': 'ref:dense1.W.T'
},
{
'type': SharedWeightsDenseLayer,
'num_units': 1021 * NUM_FILTERS,
'nonlinearity': None,
'W': 'ref:dense0.W.T'
},
{
'type': ReshapeLayer,
'shape': (N_SEQ_PER_BATCH, 1021, NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
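# exp_x above is effectively a tied-weight autoencoder: conv0 encodes the
# sequence, dense0/dense1/dense2 squeeze it through a 1021 -> 256 -> 32
# bottleneck, the three SharedWeightsDenseLayers decode with the transposed
# weight matrices ('ref:dense2.W.T', 'ref:dense1.W.T', 'ref:dense0.W.T'),
# and the final DeConv1DLayer reuses conv0's flipped filters to map back to
# a single output channel.  The 'ref:' strings are assumed to be
# neuralnilm's syntax for sharing another labelled layer's parameters.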
def main():
EXPERIMENTS = list('x')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=10000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e445.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
jwlawson/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 6 | 10430 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename,
local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename +
'. Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
# This is the original text but words are replaced by their codes
# count - list of [word(string), count(int)] pairs for the most common words
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
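# For example, codes are assigned in order of frequency: 'UNK' always gets
# code 0, and if 'the' is the most common word in the corpus then
# dictionary['the'] == 1 and reverse_dictionary[1] == 'the'.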
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
  # Backtrack a little bit to avoid skipping words at the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
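# With skip_window=1 and num_skips=2 each center word is paired with both of
# its immediate neighbours, so for a data stream w0 w1 w2 w3 ... the eight
# pairs printed above are (in randomised order within each center word):
# w1 -> w0, w1 -> w2, w2 -> w1, w2 -> w3, w3 -> w2, w3 -> w4, w4 -> w3, w4 -> w5.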
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy; they don't affect the calculation.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
  # tf.nn.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
# Explanation of the meaning of NCE loss:
# http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
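  # Shapes, for reference: embed is [batch_size, embedding_size], nce_weights
  # is [vocabulary_size, embedding_size] and nce_biases is [vocabulary_size];
  # tf.nn.nce_loss returns one loss value per example, which reduce_mean above
  # averages into a scalar training objective.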
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run()).
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
| apache-2.0 |
lordkman/burnman | misc/benchmarks/benchmark.py | 4 | 29408 | from __future__ import absolute_import
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
import os.path
import sys
sys.path.insert(1, os.path.abspath('../..'))
import numpy as np
import matplotlib.pyplot as plt
import burnman
import burnman.eos.birch_murnaghan as bm
import burnman.eos.birch_murnaghan_4th as bm4
import burnman.eos.mie_grueneisen_debye as mgd
import burnman.eos.slb as slb
import burnman.eos.vinet as vinet
import matplotlib.image as mpimg
def check_birch_murnaghan():
"""
Recreates Stixrude and Lithgow-Bertelloni (2005) Figure 1, bulk and shear modulus without thermal corrections
"""
plt.close()
# make a test mineral
test_mineral = burnman.Mineral()
test_mineral.params = {'name': 'test',
'V_0': 6.844e-6,
'K_0': 259.0e9,
'Kprime_0': 4.0,
'G_0': 175.0e9,
'Gprime_0': 1.7,
'molar_mass': .0,
}
test_mineral.set_method('bm3')
pressure = np.linspace(0., 140.e9, 100)
volume = np.empty_like(pressure)
bulk_modulus = np.empty_like(pressure)
shear_modulus = np.empty_like(pressure)
# calculate its static properties
for i in range(len(pressure)):
volume[i] = bm.volume(pressure[i], test_mineral.params)
bulk_modulus[i] = bm.bulk_modulus(volume[i], test_mineral.params)
shear_modulus[i] = bm.shear_modulus_third_order(
volume[i], test_mineral.params) # third order is used for the plot we are comparing against
# compare with figure 1
plt.plot(pressure / 1.e9, bulk_modulus /
1.e9, pressure / 1.e9, shear_modulus / 1.e9)
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig1.png')
plt.imshow(fig1, extent=[0, 140, 0, 800], aspect='auto')
plt.plot(pressure / 1.e9, bulk_modulus / 1.e9,
'g+', pressure / 1.e9, shear_modulus / 1.e9, 'g+')
plt.ylim(0, 800)
plt.xlim(0, 140)
plt.xlabel("Pressure (GPa)")
plt.ylabel("Modulus (GPa)")
plt.title(
"Comparing with Figure 1 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
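# Reference sketch (not called by any benchmark): a minimal standalone
# implementation of the standard third-order Birch-Murnaghan pressure-volume
# relation.  It is assumed, not verified here, that burnman's bm.volume() and
# bm.bulk_modulus() are built on this same form; see the BurnMan source for
# the authoritative implementation.
def _bm3_pressure_sketch(volume, params):
    """Third-order Birch-Murnaghan P(V) for a params dict containing
    'V_0', 'K_0' and 'Kprime_0' (the same keys used by the benchmarks)."""
    x = params['V_0'] / volume  # compression ratio V_0/V
    return (1.5 * params['K_0'] * (x**(7. / 3.) - x**(5. / 3.))
            * (1. + 0.75 * (params['Kprime_0'] - 4.) * (x**(2. / 3.) - 1.)))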
def check_birch_murnaghan_4th():
"""
Recreates the formulation of the 4th order Birch-Murnaghan EOS as in Ahmad and Alkammash, 2012; Figure 1.
"""
plt.close()
# make a test mineral
test_mineral = burnman.Mineral()
test_mineral.params = {'name': 'test',
'V_0': 10.e-6,
'K_0': 72.7e9,
'Kprime_0': 4.14,
'Kprime_prime_0': -0.0484e-9,
}
test_mineral.set_method('bm4')
pressure = np.linspace(0., 90.e9, 20)
volume = np.empty_like(pressure)
# calculate its static properties
for i in range(len(pressure)):
volume[i] = bm4.volume_fourth_order(
pressure[i], test_mineral.params) / test_mineral.params.get('V_0')
# compare with figure 1
plt.plot(pressure / 1.e9, volume)
fig1 = mpimg.imread('../../burnman/data/input_figures/Ahmad.png')
plt.imshow(fig1, extent=[0., 90., .65, 1.], aspect='auto')
plt.plot(pressure / 1.e9, volume, marker='o',
color='r', linestyle='', label='BM4')
plt.legend(loc='lower left')
plt.xlim(0., 90.)
plt.ylim(.65, 1.)
plt.xlabel("Volume/V0")
plt.ylabel("Pressure (GPa)")
plt.title("Comparing with Figure 1 of Ahmad et al., (2012)")
plt.show()
def check_vinet():
"""
Recreates Dewaele et al., 2006, Figure 1, fitting a Vinet EOS to Fe data
"""
plt.close()
# make a test mineral
test_mineral = burnman.Mineral()
test_mineral.params = {'name': 'test',
'V_0': 6.75e-6,
'K_0': 163.4e9,
'Kprime_0': 5.38,
}
test_mineral.set_method('vinet')
pressure = np.linspace(17.7e9, 300.e9, 20)
volume = np.empty_like(pressure)
# calculate its static properties
for i in range(len(pressure)):
volume[i] = vinet.volume(pressure[i], test_mineral.params)
# compare with figure 1
plt.plot(pressure / 1.e9, volume / 6.02e-7)
fig1 = mpimg.imread('../../burnman/data/input_figures/Dewaele.png')
plt.imshow(fig1, extent=[0., 300., 6.8, 11.8], aspect='auto')
plt.plot(pressure / 1.e9, volume / 6.02e-7, marker='o',
color='r', linestyle='', label='Vinet Fit')
plt.legend(loc='lower left')
plt.xlim(0., 300.)
plt.ylim(6.8, 11.8)
plt.ylabel("Volume (Angstroms^3/atom")
plt.xlabel("Pressure (GPa)")
plt.title("Comparing with Figure 1 of Dewaele et al., (2006)")
plt.show()
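# Reference sketch (not called by any benchmark): the standard Vinet equation
# of state, P = 3*K_0*(1 - x)/x**2 * exp(1.5*(K'_0 - 1)*(1 - x)) with
# x = (V/V_0)**(1/3).  burnman's vinet.volume() is assumed to numerically
# invert an expression of this form.
def _vinet_pressure_sketch(volume, params):
    """Vinet P(V) for a params dict containing 'V_0', 'K_0' and 'Kprime_0'."""
    x = (volume / params['V_0'])**(1. / 3.)
    return (3. * params['K_0'] * (1. - x) / (x * x)
            * np.exp(1.5 * (params['Kprime_0'] - 1.) * (1. - x)))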
def check_mgd_shim_duffy_kenichi():
"""
    Attempts to recreate Shim, Duffy and Kenichi (2002)
"""
plt.close()
# Create gold material from Table 1
gold = burnman.Mineral()
gold.params = {'name': 'gold',
'V_0': 10.22e-6,
'K_0': 167.0e9,
'Kprime_0': 5.0,
'G_0': 0.0e9,
'Gprime_0': 0.0,
'molar_mass': .196966,
'n': 1.0,
'Debye_0': 170.,
'grueneisen_0': 2.97, # this does better with gr = 2.93. Why?
'q_0': 1.0}
gold.set_method('mgd3')
# Total pressures, pulled from Table 2
ref_pressures = [
np.array([0., 3.55, 7.55, 12.06, 17.16, 22.91, 29.42, 36.77, 45.11, 54.56, 65.29, 77.50, 91.42, 107.32, 125.51, 146.38, 170.38, 198.07])]
ref_pressures.append(
np.array([4.99, 8.53, 12.53, 17.04, 22.13, 27.88, 34.38, 41.73, 50.06, 59.50, 70.22, 82.43, 96.33, 112.22, 130.40, 151.25, 175.24, 202.90]))
ref_pressures.append(
np.array([12.14, 15.69, 19.68, 24.19, 29.28, 35.03, 41.53, 48.88, 57.20, 66.64, 77.37, 89.57, 103.47, 119.35, 137.53, 158.38, 182.36, 210.02]))
ref_pressures.append(
np.array([19.30, 22.84, 26.84, 31.35, 36.44, 42.19, 48.68, 56.03, 64.35, 73.80, 84.52, 96.72, 110.62, 126.50, 144.68, 165.53, 189.51, 217.17]))
eos = mgd.MGD3()
pressures = np.empty_like(ref_pressures)
ref_dv = np.linspace(0.0, 0.34, len(pressures[0]))
ref_volumes = (1 - ref_dv) * gold.params['V_0']
T = np.array([300., 1000., 2000., 3000.])
for t in range(len(pressures)):
for i in range(len(pressures[t])):
pressures[t][i] = eos.pressure(T[t], ref_volumes[i], gold.params)
plt.plot(ref_dv, (pressures[t] / 1.e9 - ref_pressures[t]))
plt.ylim(-1, 1)
plt.ylabel("Difference in pressure (GPa)")
plt.xlabel("1-dV/V")
plt.title("Comparing with Shim, Duffy, and Kenichi (2002)")
plt.show()
def check_mgd_fei_mao_shu_hu():
"""
    Benchmark against Fei, Mao, Shu and Hu (1991)
"""
mgfeo = burnman.Mineral()
mgfeo.params = {'name': 'MgFeO',
'V_0': 11.657e-6,
'K_0': 157.0e9,
'Kprime_0': 4.0,
'G_0': 0.0e9,
'Gprime_0': 0.0,
'molar_mass': .196966,
'n': 2.0,
'Debye_0': 500.,
'grueneisen_0': 1.50,
'q_0': 1.1}
mgfeo.set_method('mgd3')
# pulled from table 1
temperatures = np.array(
[300, 300, 483, 483, 483, 590, 593, 593, 593, 700, 600, 500, 650, 600,
600, 650, 700, 737, 727, 673, 600, 543, 565, 585, 600, 628, 654, 745, 768, 747, 726, 700, 676])
volumes = np.array(
[77.418, 72.327, 74.427, 73.655, 72.595, 74.1, 73.834, 73.101, 70.845, 73.024, 72.630, 68.644, 72.969, 72.324, 71.857,
72.128, 73.283, 73.337, 72.963, 71.969, 69.894, 67.430, 67.607, 67.737, 68.204, 68.518, 68.955, 70.777, 72.921, 72.476, 72.152, 71.858, 71.473])
# change from cubic angstroms per unit cell to cubic meters per mol of
# molecules.
volumes = volumes / 1.e30 * 6.022141e23 / 4.0
ref_pressures = np.array(
[0.0, 12.23, 7.77, 9.69, 12.54, 9.21, 9.90, 11.83, 18.35, 12.68, 13.15, 25.16, 12.53, 14.01, 15.34,
14.86, 11.99, 12.08, 13.03, 15.46, 21.44, 29.98, 29.41, 29.05, 27.36, 26.38, 24.97, 19.49, 13.39, 14.48, 15.27, 15.95, 16.94])
ref_pressures = ref_pressures
pressures = np.empty_like(volumes)
eos = mgd.MGD3()
for i in range(len(temperatures)):
pressures[i] = eos.pressure(temperatures[i], volumes[i], mgfeo.params)
plt.scatter(temperatures, (pressures / 1.e9 - ref_pressures))
plt.ylim(-1, 1)
plt.title("Comparing with Fei, Mao, Shu, and Hu (1991)")
plt.xlabel("Temperature (K) at various volumes")
plt.ylabel("Difference in total pressure (GPa)")
plt.show()
def check_slb_fig3():
"""
Benchmark grueneisen parameter against figure 3 of Stixrude and Lithgow-Bertelloni (2005b)
"""
perovskite = burnman.Mineral()
    perovskite.params = {'name': 'perovskite',
'V_0': burnman.tools.molar_volume_from_unit_cell_volume(168.27, 4.),
'grueneisen_0': 1.63,
'q_0': 1.7}
volume = np.linspace(0.6, 1.0, 100)
grueneisen_slb = np.empty_like(volume)
grueneisen_mgd = np.empty_like(volume)
q_slb = np.empty_like(volume)
q_mgd = np.empty_like(volume)
slb_eos = slb.SLB2()
mgd_eos = mgd.MGD2()
# calculate its thermal properties
for i in range(len(volume)):
# call with dummy pressure and temperatures, they do not change it
grueneisen_slb[i] = slb_eos.grueneisen_parameter(
0., 0., volume[i] * perovskite.params['V_0'], perovskite.params)
grueneisen_mgd[i] = mgd_eos.grueneisen_parameter(
0., 0., volume[i] * perovskite.params['V_0'], perovskite.params)
q_slb[i] = slb_eos.volume_dependent_q(
1. / volume[i], perovskite.params)
q_mgd[i] = perovskite.params['q_0']
    # compare with figure 3
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig3.png')
plt.imshow(fig1, extent=[0.6, 1.0, 0.35, 2.0], aspect='auto')
plt.plot(volume, grueneisen_slb, 'g+', volume, grueneisen_mgd, 'b+')
plt.plot(volume, q_slb, 'g+', volume, q_mgd, 'b+')
plt.xlim(0.6, 1.0)
plt.ylim(0.35, 2.0)
plt.ylabel("Grueneisen parameter")
plt.xlabel("Relative Volume V/V0")
plt.title(
"Comparing with Figure 3 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
def check_slb_fig7_txt():
"""
Calculates all values for forsterite and benchmarks with values from Stixrude and Lithgow-Bertelloni (personal communication)
"""
forsterite = burnman.Mineral()
forsterite.params = {'name': 'forsterite',
'V_0': 43.603e-6,
'K_0': 127.955e9,
'Kprime_0': 4.232,
'G_0': 81.6e9,
'Gprime_0': 1.4,
'molar_mass': .140695,
'n': 7.0,
'Debye_0': 809.183,
'grueneisen_0': .993,
'q_0': 2.093,
'F_0': -1.1406e5,
'eta_s_0': 2.364}
forsterite.set_method('slb3')
data = np.loadtxt(
"../../burnman/data/input_minphys/slb_fig7.txt", skiprows=2)
temperature = np.array(data[:, 2])
pressure = np.array(data[:, 0])
rho = np.array(data[:, 3])
rho_comp = np.empty_like(rho)
Kt = np.array(data[:, 4])
Kt_comp = np.empty_like(Kt)
Ks = np.array(data[:, 5])
Ks_comp = np.empty_like(Ks)
G = np.array(data[:, 6])
G_comp = np.empty_like(G)
VB = np.array(data[:, 7])
VB_comp = np.empty_like(VB)
VS = np.array(data[:, 8])
VS_comp = np.empty_like(VS)
VP = np.array(data[:, 9])
VP_comp = np.empty_like(VP)
vol = np.array(data[:, 10])
vol_comp = np.empty_like(vol)
alpha = np.array(data[:, 11])
alpha_comp = np.empty_like(alpha)
Cp = np.array(data[:, 12])
Cp_comp = np.empty_like(Cp)
gr = np.array(data[:, 13])
gr_comp = np.empty_like(gr)
gibbs = np.array(data[:, 14])
gibbs_comp = np.empty_like(gibbs)
entropy = np.array(data[:, 15])
entropy_comp = np.empty_like(gibbs)
enthalpy = np.array(data[:, 16])
enthalpy_comp = np.empty_like(gibbs)
for i in range(len(temperature)):
forsterite.set_state(pressure[i], temperature[i])
rho_comp[i] = 100. * (forsterite.density / 1000. - rho[i]) / rho[i]
Kt_comp[i] = 100. * (
forsterite.isothermal_bulk_modulus / 1.e9 - Kt[i]) / Kt[i]
Ks_comp[i] = 100. * (
forsterite.adiabatic_bulk_modulus / 1.e9 - Ks[i]) / Ks[i]
G_comp[i] = 100. * (forsterite.shear_modulus / 1.e9 - G[i]) / G[i]
VB_comp[i] = 100. * (forsterite.v_phi / 1000. - VB[i]) / VB[i]
VS_comp[i] = 100. * (forsterite.v_s / 1000. - VS[i]) / VS[i]
VP_comp[i] = 100. * (forsterite.v_p / 1000. - VP[i]) / VP[i]
vol_comp[i] = 100. * (forsterite.molar_volume * 1.e6 - vol[i]) / vol[i]
alpha_comp[i] = 100. * (
forsterite.thermal_expansivity / 1.e-5 - alpha[i]) / (alpha[-1])
Cp_comp[i] = 100. * (forsterite.heat_capacity_p /
forsterite.params['molar_mass'] / 1000. - Cp[i]) / (Cp[-1])
gr_comp[i] = (forsterite.grueneisen_parameter - gr[i]) / gr[i]
gibbs_comp[i] = 100. * (
forsterite.molar_gibbs / 1.e6 - gibbs[i]) / gibbs[i]
entropy_comp[i] = 100. * (
forsterite.molar_entropy - entropy[i]) / (entropy[i] if entropy[i] != 0. else 1.)
enthalpy_comp[i] = 100. * (
forsterite.molar_enthalpy / 1.e6 - enthalpy[i]) / (enthalpy[i] if enthalpy[i] != 0. else 1.)
plt.plot(temperature, rho_comp, label=r'$\rho$')
    plt.plot(temperature, Kt_comp, label=r'$K_T$')
    plt.plot(temperature, Ks_comp, label=r'$K_S$')
plt.plot(temperature, G_comp, label=r'$G$')
plt.plot(temperature, VS_comp, label=r'$V_S$')
plt.plot(temperature, VP_comp, label=r'$V_P$')
plt.plot(temperature, VB_comp, label=r'$V_\phi$')
plt.plot(temperature, vol_comp, label=r'$V$')
plt.plot(temperature, alpha_comp, label=r'$\alpha$')
plt.plot(temperature, Cp_comp, label=r'$c_P$')
plt.plot(temperature, gr_comp, label=r'$\gamma$')
plt.plot(temperature, gibbs_comp, label=r'Gibbs')
plt.plot(temperature, enthalpy_comp, label=r'Enthalpy')
plt.plot(temperature, entropy_comp, label=r'Entropy')
plt.xlim([0, 2750])
plt.ylim([-0.001, 0.001])
plt.xticks([0, 800, 1600, 2200])
plt.xlabel("Temperature (K)")
plt.ylabel("Percent Difference from HeFESTo")
plt.legend(loc="center right")
# plt.savefig("output_figures/benchmark1.pdf")
plt.show()
def check_slb_fig7():
"""
Calculates all values for forsterite and benchmarks with figure 7 from Stixrude and Lithgow-Bertelloni (2005)
"""
forsterite = burnman.Mineral()
forsterite.params = {'name': 'forsterite',
'V_0': 43.60e-6,
'K_0': 128.0e9,
'Kprime_0': 4.2,
'G_0': 82.0e9,
'Gprime_0': 1.4,
'n': 7.0,
'molar_mass': .140695,
'Debye_0': 809.,
'grueneisen_0': .99,
'q_0': 2.1,
'eta_s_0': 2.4}
forsterite.set_method('slb3')
temperature = np.linspace(0., 2000., 200)
volume = np.empty_like(temperature)
bulk_modulus = np.empty_like(temperature)
shear_modulus = np.empty_like(temperature)
heat_capacity = np.empty_like(temperature)
pressure = 1.0e5
forsterite.set_state(pressure, 300.)
Ks_0 = forsterite.adiabatic_bulk_modulus
# calculate its thermal properties
for i in range(len(temperature)):
forsterite.set_state(pressure, temperature[i])
volume[i] = forsterite.molar_volume / forsterite.params['V_0']
bulk_modulus[i] = forsterite.adiabatic_bulk_modulus / Ks_0
shear_modulus[i] = forsterite.shear_modulus / forsterite.params['G_0']
heat_capacity[i] = forsterite.heat_capacity_p / forsterite.params['n']
# compare with figure 7
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_vol.png')
plt.imshow(fig1, extent=[0, 2200, 0.99, 1.08], aspect='auto')
plt.plot(temperature, volume, 'g+')
plt.ylim(0.99, 1.08)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Relative Volume V/V0")
plt.title(
"Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_Cp.png')
plt.imshow(fig1, extent=[0, 2200, 0., 70.], aspect='auto')
plt.plot(temperature, heat_capacity, 'g+')
plt.ylim(0, 70)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Heat Capacity Cp")
plt.title(
"Comparing with adiabatic_bulk_modulus7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_K.png')
plt.imshow(fig1, extent=[0, 2200, 0.6, 1.02], aspect='auto')
plt.plot(temperature, bulk_modulus, 'g+')
plt.ylim(0.6, 1.02)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Relative Bulk Modulus K/K0")
plt.title(
"Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_G.png')
plt.imshow(fig1, extent=[0, 2200, 0.6, 1.02], aspect='auto')
plt.plot(temperature, shear_modulus, 'g+')
plt.ylim(0.6, 1.02)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Relative Shear Modulus G/G0")
plt.title(
"Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
def check_averaging():
"""
Reproduce Figure 1a from Watt et. al. 1976 to check the Voigt, Reuss,
Voigt-Reuss-Hill, and Hashin-Shtrikman bounds for an elastic composite
"""
voigt = burnman.averaging_schemes.Voigt()
reuss = burnman.averaging_schemes.Reuss()
voigt_reuss_hill = burnman.averaging_schemes.VoigtReussHill()
hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper()
hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower()
# create arrays for sampling in volume fraction
volumes = np.linspace(0.0, 1.0, 100)
v_bulk_modulus = np.empty_like(volumes)
v_shear_modulus = np.empty_like(volumes)
r_bulk_modulus = np.empty_like(volumes)
r_shear_modulus = np.empty_like(volumes)
vrh_bulk_modulus = np.empty_like(volumes)
vrh_shear_modulus = np.empty_like(volumes)
hsu_bulk_modulus = np.empty_like(volumes)
hsu_shear_modulus = np.empty_like(volumes)
hsl_bulk_modulus = np.empty_like(volumes)
hsl_shear_modulus = np.empty_like(volumes)
# MgO bulk and shear moduli taken from Landolt-Boernstein
# - Group III Condensed Matter Volume 41B, 1999, pp 1-3
K2 = 152. # Bulk modulus, GPa
G2 = 155. # Shear modulus, GPa
# AgCl bulk and shear moduli (estimated from plot)
G1 = G2 * 0.07
K1 = K2 * 0.27
for i in range(len(volumes)):
v_bulk_modulus[i] = voigt.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
v_shear_modulus[i] = voigt.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_bulk_modulus[i] = reuss.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_shear_modulus[i] = reuss.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_bulk_modulus[i] = voigt_reuss_hill.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_shear_modulus[i] = voigt_reuss_hill.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
fig = mpimg.imread('../../burnman/data/input_figures/watt_1976_a1.png')
plt.imshow(fig, extent=[0, 1.0, 0.25, 1.0], aspect='auto')
plt.plot(volumes, v_bulk_modulus / K2, 'g-')
plt.plot(volumes, r_bulk_modulus / K2, 'g-')
plt.plot(volumes, vrh_bulk_modulus / K2, 'g-')
plt.plot(volumes, hsu_bulk_modulus / K2, 'g-')
plt.plot(volumes, hsl_bulk_modulus / K2, 'g-')
plt.ylim(0.25, 1.00)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 1 of Watt et al 1976")
plt.show()
fig = mpimg.imread('../../burnman/data/input_figures/watt_1976_a2.png')
plt.imshow(fig, extent=[0, 1.0, 0.0, 1.0], aspect='auto')
plt.plot(volumes, v_shear_modulus / G2, 'g-')
plt.plot(volumes, r_shear_modulus / G2, 'g-')
plt.plot(volumes, vrh_shear_modulus / G2, 'g-')
plt.plot(volumes, hsu_shear_modulus / G2, 'g-')
plt.plot(volumes, hsl_shear_modulus / G2, 'g-')
plt.ylim(0.0, 1.00)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged shear modulus")
plt.title("Comparing with Figure 1 of Watt et al 1976")
plt.show()
# also check against some numerical values given in Berryman (1995) for
# porous glass
K = 46.3
G = 30.5
# the value for porosity=0.46 in the table appears to be a typo. Remove
# it here
porosity = np.array(
[0.0, 0.05, 0.11, 0.13, 0.25, 0.33, 0.36, 0.39, 0.44, 0.50, 0.70])
berryman_bulk_modulus = np.array(
[46.3, 41.6, 36.6, 35.1, 27.0, 22.5, 21.0, 19.6, 17.3, 14.8, 7.7]) # 15.5 probably a typo?
hsu_bulk_modulus_vals = np.empty_like(porosity)
for i in range(len(porosity)):
hsu_bulk_modulus_vals[i] = hashin_shtrikman_upper.average_bulk_moduli(
[porosity[i], 1.0 - porosity[i]], [0.0, K], [0.0, G])
for i in range(len(volumes)):
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [0.0, K], [0.0, G])
fig = mpimg.imread('../../burnman/data/input_figures/berryman_fig4.png')
plt.imshow(fig, extent=[0, 1.0, 0.0, 50.0], aspect='auto')
plt.plot(volumes, hsu_bulk_modulus, 'g-')
plt.scatter(porosity, hsu_bulk_modulus_vals, c='r')
plt.scatter(porosity, berryman_bulk_modulus, c='y')
plt.ylim(0.0, 50.0)
plt.xlim(0, 1.0)
plt.xlabel("Porosity")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 4 of Berryman (1995)")
plt.show()
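# Reference sketch (not called by any benchmark): the textbook Voigt, Reuss
# and Voigt-Reuss-Hill averages that check_averaging() exercises.  burnman's
# averaging_schemes classes are assumed to implement these same definitions;
# the Hashin-Shtrikman bounds are more involved and are not reproduced here.
def _voigt_reuss_hill_sketch(fractions, moduli):
    """Return (Voigt, Reuss, VRH) averages of a modulus for the given volume
    fractions.  Assumes all moduli are non-zero (the Reuss bound is a
    harmonic mean)."""
    fractions = np.asarray(fractions, dtype=float)
    moduli = np.asarray(moduli, dtype=float)
    voigt = np.sum(fractions * moduli)        # arithmetic mean: upper bound
    reuss = 1.0 / np.sum(fractions / moduli)  # harmonic mean: lower bound
    return voigt, reuss, 0.5 * (voigt + reuss)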
def check_averaging_2():
"""
Reproduce Figure 1 from Hashin and Shtrikman (1963) to check the
Hashin-Shtrikman bounds for an elastic composite
"""
hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper()
hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower()
# create arrays for sampling in volume fraction
volumes = np.linspace(0.0, 1.0, 100)
hsu_bulk_modulus = np.empty_like(volumes)
hsu_shear_modulus = np.empty_like(volumes)
hsl_bulk_modulus = np.empty_like(volumes)
hsl_shear_modulus = np.empty_like(volumes)
# These values are from Hashin and Shtrikman (1963)
K1 = 25.0
K2 = 60.7
G1 = 11.5
G2 = 41.8
for i in range(len(volumes)):
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
fig = mpimg.imread(
'../../burnman/data/input_figures/Hashin_Shtrikman_1963_fig1_K.png')
plt.imshow(fig, extent=[0, 1.0, 1.1, K2 + 0.3], aspect='auto')
plt.plot(volumes, hsu_bulk_modulus, 'g-')
plt.plot(volumes, hsl_bulk_modulus, 'g-')
plt.ylim(K1, K2)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 1 of Hashin and Shtrikman (1963)")
plt.show()
fig = mpimg.imread(
'../../burnman/data/input_figures/Hashin_Shtrikman_1963_fig2_G.png')
plt.imshow(fig, extent=[0, 1.0, 0.3, G2], aspect='auto')
plt.plot(volumes, hsu_shear_modulus, 'g-')
plt.plot(volumes, hsl_shear_modulus, 'g-')
plt.ylim(G1, G2)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged shear modulus")
plt.title("Comparing with Figure 2 of Hashin and Shtrikman (1963)")
plt.show()
def check_averaging_3():
"""
Reproduce Figure 3 from Avseth et al. (2010) to check the Voigt, Reuss,
Voigt-Reuss-Hill, and Hashin-Shtrikman bounds for an elastic composite
"""
voigt = burnman.averaging_schemes.Voigt()
reuss = burnman.averaging_schemes.Reuss()
voigt_reuss_hill = burnman.averaging_schemes.VoigtReussHill()
hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper()
hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower()
# create arrays for sampling in volume fraction
volumes = np.linspace(0.0, 1.0, 100)
v_bulk_modulus = np.empty_like(volumes)
v_shear_modulus = np.empty_like(volumes)
r_bulk_modulus = np.empty_like(volumes)
r_shear_modulus = np.empty_like(volumes)
vrh_bulk_modulus = np.empty_like(volumes)
vrh_shear_modulus = np.empty_like(volumes)
hsu_bulk_modulus = np.empty_like(volumes)
hsu_shear_modulus = np.empty_like(volumes)
hsl_bulk_modulus = np.empty_like(volumes)
hsl_shear_modulus = np.empty_like(volumes)
hs_av_bulk_modulus = np.empty_like(volumes)
hs_av_shear_modulus = np.empty_like(volumes)
# Quartz bulk and shear moduli
K2 = 37.
G2 = 45.
# Fluid bulk and shear moduli
G1 = 0.00001
K1 = 2.35
for i in range(len(volumes)):
v_bulk_modulus[i] = voigt.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
v_shear_modulus[i] = voigt.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_bulk_modulus[i] = reuss.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_shear_modulus[i] = reuss.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_bulk_modulus[i] = voigt_reuss_hill.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_shear_modulus[i] = voigt_reuss_hill.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hs_av_bulk_modulus[i] = 0.5 * hsl_bulk_modulus[
i] + 0.5 * hsu_bulk_modulus[i]
hs_av_shear_modulus[i] = 0.5 * hsl_shear_modulus[
i] + 0.5 * hsu_shear_modulus[i]
fig = mpimg.imread(
'../../burnman/data/input_figures/Avseth_et_al_2010_fig3_K.png')
plt.imshow(fig, extent=[0, 1.0, 0., 40.0], aspect='auto')
plt.plot(volumes, v_bulk_modulus, 'g-')
plt.plot(volumes, r_bulk_modulus, 'g-')
plt.plot(volumes, vrh_bulk_modulus, 'g-')
plt.plot(volumes, hsu_bulk_modulus, 'g-')
plt.plot(volumes, hsl_bulk_modulus, 'g-')
plt.plot(volumes, hs_av_bulk_modulus, 'g-')
plt.ylim(0., 40.00)
plt.xlim(0., 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 3 of Avseth et al., 2010")
plt.show()
if __name__ == "__main__":
check_averaging()
check_averaging_2()
check_averaging_3()
check_birch_murnaghan()
check_birch_murnaghan_4th()
check_vinet()
check_slb_fig7()
check_slb_fig3()
check_mgd_shim_duffy_kenichi()
check_mgd_fei_mao_shu_hu()
check_slb_fig7_txt()
| gpl-2.0 |
stylianos-kampakis/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by pointing and clicking, and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
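    # decision_surface() above evaluates the classifier on a 1-unit grid over
    # the fixed plot range [x_min, x_max] x [y_min, y_max]; Z holds the
    # decision-function value at every grid node, reshaped to the grid so it
    # can be passed directly to contour()/contourf() by the View.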
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
AlexGrig/GPy | GPy/models/mrd.py | 8 | 14617 | # ## Copyright (c) 2013, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import itertools, logging
from ..kern import Kern
from ..core.parameterization.variational import NormalPosterior, NormalPrior
from ..core.parameterization import Param, Parameterized
from ..core.parameterization.observable_array import ObsAr
from ..inference.latent_function_inference.var_dtc import VarDTC
from ..inference.latent_function_inference import InferenceMethodList
from ..likelihoods import Gaussian
from ..util.initialization import initialize_latent
from ..core.sparse_gp import SparseGP, GP
from GPy.core.parameterization.variational import VariationalPosterior
from GPy.models.bayesian_gplvm_minibatch import BayesianGPLVMMiniBatch
from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch
class MRD(BayesianGPLVMMiniBatch):
"""
!WARNING: This is bleeding edge code and still in development.
Functionality may change fundamentally during development!
Apply MRD to all given datasets Y in Ylist.
Y_i in [n x p_i]
If Ylist is a dictionary, the keys of the dictionary are the names, and the
values are the different datasets to compare.
The samples n in the datasets need
to match up, whereas the dimensionality p_d can differ.
:param [array-like] Ylist: List of datasets to apply MRD on
:param input_dim: latent dimensionality
:type input_dim: int
:param array-like X: mean of starting latent space q in [n x q]
:param array-like X_variance: variance of starting latent space q in [n x q]
    :param initx: initialisation method for the latent space:
* 'concat' - PCA on concatenation of all datasets
* 'single' - Concatenation of PCA on datasets, respectively
* 'random' - Random draw from a Normal(0,1)
:type initx: ['concat'|'single'|'random']
:param initz: initialisation method for inducing inputs
:type initz: 'permute'|'random'
:param num_inducing: number of inducing inputs to use
:param Z: initial inducing inputs
:param kernel: list of kernels or kernel to copy for each output
:type kernel: [GPy.kernels.kernels] | GPy.kernels.kernels | None (default)
:param :class:`~GPy.inference.latent_function_inference inference_method:
InferenceMethodList of inferences, or one inference method for all
:param :class:`~GPy.likelihoodss.likelihoods.likelihoods` likelihoods: the likelihoods to use
:param str name: the name of this model
:param [str] Ynames: the names for the datasets given, must be of equal length as Ylist or None
:param bool|Norm normalizer: How to normalize the data?
:param bool stochastic: Should this model be using stochastic gradient descent over the dimensions?
:param bool|[bool] batchsize: either one batchsize for all, or one batchsize per dataset.
"""
def __init__(self, Ylist, input_dim, X=None, X_variance=None,
initx = 'PCA', initz = 'permute',
num_inducing=10, Z=None, kernel=None,
inference_method=None, likelihoods=None, name='mrd',
Ynames=None, normalizer=False, stochastic=False, batchsize=10):
self.logger = logging.getLogger(self.__class__.__name__)
self.input_dim = input_dim
self.num_inducing = num_inducing
if isinstance(Ylist, dict):
Ynames, Ylist = zip(*Ylist.items())
self.logger.debug("creating observable arrays")
self.Ylist = [ObsAr(Y) for Y in Ylist]
#The next line is a fix for Python 3. It replicates the python 2 behaviour from the above comprehension
Y = Ylist[-1]
if Ynames is None:
self.logger.debug("creating Ynames")
Ynames = ['Y{}'.format(i) for i in range(len(Ylist))]
self.names = Ynames
assert len(self.names) == len(self.Ylist), "one name per dataset, or None if Ylist is a dict"
if inference_method is None:
self.inference_method = InferenceMethodList([VarDTC() for _ in range(len(self.Ylist))])
else:
assert isinstance(inference_method, InferenceMethodList), "please provide one inference method per Y in the list and provide it as InferenceMethodList, inference_method given: {}".format(inference_method)
self.inference_method = inference_method
if X is None:
X, fracs = self._init_X(initx, Ylist)
else:
fracs = [X.var(0)]*len(Ylist)
Z = self._init_Z(initz, X)
self.Z = Param('inducing inputs', Z)
self.num_inducing = self.Z.shape[0] # ensure M==N if M>N
# sort out the kernels
self.logger.info("building kernels")
if kernel is None:
from ..kern import RBF
kernels = [RBF(input_dim, ARD=1, lengthscale=1./fracs[i]) for i in range(len(Ylist))]
elif isinstance(kernel, Kern):
kernels = []
for i in range(len(Ylist)):
k = kernel.copy()
kernels.append(k)
else:
assert len(kernel) == len(Ylist), "need one kernel per output"
assert all([isinstance(k, Kern) for k in kernel]), "invalid kernel object detected!"
kernels = kernel
self.variational_prior = NormalPrior()
#self.X = NormalPosterior(X, X_variance)
if likelihoods is None:
            likelihoods = [Gaussian(name='Gaussian_noise_{}'.format(i)) for i in range(len(Ylist))]
else: likelihoods = likelihoods
self.logger.info("adding X and Z")
super(MRD, self).__init__(Y, input_dim, X=X, X_variance=X_variance, num_inducing=num_inducing,
Z=self.Z, kernel=None, inference_method=self.inference_method, likelihood=Gaussian(),
name='manifold relevance determination', normalizer=None,
missing_data=False, stochastic=False, batchsize=1)
self._log_marginal_likelihood = 0
self.unlink_parameter(self.likelihood)
self.unlink_parameter(self.kern)
del self.kern
del self.likelihood
self.num_data = Ylist[0].shape[0]
if isinstance(batchsize, int):
batchsize = itertools.repeat(batchsize)
self.bgplvms = []
for i, n, k, l, Y, im, bs in zip(itertools.count(), Ynames, kernels, likelihoods, Ylist, self.inference_method, batchsize):
assert Y.shape[0] == self.num_data, "All datasets need to share the number of datapoints, and those have to correspond to one another"
md = np.isnan(Y).any()
spgp = BayesianGPLVMMiniBatch(Y, input_dim, X, X_variance,
Z=Z, kernel=k, likelihood=l,
inference_method=im, name=n,
normalizer=normalizer,
missing_data=md,
stochastic=stochastic,
batchsize=bs)
spgp.kl_factr = 1./len(Ynames)
spgp.unlink_parameter(spgp.Z)
spgp.unlink_parameter(spgp.X)
del spgp.Z
del spgp.X
spgp.Z = self.Z
spgp.X = self.X
self.link_parameter(spgp, i+2)
self.bgplvms.append(spgp)
self.posterior = None
self.logger.info("init done")
def parameters_changed(self):
self._log_marginal_likelihood = 0
self.Z.gradient[:] = 0.
self.X.gradient[:] = 0.
for b, i in zip(self.bgplvms, self.inference_method):
self._log_marginal_likelihood += b._log_marginal_likelihood
self.logger.info('working on im <{}>'.format(hex(id(i))))
self.Z.gradient[:] += b.Z.gradient#full_values['Zgrad']
#grad_dict = b.full_values
            # Both the uncertain- and certain-input cases accumulate the same
            # latent-space gradient, so no branch on has_uncertain_inputs() is needed.
            self.X.gradient += b._Xgrad
#if self.has_uncertain_inputs():
# # update for the KL divergence
# self.variational_prior.update_gradients_KL(self.X)
# self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)
# pass
def log_likelihood(self):
return self._log_marginal_likelihood
def _init_X(self, init='PCA', Ylist=None):
if Ylist is None:
Ylist = self.Ylist
if init in "PCA_concat":
X, fracs = initialize_latent('PCA', self.input_dim, np.hstack(Ylist))
fracs = [fracs]*len(Ylist)
elif init in "PCA_single":
X = np.zeros((Ylist[0].shape[0], self.input_dim))
fracs = []
for qs, Y in zip(np.array_split(np.arange(self.input_dim), len(Ylist)), Ylist):
x,frcs = initialize_latent('PCA', len(qs), Y)
X[:, qs] = x
fracs.append(frcs)
else: # init == 'random':
X = np.random.randn(Ylist[0].shape[0], self.input_dim)
fracs = X.var(0)
fracs = [fracs]*len(Ylist)
X -= X.mean()
X /= X.std()
return X, fracs
def _init_Z(self, init="permute", X=None):
if X is None:
X = self.X
if init in "permute":
Z = np.random.permutation(X.copy())[:self.num_inducing]
elif init in "random":
Z = np.random.randn(self.num_inducing, self.input_dim) * X.var()
return Z
def _handle_plotting(self, fignum, axes, plotf, sharex=False, sharey=False):
import matplotlib.pyplot as plt
if axes is None:
fig = plt.figure(num=fignum)
sharex_ax = None
sharey_ax = None
plots = []
for i, g in enumerate(self.bgplvms):
try:
if sharex:
sharex_ax = ax # @UndefinedVariable
sharex = False # dont set twice
if sharey:
sharey_ax = ax # @UndefinedVariable
sharey = False # dont set twice
except:
pass
if axes is None:
ax = fig.add_subplot(1, len(self.bgplvms), i + 1, sharex=sharex_ax, sharey=sharey_ax)
elif isinstance(axes, (tuple, list, np.ndarray)):
ax = axes[i]
else:
raise ValueError("Need one axes per latent dimension input_dim")
plots.append(plotf(i, g, ax))
if sharey_ax is not None:
plt.setp(ax.get_yticklabels(), visible=False)
plt.draw()
if axes is None:
try:
fig.tight_layout()
except:
pass
return plots
def predict(self, Xnew, full_cov=False, Y_metadata=None, kern=None, Yindex=0):
"""
Prediction for data set Yindex[default=0].
This predicts the output mean and variance for the dataset given in Ylist[Yindex]
"""
b = self.bgplvms[Yindex]
self.posterior = b.posterior
self.kern = b.kern
self.likelihood = b.likelihood
return super(MRD, self).predict(Xnew, full_cov, Y_metadata, kern)
#===============================================================================
# TODO: Predict! Maybe even change to several bgplvms, which share an X?
#===============================================================================
# def plot_predict(self, fignum=None, ax=None, sharex=False, sharey=False, **kwargs):
# fig = self._handle_plotting(fignum,
# ax,
# lambda i, g, ax: ax.imshow(g.predict(g.X)[0], **kwargs),
# sharex=sharex, sharey=sharey)
# return fig
def plot_scales(self, fignum=None, ax=None, titles=None, sharex=False, sharey=True, *args, **kwargs):
"""
TODO: Explain other parameters
:param titles: titles for axes of datasets
"""
if titles is None:
titles = [r'${}$'.format(name) for name in self.names]
ymax = reduce(max, [np.ceil(max(g.kern.input_sensitivity())) for g in self.bgplvms])
def plotf(i, g, ax):
#ax.set_ylim([0,ymax])
return g.kern.plot_ARD(ax=ax, title=titles[i], *args, **kwargs)
fig = self._handle_plotting(fignum, ax, plotf, sharex=sharex, sharey=sharey)
return fig
def plot_latent(self, labels=None, which_indices=None,
resolution=50, ax=None, marker='o', s=40,
fignum=None, plot_inducing=True, legend=True,
plot_limits=None,
aspect='auto', updates=False, predict_kwargs={}, imshow_kwargs={}):
"""
see plotting.matplot_dep.dim_reduction_plots.plot_latent
if predict_kwargs is None, will plot latent spaces for 0th dataset (and kernel), otherwise give
predict_kwargs=dict(Yindex='index') for plotting only the latent space of dataset with 'index'.
"""
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from matplotlib import pyplot as plt
from ..plotting.matplot_dep import dim_reduction_plots
if "Yindex" not in predict_kwargs:
predict_kwargs['Yindex'] = 0
Yindex = predict_kwargs['Yindex']
if ax is None:
fig = plt.figure(num=fignum)
ax = fig.add_subplot(111)
else:
fig = ax.figure
self.kern = self.bgplvms[Yindex].kern
self.likelihood = self.bgplvms[Yindex].likelihood
plot = dim_reduction_plots.plot_latent(self, labels, which_indices,
resolution, ax, marker, s,
fignum, plot_inducing, legend,
plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)
ax.set_title(self.bgplvms[Yindex].name)
try:
fig.tight_layout()
except:
pass
return plot
def __getstate__(self):
state = super(MRD, self).__getstate__()
if 'kern' in state:
del state['kern']
if 'likelihood' in state:
del state['likelihood']
return state
def __setstate__(self, state):
# TODO:
super(MRD, self).__setstate__(state)
self.kern = self.bgplvms[0].kern
self.likelihood = self.bgplvms[0].likelihood
self.parameters_changed()
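# --- Illustrative usage (added sketch, not part of the original file) ---
# A minimal MRD construction, assuming GPy is installed; Y1 and Y2 below are
# hypothetical observation matrices sharing the same number of rows.
# import numpy as np, GPy
# Y1, Y2 = np.random.randn(40, 8), np.random.randn(40, 5)
# m = GPy.models.MRD([Y1, Y2], input_dim=3, num_inducing=10)
# m.optimize(messages=False)
# m.plot_scales()   # per-view ARD scales, as implemented above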
| bsd-3-clause |
pombredanne/bokeh | bokeh/models/tests/test_sources.py | 6 | 4842 | from __future__ import absolute_import
import unittest
from unittest import skipIf
import warnings
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
from bokeh.models.sources import DataSource, ColumnDataSource
class TestColumnDataSource(unittest.TestCase):
def test_basic(self):
ds = ColumnDataSource()
self.assertTrue(isinstance(ds, DataSource))
def test_init_dict_arg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
def test_init_dict_data_kwarg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data=data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_arg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_data_kwarg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(data=df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
def test_add_with_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], name="foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6], name="bar")
self.assertEquals(name, "bar")
def test_add_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3])
self.assertEquals(name, "Series 0")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_add_with_and_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_remove_exists(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
assert name
ds.remove("foo")
self.assertEquals(ds.column_names, [])
def test_remove_exists2(self):
with warnings.catch_warnings(record=True) as w:
ds = ColumnDataSource()
ds.remove("foo")
self.assertEquals(ds.column_names, [])
self.assertEquals(len(w), 1)
self.assertEquals(w[0].category, UserWarning)
self.assertEquals(str(w[0].message), "Unable to find column 'foo' in data source")
def test_stream_bad_data(self):
ds = ColumnDataSource(data=dict(a=[10], b=[20]))
with self.assertRaises(ValueError) as cm:
ds.stream(dict())
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: a, b)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: b)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=[10], x=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (extra: x)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], x=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: b, extra: x)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=[10, 20]))
self.assertEqual(str(cm.exception), "All streaming column updates must be the same length")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=np.ones((1,1))))
self.assertEqual(str(cm.exception), "stream(...) only supports 1d sequences, got ndarray with size (1, 1)")
def test_stream_good_data(self):
ds = ColumnDataSource(data=dict(a=[10], b=[20]))
ds._document = "doc"
stuff = {}
def mock(*args, **kw):
stuff['args'] = args
stuff['kw'] = kw
ds.data._stream = mock
ds.stream(dict(a=[11, 12], b=[21, 22]), "foo")
self.assertEqual(stuff['args'], ("doc", ds, dict(a=[11, 12], b=[21, 22]), "foo"))
self.assertEqual(stuff['kw'], {})
if __name__ == "__main__":
unittest.main()
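# --- Illustrative usage (added sketch, not part of the original test module) ---
# source = ColumnDataSource(data=dict(x=[1, 2, 3], y=[4, 5, 6]))
# source.stream(dict(x=[4], y=[7]))   # appends one row to every existing column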
| bsd-3-clause |
YuwenXiong/py-R-FCN | lib/pycocotools/coco.py | 16 | 14881 | __author__ = 'tylin'
__version__ = '1.0.1'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
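# Example usage (added sketch; 'annFile' and the category name are placeholders):
# coco = COCO(annFile)
# catIds = coco.getCatIds(catNms=['person'])
# imgIds = coco.getImgIds(catIds=catIds)
# anns = coco.loadAnns(coco.getAnnIds(imgIds=imgIds[0], catIds=catIds, iscrowd=None))
# coco.showAnns(anns)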
import json
import datetime
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
import urllib
import copy
import itertools
import mask
import os
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset = {}
self.anns = []
self.imgToAnns = {}
self.catToImgs = {}
self.imgs = {}
self.cats = {}
if not annotation_file == None:
print 'loading annotations into memory...'
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
print 'Done (t=%0.2fs)'%(time.time()- tic)
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print 'creating index...'
anns = {}
imgToAnns = {}
catToImgs = {}
cats = {}
imgs = {}
if 'annotations' in self.dataset:
imgToAnns = {ann['image_id']: [] for ann in self.dataset['annotations']}
anns = {ann['id']: [] for ann in self.dataset['annotations']}
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']] += [ann]
anns[ann['id']] = ann
if 'images' in self.dataset:
imgs = {im['id']: {} for im in self.dataset['images']}
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
cats = {cat['id']: [] for cat in self.dataset['categories']}
for cat in self.dataset['categories']:
cats[cat['id']] = cat
catToImgs = {cat['id']: [] for cat in self.dataset['categories']}
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']] += [ann['image_id']]
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
# this can be changed by defaultdict
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
if datasetType == 'instances':
ax = plt.gca()
polygons = []
color = []
for ann in anns:
c = np.random.random((1, 3)).tolist()[0]
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly, True,alpha=0.4))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = mask.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print ann['caption']
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
# res.dataset['info'] = copy.deepcopy(self.dataset['info'])
# res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])
print 'Loading and preparing results... '
tic = time.time()
anns = json.load(open(resFile))
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = mask.area([ann['segmentation']])[0]
if not 'bbox' in ann:
ann['bbox'] = mask.toBbox([ann['segmentation']])[0]
ann['id'] = id+1
ann['iscrowd'] = 0
print 'DONE (t=%0.2fs)'%(time.time()- tic)
res.dataset['annotations'] = anns
res.createIndex()
return res
def download( self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print 'Please specify target directory'
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urllib.urlretrieve(img['coco_url'], fname)
print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic)
| mit |
zihua/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
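# --- Illustrative usage (added sketch, not part of the original module) ---
# from sklearn.cluster import KMeans
# import numpy as np
# X = np.random.rand(100, 2)                     # hypothetical data
# labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)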
| bsd-3-clause |
deepmind/open_spiel | open_spiel/python/egt/alpharank_visualizer.py | 1 | 17905 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various visualization tools for Alpha-Rank.
All equations and variable names correspond to the following paper:
https://arxiv.org/abs/1903.01373
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
try:
import matplotlib.patches as patches # pylint: disable=g-import-not-at-top
import matplotlib.patheffects as PathEffects # pylint: disable=g-import-not-at-top
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
except ImportError as e:
logging.info("If your tests failed with the error 'ImportError: No module "
"named functools_lru_cache', this is a known bug in matplotlib "
"and there is a workaround (run sudo apt install "
"python-backports.functools-lru-cache. See: "
"https://github.com/matplotlib/matplotlib/issues/9344.")
raise ImportError(str(e))
import networkx as nx # pylint: disable=g-import-not-at-top
import numpy as np
from open_spiel.python.egt import utils
class NetworkPlot(object):
"""A class for visualizing the Alpha-Rank interaction network."""
def __init__(self,
payoff_tables,
rhos,
rho_m,
pi,
state_labels,
num_top_profiles=None):
"""Initializes a network plotting object.
Args:
payoff_tables: List of game payoff tables, one for each agent identity.
Each payoff_table may be either a 2D numpy array, or a
_PayoffTableInterface object.
rhos: Fixation probabilities.
rho_m: Neutral fixation probability.
pi: Stationary distribution of fixation Markov chain defined by rhos.
      state_labels: Labels corresponding to Markov states. For the
        single-population case, state_labels should be a list of pure strategy
        names. For the multi-population case, it should be a dict with
        (key, value) pairs: (population index, list of strategy names).
num_top_profiles: Set to (int) to show only the graph nodes corresponding
to the top k elements of stationary distribution, or None to show all.
"""
self.fig = plt.figure(figsize=(10, 10))
self.num_populations = len(payoff_tables)
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
self.num_strats_per_population = (
utils.get_num_strats_per_population(payoff_tables,
payoffs_are_hpt_format))
self.rhos = rhos
self.rho_m = rho_m
self.pi = pi
self.num_profiles = len(pi)
self.state_labels = state_labels
self.first_run = True
self.num_top_profiles = num_top_profiles
if self.num_top_profiles:
# More than total number of strats requested for plotting
if self.num_top_profiles > self.num_profiles:
self.num_top_profiles = self.num_profiles
# Skip the bottom num_profiles-k stationary strategies.
self.nodes_to_skip = list(self.pi.argsort()[:self.num_profiles -
self.num_top_profiles])
else:
self.nodes_to_skip = []
self._reset_cycle_counter()
def _reset_cycle_counter(self):
self.i_cycle_to_show = -1
def _draw_network(self):
"""Draws the NetworkX object representing the underlying graph."""
plt.clf()
if self.num_populations == 1:
node_sizes = 5000
node_border_width = 1.
else:
node_sizes = 15000
node_border_width = 3.
vmin, vmax = 0, np.max(self.pi) + 0.1
nx.draw_networkx_nodes(
self.g,
self.pos,
node_size=node_sizes,
node_color=self.node_colors,
edgecolors="k",
cmap=plt.cm.Blues,
vmin=vmin,
vmax=vmax,
linewidths=node_border_width)
nx.draw_networkx_edges(
self.g,
self.pos,
node_size=node_sizes,
arrowstyle="->",
arrowsize=10,
edge_color=self.edge_colors,
edge_cmap=plt.cm.Blues,
width=5)
nx.draw_networkx_edge_labels(self.g, self.pos, edge_labels=self.edge_labels)
if self.num_populations > 1:
subnode_separation = 0.1
subgraph = nx.Graph()
for i_population in range(self.num_populations):
subgraph.add_node(i_population)
for i_strat_profile in self.g:
x, y = self.pos[i_strat_profile]
if self.num_populations == 1:
node_text = "$\\pi_{" + self.state_labels[i_strat_profile] + "}=$"
node_text += str(np.round(self.pi[i_strat_profile], decimals=2))
else:
node_text = "" # No text for multi-population case as plot gets messy
txt = plt.text(
x,
y,
node_text,
horizontalalignment="center",
verticalalignment="center",
fontsize=12)
txt.set_path_effects(
[PathEffects.withStroke(linewidth=3, foreground="w")])
if self.num_populations > 1:
sub_pos = nx.circular_layout(subgraph)
subnode_labels = dict()
strat_profile = utils.get_strat_profile_from_id(
self.num_strats_per_population, i_strat_profile)
for i_population in subgraph.nodes():
i_strat = strat_profile[i_population]
subnode_labels[i_population] = "$s^{" + str(i_population + 1) + "}="
subnode_labels[i_population] += (
self.state_labels[i_population][i_strat] + "$")
# Adjust the node positions generated by NetworkX's circular_layout(),
# such that the node for the 1st strategy starts on the left.
sub_pos[i_population] = (-sub_pos[i_population] * subnode_separation +
self.pos[i_strat_profile])
nx.draw(
subgraph,
pos=sub_pos,
with_labels=True,
width=0.,
node_color="w",
labels=subnode_labels,
node_size=2500)
def compute_and_draw_network(self):
"""Computes the various node/edge connections of the graph and draws it."""
if np.max(self.rhos) < self.rho_m:
      print("All node-to-node fixation probabilities (not including self-cycles)"
            " are lower than neutral. Thus, no graph will be drawn.")
return
self.g = nx.MultiDiGraph()
self.edge_labels = {}
self.edge_alphas = []
rho_max = np.max(self.rhos / self.rho_m)
rho_m_alpha = 0.1 # Transparency of neutral selection edges
for i in range(self.num_profiles):
for j in range(self.num_profiles):
# Do not draw edge if any node involved is skipped
if j not in self.nodes_to_skip and i not in self.nodes_to_skip:
rate = self.rhos[i][j] / self.rho_m
# Draws edges when fixation from one strategy to another occurs (i.e.,
# rate > 1), or with fixation equal to neutral selection probability
# (i.e., rate == 1). This is consistent with visualizations used in
# finite-population literature.
if rate > 1:
# Compute alphas. Clip needed due to numerical precision.
alpha = np.clip(rho_m_alpha + (1 - rho_m_alpha) * rate / rho_max,
None, 1.)
self.g.add_edge(i, j, weight=alpha, label="{:.01f}".format(rate))
self.edge_alphas.append(alpha)
elif np.isclose(rate, 1):
alpha = rho_m_alpha
self.g.add_edge(i, j, weight=alpha, label="{:.01f}".format(rate))
self.edge_alphas.append(alpha)
# Label edges for non-self-loops with sufficient flowrate
if i != j and rate > 1:
edge_string = "$" + str(np.round(rate, decimals=2)) + "\\rho_m$"
else:
edge_string = ""
self.edge_labels[(i, j)] = edge_string
# MultiDiGraph nodes are not ordered, so order the node colors accordingly
self.node_colors = [self.pi[node] for node in self.g.nodes()]
self.cycles = list(nx.simple_cycles(self.g))
self.num_cycles = len(self.cycles)
# Color the edges of cycles if user requested it
if self.i_cycle_to_show >= 0:
all_cycle_edges = [
zip(nodes, (nodes[1:] + nodes[:1])) for nodes in self.cycles
]
cur_cycle_edges = all_cycle_edges[self.i_cycle_to_show]
self.edge_colors = []
for u, v in self.g.edges():
if (u, v) in cur_cycle_edges:
self.edge_colors.append([1., 0., 0.])
else:
self.edge_colors.append([1. - self.g[u][v][0]["weight"]] * 3)
else:
self.edge_colors = [
[1. - self.g[u][v][0]["weight"]] * 3 for u, v in self.g.edges()
]
self.edge_alphas = [self.g[u][v][0]["weight"] for u, v in self.g.edges()]
ax = plt.gca()
# Centered circular pose
self.pos = nx.layout.circular_layout(self.g)
all_x = [node_pos[0] for node, node_pos in self.pos.items()]
all_y = [node_pos[1] for node, node_pos in self.pos.items()]
min_x = np.min(all_x)
max_x = np.max(all_x)
min_y = np.min(all_y)
max_y = np.max(all_y)
for _, node_pos in self.pos.items():
node_pos[0] -= (max_x + min_x) / 2
node_pos[1] -= (max_y + min_y) / 2
# Rendering
self._draw_network()
if self.first_run:
ax.autoscale_view()
ax.set_axis_off()
ax.set_aspect("equal")
plt.ylim(-1.3, 1.3)
plt.xlim(-1.3, 1.3)
if self.first_run:
self.first_run = False
plt.axis("off")
plt.show()
def _draw_pie(ax,
ratios,
colors,
x_center=0,
y_center=0,
size=100,
clip_on=True,
zorder=0):
"""Plots a pie chart.
Args:
ax: plot axis.
ratios: list indicating size of each pie slice, with elements summing to 1.
colors: list indicating color of each pie slice.
x_center: x coordinate of pie center.
y_center: y coordinate of pie center.
size: pie size.
clip_on: control clipping of pie (e.g., to show it when it's out of axis).
zorder: plot z order (e.g., to show pie on top of other plot elements).
"""
xy = []
start = 0.
for ratio in ratios:
x = [0] + np.cos(
np.linspace(2 * np.pi * start, 2 * np.pi *
(start + ratio), 30)).tolist()
y = [0] + np.sin(
np.linspace(2 * np.pi * start, 2 * np.pi *
(start + ratio), 30)).tolist()
xy.append(list(zip(x, y)))
start += ratio
for i, xyi in enumerate(xy):
ax.scatter([x_center], [y_center],
marker=xyi,
s=size,
facecolor=colors[i],
edgecolors="none",
clip_on=clip_on,
zorder=zorder)
def generate_sorted_masses_strats(pi_list, curr_alpha_idx, strats_to_go):
"""Generates a sorted list of (mass, strats) tuples.
Args:
pi_list: List of stationary distributions, pi
curr_alpha_idx: Index in alpha_list for which to start clustering
strats_to_go: List of strategies that still need to be ordered
Returns:
Sorted list of (mass, strats) tuples.
"""
if curr_alpha_idx > 0:
sorted_masses_strats = list()
masses_to_strats = utils.cluster_strats(pi_list[curr_alpha_idx,
strats_to_go])
for mass, strats in sorted(masses_to_strats.items(), reverse=True):
if len(strats) > 1:
to_append = generate_sorted_masses_strats(pi_list, curr_alpha_idx - 1,
strats)
to_append = [(mass, [strats_to_go[s]
for s in strats_list])
for (mass, strats_list) in to_append]
sorted_masses_strats.extend(to_append)
else:
sorted_masses_strats.append((mass, [
strats_to_go[strats[0]],
]))
return sorted_masses_strats
else:
to_return = sorted(
utils.cluster_strats(pi_list[curr_alpha_idx, strats_to_go]).items(),
reverse=True)
to_return = [(mass, [strats_to_go[s]
for s in strats_list])
for (mass, strats_list) in to_return]
return to_return
def plot_pi_vs_alpha(pi_list,
alpha_list,
num_populations,
num_strats_per_population,
strat_labels,
num_strats_to_label,
plot_semilogx=True,
xlabel=r"Ranking-intensity $\alpha$",
ylabel=r"Strategy mass in stationary distribution $\pi$",
legend_sort_clusters=False):
"""Plots stationary distributions, pi, against selection intensities, alpha.
Args:
pi_list: List of stationary distributions, pi.
alpha_list: List of selection intensities, alpha.
num_populations: The number of populations.
num_strats_per_population: List of the number of strategies per population.
strat_labels: Human-readable strategy labels.
num_strats_to_label: The number of top strategies to label in the legend.
plot_semilogx: Boolean set to enable/disable semilogx plot.
xlabel: Plot xlabel.
ylabel: Plot ylabel.
legend_sort_clusters: If true, strategies in the same cluster are sorted in
the legend according to orderings for earlier alpha values. Primarily for
visualization purposes! Rankings for lower alpha values should be
interpreted carefully.
"""
# Cluster strategies for which the stationary distribution has similar masses
masses_to_strats = utils.cluster_strats(pi_list[-1, :])
# Set colors
num_strat_profiles = np.shape(pi_list)[1]
num_strats_to_label = min(num_strats_to_label, num_strat_profiles)
cmap = plt.get_cmap("Paired")
colors = [cmap(i) for i in np.linspace(0, 1, num_strat_profiles)]
# Plots stationary distribution vs. alpha series
plt.figure(facecolor="w")
axes = plt.gca()
legend_line_objects = []
legend_labels = []
rank = 1
num_strats_printed = 0
add_legend_entries = True
if legend_sort_clusters:
sorted_masses_strats = generate_sorted_masses_strats(
pi_list, pi_list.shape[0] - 1, range(pi_list.shape[1]))
else:
sorted_masses_strats = sorted(masses_to_strats.items(), reverse=True)
for mass, strats in sorted_masses_strats:
for profile_id in strats:
if num_populations == 1:
strat_profile = profile_id
else:
strat_profile = utils.get_strat_profile_from_id(
num_strats_per_population, profile_id)
if plot_semilogx:
series = plt.semilogx(
alpha_list,
pi_list[:, profile_id],
color=colors[profile_id],
linewidth=2)
else:
series = plt.plot(
alpha_list,
pi_list[:, profile_id],
color=colors[profile_id],
linewidth=2)
if add_legend_entries:
if num_strats_printed >= num_strats_to_label:
# Placeholder blank series for remaining entries
series = plt.semilogx(np.NaN, np.NaN, "-", color="none")
label = "..."
add_legend_entries = False
else:
label = utils.get_label_from_strat_profile(num_populations,
strat_profile,
strat_labels)
legend_labels.append(label)
legend_line_objects.append(series[0])
num_strats_printed += 1
rank += 1
# Plots pie charts on far right of figure to indicate clusters of strategies
# with identical rank
for mass, strats in iter(masses_to_strats.items()):
_draw_pie(
axes,
ratios=[1 / len(strats)] * len(strats),
colors=[colors[i] for i in strats],
x_center=alpha_list[-1],
y_center=mass,
size=200,
clip_on=False,
zorder=10)
# Axes ymax set slightly above highest stationary distribution mass
max_mass = np.amax(pi_list)
axes_y_max = np.ceil(
10. * max_mass) / 10 # Round upward to nearest first decimal
axes_y_max = np.clip(axes_y_max, 0., 1.)
# Plots a rectangle highlighting the rankings on the far right of the figure
box_x_min = alpha_list[-1] * 0.7
box_y_min = np.min(pi_list[-1, :]) - 0.05 * axes_y_max
width = 0.7 * alpha_list[-1]
height = np.max(pi_list[-1, :]) - np.min(
pi_list[-1, :]) + 0.05 * axes_y_max * 2
axes.add_patch(
patches.Rectangle((box_x_min, box_y_min),
width,
height,
edgecolor="b",
facecolor=(1, 0, 0, 0),
clip_on=False,
linewidth=5,
zorder=20))
# Plot formatting
axes.set_xlim(np.min(alpha_list), np.max(alpha_list))
axes.set_ylim([0.0, axes_y_max])
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
axes.set_axisbelow(True) # Axes appear below data series in terms of zorder
# Legend on the right side of the current axis
box = axes.get_position()
axes.set_position([box.x0, box.y0, box.width * 0.8, box.height])
axes.legend(
legend_line_objects,
legend_labels,
loc="center left",
bbox_to_anchor=(1.05, 0.5))
plt.grid()
plt.show()
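# --- Illustrative usage (added sketch, not part of the original module) ---
# Assuming `payoff_tables`, `rhos`, `rho_m`, `pi` and `strat_labels` were
# produced by the companion alpha-rank computation, the response graph and the
# pi-vs-alpha sweep can be rendered with the helpers defined above:
# plot = NetworkPlot(payoff_tables, rhos, rho_m, pi, strat_labels,
#                    num_top_profiles=8)
# plot.compute_and_draw_network()
# plot_pi_vs_alpha(pi_list, alpha_list, num_populations,
#                  num_strats_per_population, strat_labels,
#                  num_strats_to_label=8)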
| apache-2.0 |
giserh/mpld3 | examples/custom_plugin.py | 21 | 2557 | """
Defining a Custom Plugin
========================
Test the custom plugin demoed on the `Pythonic Perambulations
<http://jakevdp.github.io/blog/2014/01/10/d3-plugins-truly-interactive/>`_
blog. Hover over the points to see the associated sinusoid.
Use the toolbar buttons at the bottom-right of the plot to enable zooming
and panning, and to reset the view.
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import mpld3
from mpld3 import plugins, utils
class LinkedView(plugins.PluginBase):
"""A simple plugin showing how multiple axes can be linked"""
JAVASCRIPT = """
mpld3.register_plugin("linkedview", LinkedViewPlugin);
LinkedViewPlugin.prototype = Object.create(mpld3.Plugin.prototype);
LinkedViewPlugin.prototype.constructor = LinkedViewPlugin;
LinkedViewPlugin.prototype.requiredProps = ["idpts", "idline", "data"];
LinkedViewPlugin.prototype.defaultProps = {}
function LinkedViewPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
LinkedViewPlugin.prototype.draw = function(){
var pts = mpld3.get_element(this.props.idpts);
var line = mpld3.get_element(this.props.idline);
var data = this.props.data;
function mouseover(d, i){
line.data = data[i];
line.elements().transition()
.attr("d", line.datafunc(line.data))
.style("stroke", this.style.fill);
}
pts.elements().on("mouseover", mouseover);
};
"""
def __init__(self, points, line, linedata):
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "linkedview",
"idpts": utils.get_id(points, suffix),
"idline": utils.get_id(line),
"data": linedata}
fig, ax = plt.subplots(2)
# scatter periods and amplitudes
np.random.seed(0)
P = 0.2 + np.random.random(size=20)
A = np.random.random(size=20)
x = np.linspace(0, 10, 100)
data = np.array([[x, Ai * np.sin(x / Pi)]
for (Ai, Pi) in zip(A, P)])
points = ax[1].scatter(P, A, c=P + A,
s=200, alpha=0.5)
ax[1].set_xlabel('Period')
ax[1].set_ylabel('Amplitude')
# create the line object
lines = ax[0].plot(x, 0 * x, '-w', lw=3, alpha=0.5)
ax[0].set_ylim(-1, 1)
ax[0].set_title("Hover over points to see lines")
# transpose line data and add plugin
linedata = data.transpose(0, 2, 1).tolist()
plugins.connect(fig, LinkedView(points, lines[0], linedata))
mpld3.show()
| bsd-3-clause |
imaculate/scikit-learn | sklearn/ensemble/tests/test_iforest.py | 9 | 6928 | """
Testing for Isolation Forest algorithm (sklearn.ensemble.iforest).
"""
# Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.grid_search import ParameterGrid
from sklearn.ensemble import IsolationForest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris
from sklearn.utils import check_random_state
from sklearn.metrics import roc_auc_score
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_iforest():
"""Check Isolation Forest for various parameter settings."""
X_train = np.array([[0, 1], [1, 2]])
X_test = np.array([[2, 1], [1, 1]])
grid = ParameterGrid({"n_estimators": [3],
"max_samples": [0.5, 1.0, 3],
"bootstrap": [True, False]})
with ignore_warnings():
for params in grid:
IsolationForest(random_state=rng,
**params).fit(X_train).predict(X_test)
def test_iforest_sparse():
"""Check IForest for various parameter settings on sparse input."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"bootstrap": [True, False]})
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in grid:
# Trained on sparse format
sparse_classifier = IsolationForest(
n_estimators=10, random_state=1, **params).fit(X_train_sparse)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_classifier = IsolationForest(
n_estimators=10, random_state=1, **params).fit(X_train)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
assert_array_equal(sparse_results, dense_results)
def test_iforest_error():
"""Test that it gives proper exception on deficient input."""
X = iris.data
# Test max_samples
assert_raises(ValueError,
IsolationForest(max_samples=-1).fit, X)
assert_raises(ValueError,
IsolationForest(max_samples=0.0).fit, X)
assert_raises(ValueError,
IsolationForest(max_samples=2.0).fit, X)
# The dataset has less than 256 samples, explicitly setting
# max_samples > n_samples should result in a warning. If not set
# explicitly there should be no warning
assert_warns_message(UserWarning,
"max_samples will be set to n_samples for estimation",
IsolationForest(max_samples=1000).fit, X)
assert_no_warnings(IsolationForest(max_samples='auto').fit, X)
assert_no_warnings(IsolationForest(max_samples=np.int64(2)).fit, X)
assert_raises(ValueError, IsolationForest(max_samples='foobar').fit, X)
assert_raises(ValueError, IsolationForest(max_samples=1.5).fit, X)
def test_recalculate_max_depth():
"""Check max_depth recalculation when max_samples is reset to n_samples"""
X = iris.data
clf = IsolationForest().fit(X)
for est in clf.estimators_:
assert_equal(est.max_depth, int(np.ceil(np.log2(X.shape[0]))))
def test_max_samples_attribute():
X = iris.data
clf = IsolationForest().fit(X)
assert_equal(clf.max_samples_, X.shape[0])
clf = IsolationForest(max_samples=500)
assert_warns_message(UserWarning,
"max_samples will be set to n_samples for estimation",
clf.fit, X)
assert_equal(clf.max_samples_, X.shape[0])
clf = IsolationForest(max_samples=0.4).fit(X)
assert_equal(clf.max_samples_, 0.4*X.shape[0])
def test_iforest_parallel_regression():
"""Check parallel regression."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = IsolationForest(n_jobs=3,
random_state=0).fit(X_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = IsolationForest(n_jobs=1,
random_state=0).fit(X_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_iforest_performance():
"""Test Isolation Forest performs well"""
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
X_train = np.r_[X + 2, X - 2]
X_train = X[:100]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X[100:], X_outliers]
y_test = np.array([0] * 20 + [1] * 20)
# fit the model
clf = IsolationForest(max_samples=100, random_state=rng).fit(X_train)
# predict scores (the lower, the more normal)
y_pred = - clf.decision_function(X_test)
# check that there is at most 6 errors (false positive or false negative)
assert_greater(roc_auc_score(y_test, y_pred), 0.98)
def test_iforest_works():
# toy sample (the last two samples are outliers)
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [6, 3], [-4, 7]]
# Test LOF
clf = IsolationForest(random_state=rng, contamination=0.25)
clf.fit(X)
decision_func = - clf.decision_function(X)
pred = clf.predict(X)
# assert detect outliers:
assert_greater(np.min(decision_func[-2:]), np.max(decision_func[:-2]))
assert_array_equal(pred, 6 * [1] + 2 * [-1])
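# --- Illustrative usage (added sketch, not part of the original test module) ---
# clf = IsolationForest(n_estimators=100, contamination=0.1, random_state=0)
# clf.fit(X_train)                         # X_train: hypothetical 2-D array
# scores = -clf.decision_function(X_test)  # higher means more abnormal
# labels = clf.predict(X_test)             # +1 for inliers, -1 for outliers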
| bsd-3-clause |
jfinkels/networkx | examples/graph/atlas.py | 4 | 2761 | #!/usr/bin/env python
"""
Atlas of all graphs of 6 nodes or less.
"""
# Author: Aric Hagberg ([email protected])
# Copyright (C) 2004-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.generators.atlas import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
import random
def atlas6():
""" Return the atlas of all connected graphs of 6 nodes or less.
Attempt to check for isomorphisms and remove.
"""
Atlas = graph_atlas_g()[0:208] # 208
# remove isolated nodes, only connected graphs are left
U = nx.Graph() # graph for union of all graphs in atlas
for G in Atlas:
zerodegree = [n for n in G if G.degree(n)==0]
for n in zerodegree:
G.remove_node(n)
U = nx.disjoint_union(U, G)
# list of graphs of all connected components
C = nx.connected_component_subgraphs(U)
UU = nx.Graph()
# do quick isomorphic-like check, not a true isomorphism checker
nlist = [] # list of nonisomorphic graphs
for G in C:
# check against all nonisomorphic graphs so far
if not iso(G, nlist):
nlist.append(G)
UU = nx.disjoint_union(UU, G) # union the nonisomorphic graphs
return UU
def iso(G1, glist):
"""Quick and dirty nonisomorphism checker used to check isomorphisms."""
for G2 in glist:
if isomorphic(G1, G2):
return True
return False
if __name__ == '__main__':
G=atlas6()
print("graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G), nx.number_of_edges(G)))
print(nx.number_connected_components(G), "connected components")
try:
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
except ImportError:
try:
import pydot
from networkx.drawing.nx_pydot import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either "
"PyGraphviz or pydot")
import matplotlib.pyplot as plt
plt.figure(1, figsize=(8, 8))
# layout graphs with positions using graphviz neato
pos = graphviz_layout(G, prog="neato")
# color nodes the same in each connected subgraph
C = nx.connected_component_subgraphs(G)
for g in C:
c = [random.random()] * nx.number_of_nodes(g) # random color...
nx.draw(g,
pos,
node_size=40,
node_color=c,
vmin=0.0,
vmax=1.0,
with_labels=False
)
plt.savefig("atlas.png", dpi=75)
| bsd-3-clause |
siutanwong/scikit-learn | examples/manifold/plot_manifold_sphere.py | 258 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space, unlike other manifold-learning algorithms,
it does not seeks an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
ddboline/pylearn2 | pylearn2/sandbox/cuda_convnet/specialized_bench.py | 44 | 3906 | __authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano.compat.six.moves import xrange
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.tensor.nnet.conv import conv2d
from theano import function
import time
import matplotlib.pyplot as plt
def make_funcs(batch_size, rows, cols, channels, filter_rows,
num_filters):
rng = np.random.RandomState([2012,10,9])
filter_cols = filter_rows
base_image_value = rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32')
base_filters_value = rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32')
images = shared(base_image_value)
filters = shared(base_filters_value, name='filters')
# bench.py should always be run in gpu mode so we should not need a gpu_from_host here
layer_1_detector = FilterActs()(images, filters)
layer_1_pooled_fake = layer_1_detector[:,0:layer_1_detector.shape[0]:2,
0:layer_1_detector.shape[1]:2, :]
base_filters2_value = rng.uniform(-1., 1., (num_filters, filter_rows,
filter_cols, num_filters)).astype('float32')
filters2 = shared(base_filters_value, name='filters')
layer_2_detector = FilterActs()(images, filters2)
output = layer_2_detector
output_shared = shared( output.eval() )
cuda_convnet = function([], updates = { output_shared : output } )
cuda_convnet.name = 'cuda_convnet'
images_bc01 = base_image_value.transpose(3,0,1,2)
filters_bc01 = base_filters_value.transpose(3,0,1,2)
filters_bc01 = filters_bc01[:,:,::-1,::-1]
images_bc01 = shared(images_bc01)
filters_bc01 = shared(filters_bc01)
output_conv2d = conv2d(images_bc01, filters_bc01,
border_mode='valid')
output_conv2d_shared = shared(output_conv2d.eval())
baseline = function([], updates = { output_conv2d_shared : output_conv2d } )
baseline.name = 'baseline'
return cuda_convnet, baseline
def bench(f):
for i in xrange(3):
f()
trials = 10
t1 = time.time()
for i in xrange(trials):
f()
t2 = time.time()
return (t2-t1)/float(trials)
def get_speedup( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
return bench(baseline) / bench(cuda_convnet)
def get_time_per_10k_ex( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
batch_size = kwargs['batch_size']
return 10000 * bench(cuda_convnet) / float(batch_size)
def make_batch_size_plot(yfunc, yname, batch_sizes, rows, cols, channels, filter_rows, num_filters):
speedups = []
for batch_size in batch_sizes:
speedup = yfunc(batch_size = batch_size,
rows = rows,
cols = cols,
channels = channels,
filter_rows = filter_rows,
num_filters = num_filters)
speedups.append(speedup)
plt.plot(batch_sizes, speedups)
plt.title("cuda-convnet benchmark")
plt.xlabel("Batch size")
plt.ylabel(yname)
plt.show()
"""
make_batch_size_plot(get_speedup, "Speedup factor", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 7,
num_filters = 64)
"""
make_batch_size_plot(get_time_per_10k_ex, "Time per 10k examples", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 5,
num_filters = 64)
| bsd-3-clause |
Guneet-Dhillon/mxnet | example/rcnn/rcnn/core/tester.py | 25 | 10193 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import cPickle
import os
import time
import mxnet as mx
import numpy as np
from module import MutableModule
from rcnn.logger import logger
from rcnn.config import config
from rcnn.io import image
from rcnn.processing.bbox_transform import bbox_pred, clip_boxes
from rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
class Predictor(object):
def __init__(self, symbol, data_names, label_names,
context=mx.cpu(), max_data_shapes=None,
provide_data=None, provide_label=None,
arg_params=None, aux_params=None):
self._mod = MutableModule(symbol, data_names, label_names,
context=context, max_data_shapes=max_data_shapes)
self._mod.bind(provide_data, provide_label, for_training=False)
self._mod.init_params(arg_params=arg_params, aux_params=aux_params)
def predict(self, data_batch):
self._mod.forward(data_batch)
return dict(zip(self._mod.output_names, self._mod.get_outputs()))
def im_proposal(predictor, data_batch, data_names, scale):
data_dict = dict(zip(data_names, data_batch.data))
output = predictor.predict(data_batch)
# drop the batch index
boxes = output['rois_output'].asnumpy()[:, 1:]
scores = output['rois_score'].asnumpy()
# transform to original scale
boxes = boxes / scale
return scores, boxes, data_dict
def generate_proposals(predictor, test_data, imdb, vis=False, thresh=0.):
"""
Generate detections results using RPN.
:param predictor: Predictor
:param test_data: data iterator, must be non-shuffled
:param imdb: image database
:param vis: controls visualization
:param thresh: thresh for valid detections
:return: list of detected boxes
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
i = 0
t = time.time()
imdb_boxes = list()
original_boxes = list()
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scale = im_info[0, 2]
scores, boxes, data_dict = im_proposal(predictor, data_batch, data_names, scale)
t2 = time.time() - t
t = time.time()
# assemble proposals
dets = np.hstack((boxes, scores))
original_boxes.append(dets)
# filter proposals
keep = np.where(dets[:, 4:] > thresh)[0]
dets = dets[keep, :]
imdb_boxes.append(dets)
if vis:
vis_all_detection(data_dict['data'].asnumpy(), [dets], ['obj'], scale)
logger.info('generating %d/%d ' % (i + 1, imdb.num_images) +
'proposal %d ' % (dets.shape[0]) +
'data %.4fs net %.4fs' % (t1, t2))
i += 1
assert len(imdb_boxes) == imdb.num_images, 'calculations not complete'
# save results
rpn_folder = os.path.join(imdb.root_path, 'rpn_data')
if not os.path.exists(rpn_folder):
os.mkdir(rpn_folder)
rpn_file = os.path.join(rpn_folder, imdb.name + '_rpn.pkl')
with open(rpn_file, 'wb') as f:
cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)
if thresh > 0:
full_rpn_file = os.path.join(rpn_folder, imdb.name + '_full_rpn.pkl')
with open(full_rpn_file, 'wb') as f:
cPickle.dump(original_boxes, f, cPickle.HIGHEST_PROTOCOL)
logger.info('wrote rpn proposals to %s' % rpn_file)
return imdb_boxes
def im_detect(predictor, data_batch, data_names, scale):
output = predictor.predict(data_batch)
data_dict = dict(zip(data_names, data_batch.data))
if config.TEST.HAS_RPN:
rois = output['rois_output'].asnumpy()[:, 1:]
else:
rois = data_dict['rois'].asnumpy().reshape((-1, 5))[:, 1:]
im_shape = data_dict['data'].shape
# save output
scores = output['cls_prob_reshape_output'].asnumpy()[0]
bbox_deltas = output['bbox_pred_reshape_output'].asnumpy()[0]
# post processing
pred_boxes = bbox_pred(rois, bbox_deltas)
pred_boxes = clip_boxes(pred_boxes, im_shape[-2:])
# we used scaled image & roi to train, so it is necessary to transform them back
pred_boxes = pred_boxes / scale
return scores, pred_boxes, data_dict
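# Illustrative sketch (editorial addition, not part of the original module): the
# standard Faster R-CNN inverse box transform that bbox_pred, imported above,
# implements conceptually. The real rcnn.processing.bbox_transform version may
# differ in details (e.g. clipping of dw/dh), so treat this purely as
# documentation of the math: deltas are (dx, dy, dw, dh) per class, applied to
# the proposal boxes.
def _bbox_pred_sketch(boxes, deltas):
    """boxes: (N, 4) [x1, y1, x2, y2]; deltas: (N, 4 * num_classes)."""
    widths = boxes[:, 2] - boxes[:, 0] + 1.0
    heights = boxes[:, 3] - boxes[:, 1] + 1.0
    ctr_x = boxes[:, 0] + 0.5 * widths
    ctr_y = boxes[:, 1] + 0.5 * heights
    dx, dy = deltas[:, 0::4], deltas[:, 1::4]
    dw, dh = deltas[:, 2::4], deltas[:, 3::4]
    # shift the proposal centers and rescale widths/heights
    pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
    pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
    pred_w = np.exp(dw) * widths[:, np.newaxis]
    pred_h = np.exp(dh) * heights[:, np.newaxis]
    pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
    pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
    pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
    pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
    pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
    return pred_boxes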
def pred_eval(predictor, test_data, imdb, vis=False, thresh=1e-3):
"""
wrapper for calculating offline validation for faster data analysis
    in this example, all thresholds are set by hand
:param predictor: Predictor
    :param test_data: data iterator, must be non-shuffled
:param imdb: image database
:param vis: controls visualization
:param thresh: valid detection threshold
:return:
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
nms = py_nms_wrapper(config.TEST.NMS)
# limit detections to max_per_image over all classes
max_per_image = -1
num_images = imdb.num_images
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
i = 0
t = time.time()
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scale = im_info[0, 2]
scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, scale)
t2 = time.time() - t
t = time.time()
for j in range(1, imdb.num_classes):
indexes = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[indexes, j, np.newaxis]
cls_boxes = boxes[indexes, j * 4:(j + 1) * 4]
cls_dets = np.hstack((cls_boxes, cls_scores))
keep = nms(cls_dets)
all_boxes[j][i] = cls_dets[keep, :]
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
if vis:
boxes_this_image = [[]] + [all_boxes[j][i] for j in range(1, imdb.num_classes)]
vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, imdb.classes, scale)
t3 = time.time() - t
t = time.time()
logger.info('testing %d/%d data %.4fs net %.4fs post %.4fs' % (i, imdb.num_images, t1, t2, t3))
i += 1
det_file = os.path.join(imdb.cache_path, imdb.name + '_detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, protocol=cPickle.HIGHEST_PROTOCOL)
imdb.evaluate_detections(all_boxes)
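# Illustrative sketch (editorial addition): a minimal pure-NumPy greedy NMS,
# showing what py_nms_wrapper(config.TEST.NMS) does to each class's detections
# in pred_eval above. The wrapper actually used by this repo may differ in
# implementation (CPU/GPU) and tie-breaking, so this is only for illustration.
def _greedy_nms_sketch(dets, thresh):
    """dets: (N, 5) [x1, y1, x2, y2, score]; returns indices of kept boxes."""
    x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # intersection of the current box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # drop boxes whose IoU with the kept box exceeds the threshold
        order = order[np.where(ovr <= thresh)[0] + 1]
    return keep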
def vis_all_detection(im_array, detections, class_names, scale):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import matplotlib.pyplot as plt
import random
im = image.transform_inverse(im_array, config.PIXEL_MEANS)
plt.imshow(im)
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.random(), random.random(), random.random()) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:4] * scale
score = det[-1]
rect = plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor=color, linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(name, score),
bbox=dict(facecolor=color, alpha=0.5), fontsize=12, color='white')
plt.show()
def draw_all_detection(im_array, detections, class_names, scale):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import cv2
import random
color_white = (255, 255, 255)
im = image.transform_inverse(im_array, config.PIXEL_MEANS)
# change to bgr
im = cv2.cvtColor(im, cv2.cv.CV_RGB2BGR)
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256)) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:4] * scale
score = det[-1]
bbox = map(int, bbox)
cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
return im
| apache-2.0 |
mehdidc/scikit-learn | sklearn/svm/classes.py | 3 | 36924 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better (to large numbers of
samples).
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        because it is consistent, it is seldom used in practice: it rarely
        leads to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss
)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
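# Editorial usage sketch for LinearSVC (not an official doctest of this module);
# it mirrors the Examples sections of the other estimators in this file:
#
#     >>> import numpy as np
#     >>> from sklearn.svm import LinearSVC
#     >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
#     >>> y = np.array([1, 1, 2, 2])
#     >>> clf = LinearSVC().fit(X, y)
#     >>> clf.predict([[-0.8, -1]])    # expected to fall in class 1
#     array([1])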
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better (to large numbers of
samples).
This class supports both dense and sparse input.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the standard
        epsilon-insensitive (SVR) loss, while 'squared_epsilon_insensitive' is
        its square. The aliases 'l1' and 'l2' are deprecated.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
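# Editorial usage sketch for LinearSVR (not an official doctest of this module):
#
#     >>> import numpy as np
#     >>> from sklearn.svm import LinearSVR
#     >>> rng = np.random.RandomState(0)
#     >>> X = rng.randn(20, 3)
#     >>> y = X[:, 0] + 0.5 * X[:, 1]
#     >>> reg = LinearSVR(epsilon=0.0, C=1.0).fit(X, y)
#     >>> reg.predict(X[:2]).shape
#     (2,)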
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
.. The narrative documentation is available at http://scikit-learn.org/
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in the \
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
gamma=0.0, kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, random_state=None):
super(SVC, self).__init__(
'c_svc', kernel, degree, gamma, coef0, tol, C, 0., 0., shrinking,
probability, cache_size, class_weight, verbose, max_iter,
random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of kernel function
is significant only in poly, rbf, sigmoid.
gamma : float, optional (default=0.0)
Kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
Independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vector for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in \
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, verbose=False, max_iter=-1,
random_state=None):
super(NuSVC, self).__init__(
'nu_svc', kernel, degree, gamma, coef0, tol, 0., nu, 0., shrinking,
probability, cache_size, None, verbose, max_iter, random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of kernel function
is significant only in poly, rbf, sigmoid.
gamma : float, optional (default=0.0)
Kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma=0.0,
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
C=1.0, epsilon=0.1, shrinking=True, cache_size=200,
verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of kernel function
is significant only in poly, rbf, sigmoid.
gamma : float, optional (default=0.0)
Kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
Independent term in kernel function. It is only significant
in poly/sigmoid.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma=0.0, coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking: boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
nu=0.5, shrinking=True, cache_size=200, verbose=False,
max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
**params)
return self
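# Editorial usage sketch for OneClassSVM (not an official doctest of this
# module); the exact predictions depend on the random data, but points close to
# the training cloud are labelled +1 and distant points -1:
#
#     >>> import numpy as np
#     >>> from sklearn.svm import OneClassSVM
#     >>> rng = np.random.RandomState(0)
#     >>> X = 0.3 * rng.randn(100, 2)
#     >>> clf = OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1).fit(X)
#     >>> clf.predict(np.array([[0.0, 0.0], [4.0, 4.0]]))   # inlier, then outlier
#     array([ 1., -1.])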
| bsd-3-clause |
jiangxb1987/spark | python/pyspark/sql/session.py | 3 | 37802 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
_sc = None
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive SerDes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
def _sparkContext(self, sc):
with self._lock:
self._sc = sc
return self
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
if self._sc is not None:
sc = self._sc
else:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
# This SparkContext may be an existing one.
sc = SparkContext.getOrCreate(sparkConf)
# Do not update `SparkConf` for existing `SparkContext`, as it's shared
# by all sessions.
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances."""
_instantiatedSession = None
_activeSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
if self._jvm.SparkSession.getDefaultSession().isDefined() \
and not self._jvm.SparkSession.getDefaultSession().get() \
.sparkContext().isStopped():
jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
else:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
SparkSession._activeSession = self
self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@classmethod
@since(3.0)
def getActiveSession(cls):
"""
Returns the active SparkSession for the current thread, returned by the builder.
>>> s = SparkSession.getActiveSession()
>>> l = [('Alice', 1)]
>>> rdd = s.sparkContext.parallelize(l)
>>> df = s.createDataFrame(rdd, ['name', 'age'])
>>> df.select("age").collect()
[Row(age=1)]
"""
from pyspark import SparkContext
sc = SparkContext._active_spark_context
if sc is None:
return None
else:
if sc._jvm.SparkSession.getActiveSession().isDefined():
SparkSession(sc, sc._jvm.SparkSession.getActiveSession().get())
return SparkSession._activeSession
else:
return None
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions, etc.
:return: :class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.udf import UDFRegistration
return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:param names: list of column names
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None, names=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data, names=schema)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
:return corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
:return list of records
"""
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records]
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
"""
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
data types will be used to coerce the data in Pandas to Arrow conversion.
"""
from pyspark.serializers import ArrowStreamPandasSerializer
from pyspark.sql.types import from_arrow_type, to_arrow_type, TimestampType
from pyspark.sql.utils import require_minimum_pandas_version, \
require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
import pyarrow as pa
# Create the Spark schema from list of names passed in with Arrow types
if isinstance(schema, (list, tuple)):
arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
struct = StructType()
for name, field in zip(schema, arrow_schema):
struct.add(name, from_arrow_type(field.type), nullable=field.nullable)
schema = struct
# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
# Any timestamps must be coerced to be compatible with Spark
arrow_types = [to_arrow_type(TimestampType())
if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
for t in pdf.dtypes]
# Slice the DataFrame to be batched
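# NOTE: -(-a // b) is integer ceiling division, so every slice except
# possibly the last one holds exactly `step` rows.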
step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up
pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
# Create list of Arrow (columns, type) for serializer dump_stream
arrow_data = [[(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]
for pdf_slice in pdf_slices]
jsqlContext = self._wrapped._jsqlContext
safecheck = self._wrapped._conf.arrowSafeTypeConversion()
col_by_name = True # col by name only applies to StructType columns, can't happen here
ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)
def reader_func(temp_filename):
return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)
def create_RDD_server():
return self._jvm.ArrowRDDServer(jsqlContext)
# Create Spark DataFrame from Arrow stream file, using one batch per partition
jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)
jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@staticmethod
def _create_shell_session():
"""
Initialize a SparkSession for a pyspark shell session. This is called from shell.py
to make error handling simpler without needing to declare local variables in that
script, which would expose those to users.
"""
import py4j
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
try:
# Try to access HiveConf, it will raise exception if Hive is not added
conf = SparkConf()
if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
return SparkSession.builder\
.enableHiveSupport()\
.getOrCreate()
else:
return SparkSession.builder.getOrCreate()
except (py4j.protocol.Py4JError, TypeError):
if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
"please make sure you build spark with hive")
return SparkSession.builder.getOrCreate()
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of either :class:`Row`,
:class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value".
Each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation (e.g. row, tuple, int, boolean,
etc.), :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
.. note:: Usage with spark.sql.execution.arrow.pyspark.enabled=True is experimental.
.. note:: When Arrow optimization is enabled, strings inside Pandas DataFrame in Python
2 are converted into bytes as they are bytes in Python 2 whereas regular strings are
left as strings. When using strings in Python 2, use unicode `u""` as Python standard
practice.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
SparkSession._activeSession = self
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
# Must re-encode any unicode strings to be consistent with StructField names
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
if self._wrapped._conf.pandasRespectSessionTimeZone():
timezone = self._wrapped._conf.sessionLocalTimeZone()
else:
timezone = None
# If no schema supplied by user then get the names of columns only
if schema is None:
schema = [str(x) if not isinstance(x, basestring) else
(x.encode('utf-8') if not isinstance(x, str) else x)
for x in data.columns]
if self._wrapped._conf.arrowPySparkEnabled() and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema, timezone)
except Exception as e:
from pyspark.util import _exception_message
if self._wrapped._conf.arrowPySparkFallbackEnabled():
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to "
"true." % _exception_message(e))
warnings.warn(msg)
else:
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
"reached the error below and will not continue because automatic "
"fallback with 'spark.sql.execution.arrow.pyspark.fallback.enabled' "
"has been set to false.\n %s" % _exception_message(e))
warnings.warn(msg)
raise
data = self._convert_from_pandas(data, schema, timezone)
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` instances active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
# We should clean the default session up. See SPARK-23228.
self._jvm.SparkSession.clearDefaultSession()
self._jvm.SparkSession.clearActiveSession()
SparkSession._instantiatedSession = None
SparkSession._activeSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
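# Illustrative sketch (not part of the original module): a minimal end-to-end
# use of the pandas -> Spark conversion paths implemented above. The function,
# master URL and sample data below are hypothetical; the Arrow setting simply
# selects _create_from_pandas_with_arrow when pyarrow is available.
def _example_pandas_roundtrip():
    """Illustrative only: build a Spark DataFrame from a small pandas DataFrame."""
    import pandas as pd
    spark = SparkSession.builder.master("local[2]").getOrCreate()
    spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
    pdf = pd.DataFrame({"name": ["Alice", "Bob"], "age": [1, 2]})
    return spark.createDataFrame(pdf)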
if __name__ == "__main__":
_test()
| apache-2.0 |
berkeley-stat159/project-epsilon | code/utils/scripts/plot_mosaic.py | 4 | 2092 | """
"""
from __future__ import division, print_function
import sys, os, pdb
import numpy as np
import nibabel as nib
def plot_mosaic(img_data, transpose=False):
""" Return a mosaic plot for each slice of
the 3rd dimension of img_data
Parameters
----------
img_data : 3D array
    Image volume whose slices (3rd dimension) are tiled.
transpose : bool, optional
    If True, transpose each slice before placing it in the grid.
Returns
-------
grid_2D : 2D array
    Each slice of the 3rd dimension of img_data plotted in a mosaic.
"""
n_slices = img_data.shape[2]
# Dimensions of the mosaic grid
n_rows = int(np.ceil(float(np.sqrt(n_slices))))
n_cols = int(np.ceil(float(n_slices)/float(n_rows)))
# Define the 2D mosaic
grid_2D = np.zeros((n_rows*img_data.shape[0], n_cols*img_data.shape[1]))
z = 0
for i in range(n_rows):
for j in range(n_cols):
if z < n_slices:
if transpose:
img_data_slice = img_data[:,::-1,z].T
else:
img_data_slice = img_data[:,::-1,z]
grid_2D[i*img_data.shape[0]:(i+1)*img_data.shape[0],\
j*img_data.shape[1]:(j+1)*img_data.shape[1]] = img_data_slice
z += 1
return grid_2D
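# Example (illustrative, not part of the original script): for a volume of
# shape (64, 64, 20), n_rows = ceil(sqrt(20)) = 5 and n_cols = ceil(20/5) = 4,
# so plot_mosaic returns a (320, 256) mosaic array.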
if __name__=='__main__':
import matplotlib.pyplot as plt
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['image.interpolation'] = 'nearest'
project_path='../../../'
#img = nib.load(\
#'../../../data/ds005/sub001/BOLD/task001_run001/bold.nii.gz')
template = nib.load(project_path+\
'data/mni_icbm152_t1_tal_nlin_asym_09c_2mm.nii')
template_data_int = template.get_data()
template_data = template_data_int.astype(float)
img = nib.load(project_path+\
'data/ds005/sub001/model/model001/task001_run001.feat/' + \
'masked_filtered_func_data_mni.nii.gz')
img_data_int = img.get_data()
img_data = img_data_int.astype(float)
mean_data = np.mean(img_data, axis=-1)
plt.title('In brain voxels - mean values')
plt.imshow(plot_mosaic(template_data, transpose=False), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(mean_data, transpose=False), cmap='gray', alpha=1)
plt.colorbar()
plt.show()
| bsd-3-clause |
harisbal/pandas | pandas/tests/io/parser/test_network.py | 4 | 7742 | # -*- coding: utf-8 -*-
"""
Tests parsers ability to read and parse non-local files
and hence require a network connection to be read.
"""
import logging
import numpy as np
import pytest
from pandas.compat import BytesIO, StringIO
import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas.util.testing as tm
from pandas.io.parsers import read_csv
@pytest.mark.network
@pytest.mark.parametrize(
"compress_type, extension", [
('gzip', '.gz'), ('bz2', '.bz2'), ('zip', '.zip'),
pytest.param('xz', '.xz', marks=td.skip_if_no_lzma)
]
)
@pytest.mark.parametrize('mode', ['explicit', 'infer'])
@pytest.mark.parametrize('engine', ['python', 'c'])
def test_compressed_urls(salaries_table, compress_type, extension, mode,
engine):
check_compressed_urls(salaries_table, compress_type, extension, mode,
engine)
@tm.network
def check_compressed_urls(salaries_table, compression, extension, mode,
engine):
# test reading compressed urls with various engines and
# extension inference
base_url = ('https://github.com/pandas-dev/pandas/raw/master/'
'pandas/tests/io/parser/data/salaries.csv')
url = base_url + extension
if mode != 'explicit':
compression = mode
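# In 'infer' mode, read_csv is passed compression='infer' and derives the
# codec from the URL's file extension instead of an explicit codec name.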
url_table = read_csv(url, sep='\t', compression=compression, engine=engine)
tm.assert_frame_equal(url_table, salaries_table)
@pytest.fixture
def tips_df(datapath):
"""DataFrame with the tips dataset."""
return read_csv(datapath('io', 'parser', 'data', 'tips.csv'))
@pytest.mark.usefixtures("s3_resource")
@td.skip_if_not_us_locale()
class TestS3(object):
def test_parse_public_s3_bucket(self, tips_df):
pytest.importorskip('s3fs')
# more of an integration test due to the not-public contents portion
# can probably mock this though.
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' +
ext, compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
# Read public file from bucket with not-public contents
df = read_csv('s3://cant_get_it/tips.csv')
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
def test_parse_public_s3n_bucket(self, tips_df):
# Read from AWS s3 as "s3n" URL
df = read_csv('s3n://pandas-test/tips.csv', nrows=10)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_parse_public_s3a_bucket(self, tips_df):
# Read from AWS s3 as "s3a" URL
df = read_csv('s3a://pandas-test/tips.csv', nrows=10)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_parse_public_s3_bucket_nrows(self, tips_df):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' +
ext, nrows=10, compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_parse_public_s3_bucket_chunked(self, tips_df):
# Read with a chunksize
chunksize = 5
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df_reader = read_csv('s3://pandas-test/tips.csv' + ext,
chunksize=chunksize, compression=comp)
assert df_reader.chunksize == chunksize
for i_chunk in [0, 1, 2]:
# Read a couple of chunks and make sure we see them
# properly.
df = df_reader.get_chunk()
assert isinstance(df, DataFrame)
assert not df.empty
true_df = tips_df.iloc[
chunksize * i_chunk: chunksize * (i_chunk + 1)]
tm.assert_frame_equal(true_df, df)
def test_parse_public_s3_bucket_chunked_python(self, tips_df):
# Read with a chunksize using the Python parser
chunksize = 5
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df_reader = read_csv('s3://pandas-test/tips.csv' + ext,
chunksize=chunksize, compression=comp,
engine='python')
assert df_reader.chunksize == chunksize
for i_chunk in [0, 1, 2]:
# Read a couple of chunks and make sure we see them properly.
df = df_reader.get_chunk()
assert isinstance(df, DataFrame)
assert not df.empty
true_df = tips_df.iloc[
chunksize * i_chunk: chunksize * (i_chunk + 1)]
tm.assert_frame_equal(true_df, df)
def test_parse_public_s3_bucket_python(self, tips_df):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
def test_infer_s3_compression(self, tips_df):
for ext in ['', '.gz', '.bz2']:
df = read_csv('s3://pandas-test/tips.csv' + ext,
engine='python', compression='infer')
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
def test_parse_public_s3_bucket_nrows_python(self, tips_df):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
nrows=10, compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_s3_fails(self):
with pytest.raises(IOError):
read_csv('s3://nyqpug/asdf.csv')
# Receive a permission error when trying to read a private bucket.
# It's irrelevant here that this isn't actually a table.
with pytest.raises(IOError):
read_csv('s3://cant_get_it/')
def test_read_csv_handles_boto_s3_object(self,
s3_resource,
tips_file):
# see gh-16135
s3_object = s3_resource.meta.client.get_object(
Bucket='pandas-test',
Key='tips.csv')
result = read_csv(BytesIO(s3_object["Body"].read()), encoding='utf8')
assert isinstance(result, DataFrame)
assert not result.empty
expected = read_csv(tips_file)
tm.assert_frame_equal(result, expected)
def test_read_csv_chunked_download(self, s3_resource, caplog):
# 8 MB, S3FS uses 5 MB chunks
df = DataFrame(np.random.randn(100000, 4), columns=list('abcd'))
buf = BytesIO()
str_buf = StringIO()
df.to_csv(str_buf)
buf = BytesIO(str_buf.getvalue().encode('utf-8'))
s3_resource.Bucket("pandas-test").put_object(
Key="large-file.csv",
Body=buf)
with caplog.at_level(logging.DEBUG, logger='s3fs.core'):
read_csv("s3://pandas-test/large-file.csv", nrows=5)
# log of fetch_range (start, stop)
assert ((0, 5505024) in {x.args[-2:] for x in caplog.records})
| bsd-3-clause |
jkettleb/iris | lib/iris/io/format_picker.py | 1 | 11433 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
A module to provide convenient file format identification through a combination of filename extension
and file based *magic* numbers.
To manage a collection of FormatSpecifications for loading::
import iris.io.format_picker as fp
import matplotlib.pyplot as plt
fagent = fp.FormatAgent()
png_spec = fp.FormatSpecification('PNG image', fp.MagicNumber(8),
0x89504E470D0A1A0A,
handler=lambda filename: plt.imread(filename),
priority=5
)
fagent.add_spec(png_spec)
To identify a specific format from a file::
handling_spec = fagent.get_spec(png_filename, open(png_filename, 'rb'))
In the example, handling_spec will now be the png_spec previously added to the agent.
Now that a specification has been found, if a handler has been given with the specification, then the file can be handled::
handler = handling_spec.handler
if handler is None:
raise ValueError('File cannot be handled.')
else:
result = handler(filename)
The calling sequence of handler is dependent on the function given in the original specification and can be customised to your project's needs.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import collections
import functools
import os
import struct
import iris.io
class FormatAgent(object):
"""
The FormatAgent class is the containing object which is responsible for identifying the format of a given file
by interrogating its children FormatSpecification instances.
Typically a FormatAgent will be created empty and then extended with the :meth:`FormatAgent.add_spec` method::
agent = FormatAgent()
agent.add_spec(NetCDF_specification)
Less commonly, this can also be written::
agent = FormatAgent(NetCDF_specification)
"""
def __init__(self, format_specs=None):
""" """
self._format_specs = list(format_specs or [])
self._format_specs.sort()
def add_spec(self, format_spec):
"""Add a FormatSpecification instance to this agent for format consideration."""
self._format_specs.append(format_spec)
self._format_specs.sort()
def __repr__(self):
return 'FormatAgent(%r)' % self._format_specs
def __str__(self):
prefix = ' * ' if len(self._format_specs) > 1 else ''
return prefix + '\n * '.join(['%s' % format_spec for format_spec in self._format_specs])
def get_spec(self, basename, buffer_obj):
"""
Pick the first FormatSpecification which can handle the given
filename and file/buffer object.
.. note::
``buffer_obj`` may be ``None`` when a seekable file handle is not
feasible (such as over the http protocol). In these cases only the
format specifications which do not require a file handle are
tested.
"""
element_cache = {}
for format_spec in self._format_specs:
# For the case where a buffer_obj is None (such as for the
# http protocol) skip any specs which require a fh - they
# don't match.
if buffer_obj is None and format_spec.file_element.requires_fh:
continue
fmt_elem = format_spec.file_element
fmt_elem_value = format_spec.file_element_value
# cache the results for each file element
if repr(fmt_elem) not in element_cache:
# N.B. File oriented as this is assuming seekable stream.
if buffer_obj is not None and buffer_obj.tell() != 0:
# reset the buffer if tell != 0
buffer_obj.seek(0)
element_cache[repr(fmt_elem)] = \
fmt_elem.get_element(basename, buffer_obj)
# If we have a callable object, then call it and test its result, otherwise test using basic equality
if isinstance(fmt_elem_value, collections.Callable):
matches = fmt_elem_value(element_cache[repr(fmt_elem)])
elif element_cache[repr(fmt_elem)] == fmt_elem_value:
matches = True
else:
matches = False
if matches:
return format_spec
printable_values = {}
for key, value in element_cache.items():
value = str(value)
if len(value) > 50:
value = value[:50] + '...'
printable_values[key] = value
msg = ('No format specification could be found for the given buffer.'
' File element cache:\n {}'.format(printable_values))
raise ValueError(msg)
@functools.total_ordering
class FormatSpecification(object):
"""
Provides the base class for file type definition.
Every FormatSpecification instance has a name which can be accessed with the :attr:`FormatSpecification.name` property and
a FileElement, such as filename extension or 32-bit magic number, with an associated value for format identification.
"""
def __init__(self, format_name, file_element, file_element_value,
handler=None, priority=0, constraint_aware_handler=False):
"""
Constructs a new FormatSpecification given the format_name and particular FileElements
Args:
* format_name - string name of fileformat being described
* file_element - FileElement instance of the element which identifies this FormatSpecification
* file_element_value - The value that the file_element should take if a file matches this FormatSpecification
Kwargs:
* handler - function which will be called when the specification has been identified and is required to handle a format.
If None, then the file can still be identified but no handling can be done.
* priority - Integer giving a priority for considering this specification where higher priority means sooner consideration.
"""
if not isinstance(file_element, FileElement):
raise ValueError('file_element must be an instance of FileElement, got %r' % file_element)
self._file_element = file_element
self._file_element_value = file_element_value
self._format_name = format_name
self._handler = handler
self.priority = priority
self.constraint_aware_handler = constraint_aware_handler
def __hash__(self):
# Hashed by specification for consistent ordering in FormatAgent (including self._handler in this hash
# for example would order randomly according to object id)
return hash(self._file_element)
@property
def file_element(self):
return self._file_element
@property
def file_element_value(self):
return self._file_element_value
@property
def name(self):
"""The name of this FileFormat. (Read only)"""
return self._format_name
@property
def handler(self):
"""The handler function of this FileFormat. (Read only)"""
return self._handler
def __lt__(self, other):
if not isinstance(other, FormatSpecification):
return NotImplemented
return (-self.priority, hash(self)) < (-other.priority, hash(other))
def __eq__(self, other):
if not isinstance(other, FormatSpecification):
return NotImplemented
return self.priority == other.priority and hash(self) == hash(other)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
# N.B. loader is not always going to provide a nice repr if it is a lambda function, hence a prettier version is available in __str__
return 'FormatSpecification(%r, %r, %r, handler=%r, priority=%s)' % (self._format_name, self._file_element,
self._file_element_value, self.handler, self.priority)
def __str__(self):
return '%s%s (priority %s)' % (self.name, ' (no handler available)' if self.handler is None else '', self.priority)
class FileElement(object):
"""
Represents a specific aspect of a FileFormat which can be identified using the given element getter function.
"""
def __init__(self, requires_fh=True):
"""
Constructs a new file element, which may require a file buffer.
Kwargs:
* requires_fh - Whether this FileElement needs a file buffer.
"""
self.requires_fh = requires_fh
def get_element(self, basename, file_handle):
"""Called when identifying the element of a file that this FileElement is representing."""
raise NotImplementedError("get_element must be defined in a subclass")
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return '{}()'.format(self.__class__.__name__)
class MagicNumber(FileElement):
"""A :class:`FileElement` that returns a byte sequence in the file."""
len_formats = {4: ">L", 8: ">Q"}
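# 4- and 8-byte magic numbers are unpacked as big-endian unsigned integers;
# any other length is returned as the raw byte string (see get_element).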
def __init__(self, num_bytes, offset=None):
FileElement.__init__(self)
self._num_bytes = num_bytes
self._offset = offset
def get_element(self, basename, file_handle):
if self._offset is not None:
file_handle.seek(self._offset)
bytes = file_handle.read(self._num_bytes)
fmt = self.len_formats.get(self._num_bytes)
if len(bytes) != self._num_bytes:
raise EOFError(file_handle.name)
if fmt is None:
result = bytes
else:
result = struct.unpack(fmt, bytes)[0]
return result
def __repr__(self):
return 'MagicNumber({}, {})'.format(self._num_bytes, self._offset)
class FileExtension(FileElement):
"""A :class:`FileElement` that returns the extension from the filename."""
def get_element(self, basename, file_handle):
return os.path.splitext(basename)[1]
class LeadingLine(FileElement):
"""A :class:`FileElement` that returns the first line from the file."""
def get_element(self, basename, file_handle):
return file_handle.readline()
class UriProtocol(FileElement):
"""
A :class:`FileElement` that returns the "scheme" and "part" from a URI,
using :func:`~iris.io.decode_uri`.
"""
def __init__(self):
FileElement.__init__(self, requires_fh=False)
def get_element(self, basename, file_handle):
return iris.io.decode_uri(basename)[0]
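# Illustrative sketch (not part of the original module): registering a purely
# extension-based specification with a FormatAgent. The format name, extension
# and priority below are hypothetical.
def _example_extension_agent():
    """Return a FormatAgent that recognises '.csv' files by extension alone."""
    agent = FormatAgent()
    csv_spec = FormatSpecification('CSV text', FileExtension(), '.csv',
                                   handler=None, priority=3)
    agent.add_spec(csv_spec)
    return agent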
| lgpl-3.0 |
VirusTotal/msticpy | msticpy/sectools/tiproviders/ti_provider_base.py | 1 | 21069 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Module for TILookup classes.
Input can be a single IoC observable or a pandas DataFrame containing
multiple observables. Processing may require a an API key and
processing performance may be limited to a specific number of
requests per minute for the account type that you have.
"""
import abc
from abc import ABC
import math # noqa
import pprint
import re
from collections import Counter, namedtuple
from enum import Enum
from functools import lru_cache, singledispatch, total_ordering
from ipaddress import IPv4Address, IPv6Address, ip_address
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
from urllib.parse import quote_plus
import attr
import pandas as pd
from urllib3.exceptions import LocationParseError
from urllib3.util import parse_url
from ..._version import VERSION
from ...common.utility import export
from ..iocextract import IoCExtract, IoCType
__version__ = VERSION
__author__ = "Ian Hellen"
SanitizedObservable = namedtuple("SanitizedObservable", ["observable", "status"])
# pylint: disable=too-few-public-methods
@total_ordering
class TISeverity(Enum):
"""Threat intelligence report severity."""
unknown = -1
information = 0
warning = 1
high = 2
@classmethod
def parse(cls, value) -> "TISeverity":
"""
Parse string or numeric value to TISeverity.
Parameters
----------
value : Any
TISeverity, str or int
Returns
-------
TISeverity
TISeverity instance.
"""
if isinstance(value, TISeverity):
return value
if isinstance(value, str) and value.lower() in cls.__members__:
return cls[value.lower()]
if isinstance(value, int):
if value in [v.value for v in cls.__members__.values()]:
return cls(value)
return TISeverity.unknown
# pylint: disable=comparison-with-callable
def __eq__(self, other) -> bool:
"""
Return True if severities are equal.
Parameters
----------
other : Any
TISeverity to compare to.
Can be a numeric value or name of TISeverity value.
Returns
-------
bool
If severities are equal
"""
other_sev = TISeverity.parse(other)
return self.value == other_sev.value
def __gt__(self, other) -> bool:
"""
Return True self is greater than other.
Parameters
----------
other : Any
TISeverity to compare to.
Can be a numeric value or name of TISeverity value.
Returns
-------
bool
If severities are equal
"""
other_sev = TISeverity.parse(other)
return self.value > other_sev.value
# pylint: enable=comparison-with-callable
# pylint: disable=too-many-instance-attributes
@attr.s(auto_attribs=True)
class LookupResult:
"""Lookup result for IoCs."""
ioc: str
ioc_type: str
safe_ioc: str = ""
query_subtype: Optional[str] = None
provider: Optional[str] = None
result: bool = False
severity: int = attr.ib(default=0)
details: Any = None
raw_result: Optional[Union[str, dict]] = None
reference: Optional[str] = None
status: int = 0
@severity.validator
def _check_severity(self, attribute, value):
del attribute
if isinstance(value, TISeverity):
self.severity = value.name
return
self.severity = TISeverity.parse(value).name
@property
def summary(self):
"""Print a summary of the Lookup Result."""
p_pr = pprint.PrettyPrinter(indent=4)
print("ioc:", self.ioc, "(", self.ioc_type, ")")
print("result:", self.result)
# print("severity:", self.severity)
p_pr.pprint(self.details)
print("reference: ", self.reference)
@property
def raw_result_fmtd(self):
"""Print raw results of the Lookup Result."""
p_pr = pprint.PrettyPrinter(indent=4)
p_pr.pprint(self.raw_result)
@property
def severity_name(self) -> str:
"""
Return text description of severity score.
Returns
-------
str
Severity description.
"""
try:
return TISeverity(self.severity).name
except ValueError:
return TISeverity.unknown.name
def set_severity(self, value: Any):
"""
Set the severity from enum, int or string.
Parameters
----------
value : Any
The severity value to set
"""
self._check_severity(None, value)
@classmethod
def column_map(cls):
"""Return a dictionary that maps fields to DF Names."""
col_mapping = {}
for name in attr.fields_dict(cls):
out_name = "".join([part.capitalize() for part in name.split("_")])
col_mapping[name] = out_name
return col_mapping
# pylint: enable=too-many-instance-attributes
# pylint: disable=too-few-public-methods
class TILookupStatus(Enum):
"""Threat intelligence lookup status."""
ok = 0
not_supported = 1
bad_format = 2
query_failed = 3
other = 10
# pylint: enable=too-few-public-methods
_IOC_EXTRACT = IoCExtract()
@export
class TIProvider(ABC):
"""Abstract base class for Threat Intel providers."""
_IOC_QUERIES: Dict[str, Any] = {}
# pylint: disable=unused-argument
def __init__(self, **kwargs):
"""Initialize the provider."""
self._supported_types: Set[IoCType] = set()
self.description: Optional[str] = None
self._supported_types = {
IoCType.parse(ioc_type.split("-")[0]) for ioc_type in self._IOC_QUERIES
}
if IoCType.unknown in self._supported_types:
self._supported_types.remove(IoCType.unknown)
self.require_url_encoding = False
# pylint: disable=duplicate-code
@abc.abstractmethod
def lookup_ioc(
self, ioc: str, ioc_type: str = None, query_type: str = None, **kwargs
) -> LookupResult:
"""
Lookup a single IoC observable.
Parameters
----------
ioc : str
IoC Observable value
ioc_type : str, optional
IoC Type, by default None (type will be inferred)
query_type : str, optional
Specify the data subtype to be queried, by default None.
If not specified the default record type for the IoC type
will be returned.
Returns
-------
LookupResult
The returned results.
"""
def lookup_iocs(
self,
data: Union[pd.DataFrame, Dict[str, str], Iterable[str]],
obs_col: str = None,
ioc_type_col: str = None,
query_type: str = None,
**kwargs,
) -> pd.DataFrame:
"""
Lookup collection of IoC observables.
Parameters
----------
data : Union[pd.DataFrame, Dict[str, str], Iterable[str]]
Data input in one of three formats:
1. Pandas dataframe (you must supply the column name in
`obs_col` parameter)
2. Dict of observable, IoCType
3. Iterable of observables - IoCTypes will be inferred
obs_col : str, optional
DataFrame column to use for observables, by default None
ioc_type_col : str, optional
DataFrame column to use for IoCTypes, by default None
query_type : str, optional
Specify the data subtype to be queried, by default None.
If not specified the default record type for the IoC type
will be returned.
Returns
-------
pd.DataFrame
DataFrame of results.
"""
results = []
for observable, ioc_type in generate_items(data, obs_col, ioc_type_col):
if not observable:
continue
item_result = self.lookup_ioc(
ioc=observable, ioc_type=ioc_type, query_type=query_type
)
results.append(pd.Series(attr.asdict(item_result)))
return pd.DataFrame(data=results).rename(columns=LookupResult.column_map())
@abc.abstractmethod
def parse_results(self, response: LookupResult) -> Tuple[bool, TISeverity, Any]:
"""
Return the details of the response.
Parameters
----------
response : LookupResult
The returned data response
Returns
-------
Tuple[bool, TISeverity, Any]
bool = positive or negative hit
TISeverity = enumeration of severity
Object with match details
"""
@property
def supported_types(self) -> List[str]:
"""
Return list of supported IoC types for this provider.
Returns
-------
List[str]
List of supported type names
"""
return [ioc.name for ioc in self._supported_types]
@classmethod
def is_known_type(cls, ioc_type: str) -> bool:
"""
Return True if this a known IoC Type.
Parameters
----------
ioc_type : str
IoCType string to test
Returns
-------
bool
True if known type.
"""
return ioc_type in IoCType.__members__ and ioc_type != "unknown"
@classmethod
def usage(cls):
"""Print usage of provider."""
print(f"{cls.__doc__} Supported query types:")
for ioc_key in sorted(cls._IOC_QUERIES):
ioc_key_elems = ioc_key.split("-", maxsplit=1)
if len(ioc_key_elems) == 1:
print(f"\tioc_type={ioc_key_elems[0]}")
if len(ioc_key_elems) == 2:
print(
f"\tioc_type={ioc_key_elems[0]}, ioc_query_type={ioc_key_elems[1]}"
)
def is_supported_type(self, ioc_type: Union[str, IoCType]) -> bool:
"""
Return True if the passed type is supported.
Parameters
----------
ioc_type : Union[str, IoCType]
IoC type name or instance
Returns
-------
bool
True if supported.
"""
if isinstance(ioc_type, str):
ioc_type = IoCType.parse(ioc_type)
return ioc_type.name in self.supported_types
@staticmethod
@lru_cache(maxsize=1024)
def resolve_ioc_type(observable: str) -> str:
"""
Return IoCType determined by IoCExtract.
Parameters
----------
observable : str
IoC observable string
Returns
-------
str
IoC Type (or unknown if type could not be determined)
"""
return _IOC_EXTRACT.get_ioc_type(observable)
def _check_ioc_type(
self, ioc: str, ioc_type: str = None, query_subtype: str = None
) -> LookupResult:
"""
Check IoC Type and cleans up observable.
Parameters
----------
ioc : str
IoC observable
ioc_type : str, optional
IoC type, by default None
query_subtype : str, optional
Query sub-type, if any, by default None
Returns
-------
LookupResult
Lookup result with resolved ioc_type and pre-processed
observable.
LookupResult.status is none-zero on failure.
"""
result = LookupResult(
ioc=ioc,
safe_ioc=ioc,
ioc_type=ioc_type if ioc_type else self.resolve_ioc_type(ioc),
query_subtype=query_subtype,
result=False,
details="",
raw_result=None,
reference=None,
)
if not self.is_supported_type(result.ioc_type):
result.details = f"IoC type {result.ioc_type} not supported."
result.status = TILookupStatus.not_supported.value
return result
clean_ioc = preprocess_observable(
ioc, result.ioc_type, self.require_url_encoding
)
result.safe_ioc = clean_ioc.observable
if clean_ioc.status != "ok":
result.details = clean_ioc.status
result.status = TILookupStatus.bad_format.value
return result
# slightly stricter than normal URL regex to exclude '() from host string
_HTTP_STRICT_REGEX = r"""
(?P<protocol>(https?|ftp|telnet|ldap|file)://)
(?P<userinfo>([a-z0-9-._~!$&*+,;=:]|%[0-9A-F]{2})*@)?
(?P<host>([a-z0-9-._~!$&\*+,;=]|%[0-9A-F]{2})*)
(:(?P<port>\d*))?
(/(?P<path>([^?\#| ]|%[0-9A-F]{2})*))?
(\?(?P<query>([a-z0-9-._~!$&'()*+,;=:/?@]|%[0-9A-F]{2})*))?
(\#(?P<fragment>([a-z0-9-._~!$&'()*+,;=:/?@]|%[0-9A-F]{2})*))?\b"""
_HTTP_STRICT_RGXC = re.compile(_HTTP_STRICT_REGEX, re.I | re.X | re.M)
# pylint: disable=too-many-return-statements, too-many-branches
def preprocess_observable(
observable, ioc_type, require_url_encoding: bool = False
) -> SanitizedObservable:
"""
Preprocesses and checks validity of observable against declared IoC type.
Parameters
----------
observable : str
    The value of the IoC
ioc_type : str
    The IoC type
require_url_encoding : bool, optional
    Set to True if URLs require encoding before being passed to the
    provider, by default False
Returns
-------
SanitizedObservable
    Pre-processed result
"""
observable = observable.strip()
try:
validated = _IOC_EXTRACT.validate(observable, ioc_type)
except KeyError:
validated = False
if not validated:
return SanitizedObservable(
None, "Observable does not match expected pattern for " + ioc_type
)
if ioc_type == "url":
return _preprocess_url(observable, require_url_encoding)
if ioc_type == "ipv4":
return _preprocess_ip(observable, version=4)
if ioc_type == "ipv6":
return _preprocess_ip(observable, version=6)
if ioc_type in ["dns", "hostname"]:
return _preprocess_dns(observable)
if ioc_type in ["md5_hash", "sha1_hash", "sha256_hash", "file_hash"]:
return _preprocess_hash(observable)
return SanitizedObservable(observable, "ok")
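# Example (illustrative): preprocess_observable("10.10.10.10", "ipv4") is
# expected to return SanitizedObservable(None, "IP address is not global"),
# since non-global addresses are rejected by _preprocess_ip below.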
# Would complicate code with too many branches
# pylint: disable=too-many-return-statements
def _preprocess_url(
url: str, require_url_encoding: bool = False
) -> SanitizedObservable:
"""
Check that URL can be parsed.
Parameters
----------
url : str
The URL to check
require_url_encoding : bool
Set to True if URLs require encoding before being passed to the provider
Returns
-------
SanitizedObservable
Pre-processed result
"""
clean_url, scheme, host = get_schema_and_host(url, require_url_encoding)
if scheme is None or host is None:
return SanitizedObservable(None, f"Could not obtain scheme or host from {url}")
# get rid of some obvious false positives (localhost, local hostnames)
try:
addr = ip_address(host)
if addr.is_private:
return SanitizedObservable(None, "Host part of URL is a private IP address")
if addr.is_loopback:
return SanitizedObservable(
None, "Host part of URL is a loopback IP address"
)
except ValueError:
pass
if "." not in host:
return SanitizedObservable(None, "Host is unqualified domain name")
if scheme.lower() in ["file"]:
return SanitizedObservable(None, f"{scheme} URL scheme is not supported")
return SanitizedObservable(clean_url, "ok")
def get_schema_and_host(
url: str, require_url_encoding: bool = False
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
"""
Return URL scheme and host and cleaned URL.
Parameters
----------
url : str
Input URL
require_url_encoding : bool
Set to True if the URL needs encoding. Default is False.
Returns
-------
Tuple[Optional[str], Optional[str], Optional[str]
Tuple of URL, scheme, host
"""
clean_url = None
scheme = None
host = None
try:
scheme, _, host, _, _, _, _ = parse_url(url)
clean_url = url
except LocationParseError:
# Try to clean URL and re-check
cleaned_url = _clean_url(url)
if cleaned_url is not None:
try:
scheme, _, host, _, _, _, _ = parse_url(cleaned_url)
clean_url = cleaned_url
except LocationParseError:
pass
if require_url_encoding and clean_url:
clean_url = quote_plus(clean_url)
return clean_url, scheme, host
def _clean_url(url: str) -> Optional[str]:
"""
Clean URL to remove query params and fragments and any trailing stuff.
Parameters
----------
url : str
the URL to check
Returns
-------
Optional[str]
Cleaned URL or None if the input was not a valid URL
"""
# Try to clean URL and re-check
match_url = _HTTP_STRICT_RGXC.search(url)
if (
not match_url
or match_url.groupdict()["protocol"] is None
or match_url.groupdict()["host"] is None
):
return None
# build the URL dropping the query string and fragments
clean_url = match_url.groupdict()["protocol"]
if match_url.groupdict()["userinfo"]:
clean_url += match_url.groupdict()["userinfo"]
clean_url += match_url.groupdict()["host"]
if match_url.groupdict()["port"]:
clean_url += ":" + match_url.groupdict()["port"]
if match_url.groupdict()["path"]:
clean_url += "/" + match_url.groupdict()["path"]
return clean_url
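# Example (illustrative): _clean_url("https://example.com/path?q=1#frag")
# returns "https://example.com/path" - the query string and fragment are
# matched but deliberately not appended.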
# Would complicate code with too many branches
# pylint: disable=too-many-return-statements
def _preprocess_ip(ipaddress: str, version=4):
"""Ensure Ip address is a valid public IPv4 address."""
try:
addr = ip_address(ipaddress)
except ValueError:
return SanitizedObservable(None, "IP address is invalid format")
if version == 4 and not isinstance(addr, IPv4Address):
return SanitizedObservable(None, "Not an IPv4 address")
if version == 6 and not isinstance(addr, IPv6Address):
return SanitizedObservable(None, "Not an IPv6 address")
if addr.is_global:
return SanitizedObservable(ipaddress, "ok")
return SanitizedObservable(None, "IP address is not global")
def _preprocess_dns(domain: str) -> SanitizedObservable:
"""Ensure DNS is a valid-looking domain."""
if "." not in domain:
return SanitizedObservable(None, "Domain is unqualified domain name")
try:
addr = ip_address(domain)
del addr
return SanitizedObservable(None, "Domain is an IP address")
except ValueError:
pass
return SanitizedObservable(domain, "ok")
def _preprocess_hash(hash_str: str) -> SanitizedObservable:
"""Ensure Hash has minimum entropy (rather than a string of 'x')."""
str_entropy = entropy(hash_str)
if str_entropy < 3.0:
return SanitizedObservable(None, "String has too low an entropy to be a hash")
return SanitizedObservable(hash_str, "ok")
def entropy(input_str: str) -> float:
"""Compute entropy of input string."""
str_len = float(len(input_str))
return -sum(
map(
lambda a: (a / str_len) * math.log2(a / str_len),
Counter(input_str).values(),
)
)
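# Illustrative check (not part of the original module): a repeated character
# has zero Shannon entropy, while a genuine hex digest typically sits well
# above the 3.0-bit threshold used by _preprocess_hash. The digest below is
# the MD5 of an empty input, used purely as a sample value.
def _example_entropy_gap() -> bool:
    """Return True if the 3.0-bit threshold separates the two sample strings."""
    low = entropy("x" * 32)  # exactly 0.0
    high = entropy("d41d8cd98f00b204e9800998ecf8427e")  # roughly 3.4 bits
    return low < 3.0 < high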
@singledispatch
def generate_items(
data: Any, obs_col: Optional[str] = None, ioc_type_col: Optional[str] = None
) -> Iterable[Tuple[Optional[str], Optional[str]]]:
"""
Generate item pairs from different input types.
Parameters
----------
data : Any
DataFrame, dictionary or iterable
obs_col : Optional[str]
If `data` is a DataFrame, the column containing the observable value.
ioc_type_col : Optional[str]
If `data` is a DataFrame, the column containing the observable type.
Returns
-------
Iterable[Tuple[Optional[str], Optional[str]]] - a tuple of Observable/Type.
"""
del obs_col, ioc_type_col
if isinstance(data, Iterable):
for item in data:
yield item, TIProvider.resolve_ioc_type(item)
else:
yield None, None
@generate_items.register(pd.DataFrame)
def _(data: pd.DataFrame, obs_col: str, ioc_type_col: Optional[str] = None):
for _, row in data.iterrows():
if ioc_type_col is None:
yield row[obs_col], TIProvider.resolve_ioc_type(row[obs_col])
else:
yield row[obs_col], row[ioc_type_col]
@generate_items.register(dict) # type: ignore
def _(data: dict, obs_col: Optional[str] = None, ioc_type_col: Optional[str] = None):
for obs, ioc_type in data.items():
if not ioc_type:
ioc_type = TIProvider.resolve_ioc_type(obs)
yield obs, ioc_type
| mit |
AshivDhondea/SORADSIM | scenarios/main_057_iss_13.py | 1 | 13528 | # -*- coding: utf-8 -*-
"""
Created on 01 October 2017
@author: Ashiv Dhondea
"""
import AstroFunctions as AstFn
import AstroConstants as AstCnst
import GeometryFunctions as GF
import RadarSystem as RS
import TimeHandlingFunctions as THF
import math
import numpy as np
import datetime as dt
import pytz
import aniso8601
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)
params = {'text.latex.preamble' : [r'\usepackage{amsmath}', r'\usepackage{amssymb}']}
plt.rcParams.update(params)
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
import matplotlib as mpl
import matplotlib.patches as patches
from mpl_toolkits.basemap import Basemap
import pandas as pd # for loading MeerKAT dishes' latlon
# --------------------------------------------------------------------------- #
print 'Loading MeerKAT positions'
dframe = pd.read_excel("MeerKAT64v36.wgs84.64x4_edited.xlsx",sheetname="Sheet1")
dframe = dframe.reset_index()
meerkat_id = dframe['ID'][0:64]
meerkat_lat = dframe['Lat'][0:64]
meerkat_lon = dframe['Lon'][0:64]
# --------------------------------------------------------------------------- #
with open('main_meerkat_radar_parameters_doreen.txt') as fp:
for line in fp:
if 'centre_frequency' in line:
good_index = line.index('=')
centre_frequency = float(line[good_index+1:-1]);
if 'HPBW Rx' in line:
good_index = line.index('=')
beamwidth_rx = float(line[good_index+1:-1]);
if 'HPBW Tx' in line:
good_index = line.index('=')
beamwidth_tx = float(line[good_index+1:-1]);
if 'bandwidth' in line:
good_index = line.index('=')
bandwidth = float(line[good_index+1:-1]);
fp.close();
# --------------------------------------------------------------------------- #
speed_light = AstCnst.c*1e3; # [m/s]
wavelength = speed_light/centre_frequency; # [m]
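# (Illustrative check: an L-band centre frequency near 1.3 GHz would give a
# wavelength of roughly 0.23 m; the actual value is read from the parameter
# file above.)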
# --------------------------------------------------------------------------- #
print 'Loading data'
timevec = np.load('main_057_iss_05_timevec.npy'); # timevector
x_target = np.load('main_057_iss_05_x_target.npy'); # state vector in SEZ frame
theta_GMST = np.load('main_057_iss_05_theta_GMST.npy'); # GMST angles in rad
y_sph_tx = np.load('main_057_iss_05_y_sph_tx.npy'); # spherical measurement vectors in Tx frame
y_sph_rx = np.load('main_057_iss_05_y_sph_rx.npy'); # spherical measurement vectors in Rx frame
y_sph_rx_meerkat_01 = np.load('main_057_iss_05_y_sph_rx_meerkat_01.npy');
y_sph_rx_meerkat_02 = np.load('main_057_iss_05_y_sph_rx_meerkat_02.npy');
# discretization step length/PRF
delta_t = timevec[2]-timevec[1];
# time stamps
experiment_timestamps = [None]*len(timevec)
index=0;
with open('main_057_iss_05_experiment_timestamps.txt') as fp:
for line in fp:
modified_timestring = line[:-8];
experiment_timestamps[index] = aniso8601.parse_datetime(modified_timestring);
index+=1;
fp.close();
experiment_timestamps[-1] = experiment_timestamps[-1].replace(tzinfo=None)
title_string1 = str(experiment_timestamps[0].isoformat())+'/'+str(experiment_timestamps[-1].isoformat());
norad_id = '25544'
# --------------------------------------------------------------------------- #
# Bistatic Radar characteristics
# beamwidth of transmitter and receiver
beamwidth_rx = math.radians(beamwidth_rx);
# Location of MeerKAT
lat_meerkat_00 = float(meerkat_lat[0]);
lon_meerkat_00 = float(meerkat_lon[0]);
altitude_meerkat = 1.038; # [km]
lat_meerkat_01 = float(meerkat_lat[1]);
lon_meerkat_01 = float(meerkat_lon[1]);
lat_meerkat_02 = float(meerkat_lat[2]);
lon_meerkat_02 = float(meerkat_lon[2]);
lat_meerkat_03 = float(meerkat_lat[3]);
lon_meerkat_03 = float(meerkat_lon[3]);
# Location of Denel Bredasdorp
lat_denel = -34.6; # [deg]
lon_denel = 20.316666666666666; # [deg]
altitude_denel = 0.018;#[km]
# --------------------------------------------------------------------------- #
lat_sgp4 = np.load('main_057_iss_05_lat_sgp4.npy',);
lon_sgp4 = np.load('main_057_iss_05_lon_sgp4.npy');
tx_beam_indices_best = np.load('main_057_iss_07_tx_beam_indices_best.npy');
# --------------------------------------------------------------------------- #
# sort out a few variables
tx_bw_time_max = tx_beam_indices_best[1];
tx_beam_index_down = tx_beam_indices_best[0];
tx_beam_index_up = tx_beam_indices_best[2];
# --------------------------------------------------------------------------- #
# sort out a few variables
tx_beam_circ_index = np.load('main_057_iss_08_tx_beam_circ_index.npy');
earliest_pt = tx_beam_circ_index[0];
tx_bw_time_max = tx_beam_circ_index[1];
latest_pt = tx_beam_circ_index[2];
# --------------------------------------------------------------------------- #
rx0_beam_circ_index = np.load('main_057_iss_09_rx0_beam_circ_index.npy');
earliest_pt_rx = rx0_beam_circ_index[0]
index_for_rx0 = rx0_beam_circ_index[1]
latest_pt_rx = rx0_beam_circ_index[2]
rx1_beam_circ_index = np.load('main_057_iss_09_rx1_beam_circ_index.npy');
earliest_pt_rx1 = rx1_beam_circ_index[0]
index_for_rx1 = rx1_beam_circ_index[1]
latest_pt_rx1 = rx1_beam_circ_index[2]
rx2_beam_circ_index = np.load('main_057_iss_09_rx2_beam_circ_index.npy');
earliest_pt_rx2 = rx2_beam_circ_index[0]
index_for_rx2 = rx2_beam_circ_index[1]
latest_pt_rx2 = rx2_beam_circ_index[2]
# --------------------------------------------------------------------------- #
print 'finding relevant epochs'
# Find the epoch of the relevant data points
plot_lim = 6
plt_start_index = tx_beam_index_down - int(plot_lim/delta_t)
plt_end_index = tx_beam_index_up+1 + int(2/delta_t)
start_epoch_test = THF.fnCalculate_DatetimeEpoch(timevec,plt_start_index,experiment_timestamps[0]);
end_epoch_test = THF.fnCalculate_DatetimeEpoch(timevec,plt_end_index,experiment_timestamps[0]);
tx_beam_index_down_epoch = THF.fnCalculate_DatetimeEpoch(timevec,tx_beam_index_down,experiment_timestamps[0]);
tx_beam_index_up_epoch= THF.fnCalculate_DatetimeEpoch(timevec,tx_beam_index_up,experiment_timestamps[0]);
tx_bw_time_max_epoch = THF.fnCalculate_DatetimeEpoch(timevec,tx_bw_time_max,experiment_timestamps[0]);
earliest_pt_epoch= THF.fnCalculate_DatetimeEpoch(timevec,earliest_pt,experiment_timestamps[0]);
latest_pt_epoch = THF.fnCalculate_DatetimeEpoch(timevec,latest_pt,experiment_timestamps[0]);
earliest_pt_epoch = earliest_pt_epoch.replace(tzinfo=None)
end_epoch_test = end_epoch_test.replace(tzinfo=None);
start_epoch_test = start_epoch_test.replace(tzinfo=None)
title_string = str(start_epoch_test.isoformat())+'/'+str(end_epoch_test .isoformat());
tx_beam_index_down_epoch = tx_beam_index_down_epoch.replace(tzinfo=None);
tx_beam_index_up_epoch = tx_beam_index_up_epoch.replace(tzinfo=None)
tx_bw_time_max_epoch = tx_bw_time_max_epoch.replace(tzinfo=None)
latest_pt_epoch= latest_pt_epoch.replace(tzinfo=None)
# --------------------------------------------------------------------------- #
fig = plt.figure(1);ax = fig.gca();
plt.rc('text', usetex=True)
plt.rc('font', family='serif');
plt.rc('font',family='helvetica');
params = {'legend.fontsize': 8,
'legend.handlelength': 2}
plt.rcParams.update(params)
map = Basemap(llcrnrlon=3.0,llcrnrlat=-39.0,urcrnrlon=34.,urcrnrlat=-8.,resolution='i', projection='cass', lat_0 = 0.0, lon_0 = 0.0)
map.drawcoastlines()
map.drawcountries()
map.drawmapboundary(fill_color='lightblue')
map.fillcontinents(color='beige',lake_color='lightblue')
lon =np.rad2deg(lon_sgp4);
lat = np.rad2deg(lat_sgp4);
x,y = map(lon[plt_start_index:earliest_pt+1], lat[plt_start_index:earliest_pt+1])
map.plot(x, y, color="blue", latlon=False,linewidth=1)
x,y = map(lon[earliest_pt:latest_pt+1], lat[earliest_pt:latest_pt+1])
map.plot(x, y, color="crimson", latlon=False,linewidth=2,label=r"%s" %str(earliest_pt_epoch.isoformat())+'Z/'+str(latest_pt_epoch.isoformat())+'Z');
x,y = map(lon[latest_pt+1:plt_end_index+1], lat[latest_pt+1:plt_end_index+1])
map.plot(x, y, color="blue", latlon=False,linewidth=1)
x,y = map(lon_denel,lat_denel)
map.plot(x,y,marker='o',color='green'); # Denel Bredasdorp lat lon
x2,y2 = map(20,-34)
plt.annotate(r"\textbf{Tx}", xy=(x2, y2),color='green')
x,y = map(lon_meerkat_00,lat_meerkat_00)
map.plot(x,y,marker='o',color='blue'); # rx lat lon
x2,y2 = map(22,-30)
plt.annotate(r"\textbf{Rx}", xy=(x2, y2),color='blue')
parallels = np.arange(-81.,0.,5.)
# labels = [left,right,top,bottom]
map.drawparallels(parallels,labels=[False,True,False,False],labelstyle='+/-',linewidth=0.2)
meridians = np.arange(10.,351.,10.)
map.drawmeridians(meridians,labels=[True,False,False,True],labelstyle='+/-',linewidth=0.2)
plt.title(r'\textbf{Object %s trajectory during the interval %s}' %(norad_id,title_string), fontsize=12)
plt.legend(loc='upper right',title=r"Dwell-time interval");
ax.get_legend().get_title().set_fontsize('10')
fig.savefig('main_057_iss_13_map.pdf',bbox_inches='tight',pad_inches=0.05,dpi=10)
# --------------------------------------------------------------------------- #
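# Convert the SGP4 latitudes/longitudes (radians) at the Tx and Rx beam entry/exit
# indices to degrees; they are used for the great-circle lines in Figure 2.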
tx_beam_index_down_lat = math.degrees(lat_sgp4[tx_beam_index_down]);
tx_beam_index_down_lon = math.degrees(lon_sgp4[tx_beam_index_down]);
tx_beam_index_up_lat = math.degrees(lat_sgp4[tx_beam_index_up]);
tx_beam_index_up_lon = math.degrees(lon_sgp4[tx_beam_index_up]);
#earliest_pt_rx
rx_beam_index_down_lat = math.degrees(lat_sgp4[earliest_pt_rx]);
rx_beam_index_down_lon = math.degrees(lon_sgp4[earliest_pt_rx]);
rx_beam_index_up_lat = math.degrees(lat_sgp4[latest_pt_rx]);
rx_beam_index_up_lon = math.degrees(lon_sgp4[latest_pt_rx]);
rx1_beam_index_down_lat = math.degrees(lat_sgp4[earliest_pt_rx1]);
rx1_beam_index_down_lon = math.degrees(lon_sgp4[earliest_pt_rx1]);
rx1_beam_index_up_lat = math.degrees(lat_sgp4[latest_pt_rx1]);
rx1_beam_index_up_lon = math.degrees(lon_sgp4[latest_pt_rx1]);
rx2_beam_index_down_lat = math.degrees(lat_sgp4[earliest_pt_rx2]);
rx2_beam_index_down_lon = math.degrees(lon_sgp4[earliest_pt_rx2]);
rx2_beam_index_up_lat = math.degrees(lat_sgp4[latest_pt_rx2]);
rx2_beam_index_up_lon = math.degrees(lon_sgp4[latest_pt_rx2]);
# --------------------------------------------------------------------------- #
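# Figure 2: the same ground track on a tighter map extent, with great-circle lines
# from the Tx (Denel) and the three Rx (MeerKAT) sites to their beam entry/exit points.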
fig = plt.figure(2);
ax = fig.gca();
plt.rc('text', usetex=True)
plt.rc('font', family='serif');
plt.rc('font',family='helvetica');
params = {'legend.fontsize': 8,
'legend.handlelength': 2}
plt.rcParams.update(params)
map = Basemap(llcrnrlon=3.0,llcrnrlat=-38.0,urcrnrlon=34.,urcrnrlat=-16.,resolution='i', projection='cass', lat_0 = 0.0, lon_0 = 0.0)
map.drawcoastlines()
lon =np.rad2deg(lon_sgp4);
lat = np.rad2deg(lat_sgp4);
x,y = map(lon[plt_start_index:earliest_pt+1], lat[plt_start_index:earliest_pt+1])
map.plot(x, y, color="blue", latlon=False,linewidth=1)
x,y = map(lon[tx_beam_index_down:tx_beam_index_up+1], lat[tx_beam_index_down:tx_beam_index_up+1])
map.plot(x, y, color="crimson", latlon=False,linewidth=2,label=r"%s" %str(tx_beam_index_down_epoch.isoformat())+'Z/'+str(tx_beam_index_up_epoch.isoformat())+'Z');
x,y = map(lon[tx_beam_index_up+1:plt_end_index+1], lat[tx_beam_index_up+1:plt_end_index+1])
map.plot(x, y, color="blue", latlon=False,linewidth=1)
x_denel,y_denel = map(lon_denel,lat_denel)
map.plot(x_denel,y_denel,marker='o',color='green'); # Denel Bredasdorp lat lon
x2,y2 = map(20,-34)
plt.annotate(r"\textbf{Tx}", xy=(x2, y2),color='green')
tx_beam_index_down_x,tx_beam_index_down_y = map(tx_beam_index_down_lon,tx_beam_index_down_lat )
tx_beam_index_up_x,tx_beam_index_up_y = map(tx_beam_index_up_lon,tx_beam_index_up_lat )
map.drawgreatcircle(tx_beam_index_down_lon,tx_beam_index_down_lat, lon_denel,lat_denel,linewidth=0.5,color='gray')
map.drawgreatcircle(tx_beam_index_up_lon,tx_beam_index_up_lat, lon_denel,lat_denel,linewidth=0.5,color='gray')
map.drawgreatcircle(rx_beam_index_down_lon,rx_beam_index_down_lat, lon_meerkat_00,lat_meerkat_00,linewidth=0.5,color='mediumblue')
map.drawgreatcircle(rx_beam_index_up_lon,rx_beam_index_up_lat,lon_meerkat_00,lat_meerkat_00,linewidth=0.5,color='mediumblue')
map.drawgreatcircle(rx1_beam_index_down_lon,rx1_beam_index_down_lat, lon_meerkat_01,lat_meerkat_01,linewidth=0.5,color='orangered')
map.drawgreatcircle(rx1_beam_index_up_lon,rx1_beam_index_up_lat,lon_meerkat_01,lat_meerkat_01,linewidth=0.5,color='orangered')
map.drawgreatcircle(rx2_beam_index_down_lon,rx2_beam_index_down_lat, lon_meerkat_02,lat_meerkat_02,linewidth=0.5,color='purple')
map.drawgreatcircle(rx2_beam_index_up_lon,rx2_beam_index_up_lat,lon_meerkat_02,lat_meerkat_02,linewidth=0.5,color='purple')
x,y = map(lon_meerkat_00,lat_meerkat_00)
map.plot(x,y,marker='o',color='blue'); # rx lat lon
x2,y2 = map(22,-31)
plt.annotate(r"\textbf{Rx}", xy=(x2, y2),color='blue');
plt.title(r'\textbf{Object %s trajectory during the interval %s}' %(norad_id,title_string), fontsize=12)
plt.legend(loc='upper right',title=r"Dwell-time interval");
ax.get_legend().get_title().set_fontsize('10')
fig.savefig('main_057_iss_13_map2.pdf',bbox_inches='tight',pad_inches=0.05,dpi=10)
| mit |
ifcharming/original2.0 | tools/vis-micro-hudson.py | 4 | 9316 | #!/usr/bin/env python
# This is a visualizer which pulls microbenchmark results from the MySQL
# databases and visualizes them. Four graphs will be generated per workload,
# latency graphs on single node and multiple nodes, and throughput graphs
# on single node and multiple nodes.
#
# Run it without any arguments to see what arguments are needed.
import sys
import os
import time
import datetime
import MySQLdb
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def COLORS(k):
return (((k ** 3) % 255) / 255.0,
((k * 100) % 255) / 255.0,
((k * k) % 255) / 255.0)
class Stat:
def __init__(self, hostname, username, password, database):
self.conn = MySQLdb.connect(host = hostname,
user = username,
passwd = password,
db = database)
self.cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
def close(self):
self.cursor.close()
self.conn.close()
class LatencyStat(Stat):
LATENCIES = """
SELECT startTime AS time, numHosts AS hosts, AVG(latencies) AS latency
FROM ma_instances AS runs
JOIN ma_clientInstances AS clients ON clusterStartTime = startTime
JOIN (SELECT instanceId, AVG(clusterRoundtripAvg) AS latencies
FROM ma_clientProcedureStats
GROUP BY instanceId) AS stats ON stats.instanceId = clientInstanceId
WHERE runs.startTime >= '%s'
AND clients.applicationName = "Microbenchmark"
AND clients.subApplicationName = "%s"
GROUP BY startTime
LIMIT %u
"""
def get_latencies(self, workload_name, start_time, count):
res = []
latencies = {}
self.cursor.execute(self.LATENCIES % (start_time, workload_name, count))
res = list(self.cursor.fetchall())
for i in res:
i["time"] = datetime.date.fromtimestamp(i["time"] / 1000.0)
key = (i["time"], i["hosts"])
if i["latency"] == None:
continue
if key not in latencies \
or i["latency"] < latencies[key]["latency"]:
latencies[key] = i
return latencies.values()
class ThroughputStat(Stat):
THROUGHPUT = """
SELECT resultid as id,
hostcount as hosts,
date(time) as time,
avg(txnpersecond) as tps
FROM results
WHERE time >= '%s'
AND benchmarkname = 'org.voltdb.benchmark.workloads.Generator'
AND benchmarkoptions LIKE '%%workload=%s%%'
GROUP BY hostcount, date(time)
ORDER BY time DESC
LIMIT %u
"""
def get_throughputs(self, workload_name, time, count):
throughput_map = {}
self.cursor.execute(self.THROUGHPUT % (time, workload_name, count))
return list(self.cursor.fetchall())
class WorkloadNames(Stat):
NAMES = """
SELECT DISTINCT subApplicationName as names
FROM ma_clientInstances
WHERE applicationName = 'Microbenchmark' AND subApplicationName != 'FirstWorkload'
"""
def get_names(self):
name_map = {}
self.cursor.execute(self.NAMES)
return list(self.cursor.fetchall())
class Plot:
DPI = 100.0
def __init__(self, title, xlabel, ylabel, filename, w, h):
self.filename = filename
self.legends = {}
w = w == None and 800 or w
h = h == None and 300 or h
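# Default to an 800x300 pixel figure when width/height are not supplied; the
# pixel size is converted to inches below using the fixed DPI.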
fig = plt.figure(figsize=(w / self.DPI, h / self.DPI),
dpi=self.DPI)
self.ax = fig.add_subplot(111)
self.ax.set_title(title)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
fig.autofmt_xdate()
def plot(self, x, y, color, legend):
self.ax.plot(x, y, linestyle="-", label=str(legend), marker="^",
markerfacecolor=color, markersize=10)
def close(self):
formatter = matplotlib.dates.DateFormatter("%b %d")
self.ax.xaxis.set_major_formatter(formatter)
plt.legend(loc=0)
plt.savefig(self.filename, format="png", transparent=False,
bbox_inches="tight", pad_inches=0.2)
def parse_credentials(filename):
credentials = {}
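# Each line is assumed (judging from the parsing below) to be a JDBC-style URL of
# the form .../<host>/<db>?user=<name>\&password=<pass>; the account whose user
# name starts with "monitor" is taken as the latency database, the other as the
# throughput database.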
fd = open(filename, "r")
for i in fd:
line = i.strip().split("?")
credentials["hostname"] = line[0].split("/")[-2]
db = line[0].split("/")[-1]
pair = line[1].split("&")
user = pair[0].strip("\\").split("=")
password = pair[1].strip("\\").split("=")
if user[1].startswith("monitor"):
credentials["latency"] = {user[0]: user[1],
password[0]: password[1],
"database": db}
else:
credentials["throughput"] = {user[0]: user[1],
password[0]: password[1],
"database": db}
fd.close()
return credentials
def usage():
print "Usage:"
print "\t", sys.argv[0], "credential_file output_dir filename_base" \
" [numDays] [width] [height] "
print
print "\t", "number of past days to take into account"
print "\t", "width in pixels"
print "\t", "height in pixels"
def main():
if len(sys.argv) < 4:
usage()
exit(-1)
if not os.path.exists(sys.argv[2]):
print sys.argv[2], "does not exist"
exit(-1)
credentials = parse_credentials(sys.argv[1])
path = os.path.join(sys.argv[2], sys.argv[3])
numDays = 30
width = None
height = None
if len(sys.argv) >= 5:
numDays = int(sys.argv[4])
if len(sys.argv) >= 6:
width = int(sys.argv[5])
if len(sys.argv) >= 7:
height = int(sys.argv[6])
workload_names = WorkloadNames(credentials["hostname"],
credentials["latency"]["user"],
credentials["latency"]["password"],
credentials["latency"]["database"])
latency_stat = LatencyStat(credentials["hostname"],
credentials["latency"]["user"],
credentials["latency"]["password"],
credentials["latency"]["database"])
volt_stat = ThroughputStat(credentials["hostname"],
credentials["throughput"]["user"],
credentials["throughput"]["password"],
credentials["throughput"]["database"])
timedelta = datetime.timedelta(days=numDays)
starttime = datetime.datetime.now() - timedelta
timestamp = time.mktime(starttime.timetuple()) * 1000.0
names = workload_names.get_names()
for n in names:
name = n["names"]
latencies = latency_stat.get_latencies(name, timestamp, 900)
throughput = volt_stat.get_throughputs(name, starttime, 900)
latency_map = {}
latencies.sort(key=lambda x: x["time"])
for v in latencies:
if v["time"] == None or v["latency"] == None:
continue
if v["hosts"] not in latency_map:
latency_map[v["hosts"]] = {"time": [], "latency": []}
datenum = matplotlib.dates.date2num(v["time"])
latency_map[v["hosts"]]["time"].append(datenum)
latency_map[v["hosts"]]["latency"].append(v["latency"])
if 1 in latency_map:
pl = Plot("Average Latency on Single Node for Workload: " + name, "Time", "Latency (ms)",
path + "-latency-single-" + name + ".png",
width, height)
v = latency_map.pop(1)
pl.plot(v["time"], v["latency"], COLORS(1), 1)
pl.close()
if len(latency_map) > 0:
pl = Plot("Average Latency for Workload: " + name, "Time", "Latency (ms)",
path + "-latency-" + name + ".png", width, height)
for k in latency_map.iterkeys():
v = latency_map[k]
pl.plot(v["time"], v["latency"], COLORS(k), k)
pl.close()
throughput_map = {}
throughput.sort(key=lambda x: x["id"])
for v in throughput:
if v["hosts"] not in throughput_map:
throughput_map[v["hosts"]] = {"time": [], "tps": []}
datenum = matplotlib.dates.date2num(v["time"])
throughput_map[v["hosts"]]["time"].append(datenum)
throughput_map[v["hosts"]]["tps"].append(v["tps"])
if 1 in throughput_map:
pl = Plot("Performance on Single Node for Workload: " + name, "Time", "Throughput (txns/sec)",
path + "-throughput-single-" + name + ".png",
width, height)
v = throughput_map.pop(1)
pl.plot(v["time"], v["tps"], COLORS(1), 1)
pl.close()
if len(throughput_map) > 0:
pl = Plot("Performance for Workload: " + name, "Time", "Throughput (txns/sec)",
path + "-throughput-" + name + ".png", width, height)
for k in throughput_map.iterkeys():
v = throughput_map[k]
pl.plot(v["time"], v["tps"], COLORS(k), k)
pl.close()
latency_stat.close()
volt_stat.close()
if __name__ == "__main__":
main()
| gpl-3.0 |
adrienpacifico/openfisca-france-data | openfisca_france_data/input_data_builders/build_openfisca_survey_data/step_06_rebuild.py | 2 | 38058 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gc
import logging
from pandas import Series, concat
import numpy as np
from numpy import where
from openfisca_france_data.temporary import temporary_store_decorator
from openfisca_france_data import default_config_files_directory as config_files_directory
from openfisca_france_data.input_data_builders.build_openfisca_survey_data.base import (
year_specific_by_generic_data_frame_name
)
from openfisca_france_data.input_data_builders.build_openfisca_survey_data.utils import print_id, control
from openfisca_survey_manager.survey_collections import SurveyCollection
log = logging.getLogger(__name__)
@temporary_store_decorator(config_files_directory = config_files_directory, file_name = 'erfs')
def create_totals_first_pass(temporary_store = None, year = None):
assert temporary_store is not None
assert year is not None
# On part de la table individu de l'ERFS
# on renomme les variables
log.info(u"Creating Totals")
log.info(u"Etape 1 : Chargement des données")
indivim = temporary_store['indivim_{}'.format(year)]
assert not indivim.duplicated(['noindiv']).any(), "Présence de doublons"
# Deals individuals with imputed income : some individuals are in 'erf individu table' but
# not in the 'foyer' table. We need to create a foyer for them.
selection = Series()
for var_i in ["zsali", "zchoi", "zrsti", "zalri", "zrtoi", "zragi", "zrici", "zrnci"]:
var_o = var_i[:-1] + "o"
test = indivim[var_i] != indivim[var_o]
if selection.empty:
selection = test
else:
selection = test | selection
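# 'selection' now flags individuals for whom at least one z***i income variable
# differs from its z***o counterpart, i.e. individuals with imputed income.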
indivi_i = indivim[selection].copy()
indivi_i.rename(
columns = {
"ident": "idmen",
"persfip": "quifoy",
"zsali": "sali", # Inclu les salaires non imposables des agents d'assurance
"zchoi": "choi",
"zrsti": "rsti",
"zalri": "alr"
},
inplace = True,
)
assert indivi_i.quifoy.notnull().all()
indivi_i.loc[indivi_i.quifoy == "", "quifoy"] = "vous"
indivi_i.quelfic = "FIP_IMP"
# We merge them with the other individuals
indivim.rename(
columns = dict(
ident = "idmen",
persfip = "quifoy",
zsali = "sali", # Inclu les salaires non imposables des agents d'assurance
zchoi = "choi",
zrsti = "rsti",
zalri = "alr",
),
inplace = True,
)
if not (set(list(indivim.noindiv)) > set(list(indivi_i.noindiv))):
raise Exception("Individual ")
indivim.set_index("noindiv", inplace = True, verify_integrity = True)
indivi_i.set_index("noindiv", inplace = True, verify_integrity = True)
indivi = indivim
del indivim
indivi.update(indivi_i)
indivi.reset_index(inplace = True)
assert not(indivi.noindiv.duplicated().any()), "Doublons"
log.info("Etape 2 : isolation des FIP")
fip_imp = indivi.quelfic == "FIP_IMP"
indivi["idfoy"] = (
indivi.idmen.astype('int') * 100 +
(indivi.declar1.str[0:2]).convert_objects(convert_numeric=True)
)
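# idfoy is built as idmen * 100 + the declaration number read from the first two
# characters of declar1; e.g. (illustrative) idmen 1234 with declar1 starting
# with "02" gives idfoy 123402.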
# indivi.loc[fip_imp, "idfoy"] = np.nan
# Certains FIP (ou du moins avec revenus imputés) ont un numéro de déclaration d'impôt ( pourquoi ?)
assert indivi_i.declar1.notnull().all()
assert (indivi_i.declar1 == "").sum() > 0
fip_has_declar = (fip_imp) & (indivi.declar1 != "")
indivi.loc[fip_has_declar, "idfoy"] = (
indivi.idmen * 100 + indivi.declar1.str[0:2].convert_objects(convert_numeric = True))
del fip_has_declar
fip_no_declar = (fip_imp) & (indivi.declar1 == "")
del fip_imp
indivi.loc[fip_no_declar, "idfoy"] = 100 * indivi.loc[fip_no_declar, "idmen"] + indivi.loc[fip_no_declar, "noi"]
# WAS indivi["idmen"] * 100 + 50
indivi_fnd = indivi.loc[fip_no_declar, ["idfoy", "noindiv"]].copy()
while any(indivi_fnd.duplicated(subset = ["idfoy"])):
indivi_fnd["idfoy"] = where(
indivi_fnd.duplicated(subset = ["idfoy"]),
indivi_fnd["idfoy"] + 1,
indivi_fnd["idfoy"]
)
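# Increment duplicated idfoy values until every FIP without a declaration has a
# unique foyer id.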
# assert indivi_fnd["idfoy"].duplicated().value_counts()[False] == len(indivi_fnd["idfoy"].values), \
# "Duplicates remaining"
assert not(indivi.noindiv.duplicated().any()), "Doublons"
indivi.idfoy.loc[fip_no_declar] = indivi_fnd.idfoy.copy()
del indivi_fnd, fip_no_declar
log.info(u"Etape 3 : Récupération des EE_NRT")
nrt = indivi.quelfic == "EE_NRT"
indivi.loc[nrt, 'idfoy'] = indivi.loc[nrt, 'idmen'] * 100 + indivi.loc[nrt, 'noi']
indivi.loc[nrt, 'quifoy'] = "vous"
del nrt
pref_or_cref = indivi.lpr.isin([1, 2])
adults = (indivi.quelfic.isin(["EE", "EE_CAF"])) & (pref_or_cref)
pref = adults & (indivi.lpr == 1)
cref = adults & (indivi.lpr == 2)
indivi.loc[adults, "idfoy"] = indivi.loc[adults, 'idmen'] * 100 + indivi.loc[adults, 'noiprm']
indivi.loc[pref, "quifoy"] = "vous"
indivi.loc[cref, "quifoy"] = "conj"
del adults, pref, cref
assert indivi.idfoy[indivi.lpr.dropna().isin([1, 2])].all()
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].unique()
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList)
log.info(u"Il reste {} idfoy problématiques".format(
indivi_without_idfoy.sum()
))
# Conjoints qui dont il n'existe pas de vous avec leur idfoy.
# Problème de double déclaration: si leur conjoint à un noindiv à leur idfoy on switch les déclarants
log.info("{} conj without a valid idfoy".format(
(indivi_without_idfoy & indivi.idfoy.notnull() & indivi.quifoy.isin(['conj'])).sum()
))
if (indivi_without_idfoy & indivi.idfoy.notnull() & indivi.quifoy.isin(['conj'])).any():
# On traite les gens qui ont quifoy=conj mais dont l'idfoy n'a pas de vous
# 1) s'ils ont un conjoint et qu'il est vous avec un idfoy valide on leur attribue son idfoy:
avec_conjoint = (
indivi_without_idfoy &
indivi.idfoy.notnull() &
indivi.quifoy.isin(['conj']) &
(indivi.noicon != 0) &
(100 * indivi.idmen + indivi.noicon).isin(idfoyList)
)
indivi.loc[avec_conjoint, 'idfoy'] = (
100 * indivi.loc[avec_conjoint, 'idmen'] + indivi.loc[avec_conjoint, 'noicon']
)
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList) # mise à jour des cas problématiques
del avec_conjoint
if (indivi_without_idfoy & indivi.idfoy.notnull() & indivi.quifoy.isin(['conj'])).any():
# 2) sinon ils deviennent vous
devient_vous = (
indivi_without_idfoy &
indivi.idfoy.notnull() &
indivi.quifoy.isin(['conj']) &
(indivi.noicon == 0)
)
indivi.loc[devient_vous, 'idfoy'] = indivi.loc[devient_vous, 'noindiv'].copy()
indivi.loc[devient_vous, 'quifoy'] = 'vous'
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].unique()
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList) # Mise à jour des cas problématiques
del devient_vous
problem = (indivi_without_idfoy & indivi.idfoy.notnull() & indivi.quifoy.isin(['conj']))
if problem.sum() > 0:
log.info("Dropping {} conj without valid idfoy".format(
problem.sum()
))
indivi.drop(indivi[problem].index, inplace = True)
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList) # Mise à jour des cas problématiques
problem = (indivi_without_idfoy & indivi.idfoy.notnull() & indivi.quifoy.isin(['conj']))
assert not problem.any()
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].unique()
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList)
log.info("Remainning {} non valid idfoy".format(
indivi_without_idfoy.sum()
))
# Les personnes à charge de personnes repérées comme conjoint idfoy = conj_noindiv
# (problème des doubles déclarations) doivent récupérer l'idfoy de celles-ci
pac = (
indivi_without_idfoy & indivi.idfoy.notnull() & indivi.quifoy.isin(['pac'])
)
log.info(u"Dealing with {} non valid idfoy of pacs".format(
pac.sum()
))
conj_noindiv = indivi.idfoy[pac].copy()
new_idfoy_by_old = indivi.loc[
indivi.noindiv.isin(conj_noindiv), ['noindiv', 'idfoy']
].astype('int').set_index('noindiv').squeeze().to_dict()
indivi.loc[pac, 'idfoy'] = indivi.loc[pac, 'idfoy'].map(new_idfoy_by_old)
del pac
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList) # Mise à jour des cas problématiques
assert not (indivi_without_idfoy & indivi.idfoy.notnull() & indivi.quifoy.isin(['pac'])).any()
# Il faut traiter les idfoy non attribués
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList)
assert (indivi_without_idfoy == indivi.idfoy.isnull()).all()
log.info(u"Il faut traiter les {} idfoy non attribués".format(
indivi_without_idfoy.sum()
))
# Adultes non enfants avec conjoints déclarants
married_adult_with_vous = (
indivi_without_idfoy &
((indivi.noiper == 0) | (indivi.noimer == 0)) &
(indivi.age >= 25) &
(indivi.noicon > 0) &
(100 * indivi.idmen + indivi.noicon).isin(idfoyList)
)
indivi.loc[married_adult_with_vous, 'idfoy'] = (
100 * indivi.loc[married_adult_with_vous, 'idmen'] + indivi.loc[married_adult_with_vous, 'noicon']
)
indivi.loc[married_adult_with_vous, 'quifoy'] = 'conj'
log.info(
u"""Il y a {} adultes > 25 ans non enfants avec conjoints déclarants""".format(
married_adult_with_vous.sum()
)
)
# Les deux membres du couples n'ont pas d'idfoy
married_adult_without_vous = (
indivi_without_idfoy &
((indivi.noiper == 0) | (indivi.noimer == 0)) &
(indivi.age >= 18) &
(indivi.noicon > 0) &
(~married_adult_with_vous)
)
# On les groupes par ménages, on vérifie qu'ils ne sont que deux
couple_by_idmen = (
(indivi.loc[
married_adult_without_vous, ['idmen', 'noindiv']
].groupby('idmen').agg('count')) == 2).astype('int').squeeze().to_dict()
couple_idmens = list(idmen for idmen in couple_by_idmen.keys() if couple_by_idmen[idmen])
# On crée un foyer vous-conj si couple
vous = married_adult_without_vous & (
((indivi.sexe == 1) & indivi.idmen.isin(couple_idmens)) |
(~indivi.idmen.isin(couple_idmens))
)
conj = married_adult_without_vous & (~vous) & indivi.idmen.isin(couple_idmens)
indivi.loc[vous, 'idfoy'] = indivi.loc[vous, 'noindiv'].copy()
indivi.loc[vous, 'quifoy'] = 'vous'
indivi.loc[conj, 'idfoy'] = 100 * indivi.loc[conj, 'idmen'] + indivi.loc[conj, 'noicon']
indivi.loc[conj, 'quifoy'] = 'conj'
del vous, conj
log.info(
u"""Il y a {} adultes > 25 ans non enfants sans conjoints déclarants: on crée un foyer""".format(
married_adult_without_vous.sum()
)
)
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].unique()
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList)
# Cas des enfants agés sans conjoint >= 25 ans
non_married_aged_kids = (
indivi_without_idfoy &
((indivi.noiper > 0) | (indivi.noimer > 0)) &
(indivi.age >= 25) &
(indivi.noicon == 0)
)
indivi.loc[non_married_aged_kids, 'idfoy'] = indivi.loc[non_married_aged_kids, 'noindiv'].copy()
indivi.loc[non_married_aged_kids, 'quifoy'] = 'vous'
log.info(
u"""On crée un foyer fiscal indépendants pour les {} enfants agés de plus de 25 ans sans conjoint
vivant avec leurs parents""".format(
non_married_aged_kids.sum()
)
)
del non_married_aged_kids
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].unique()
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList)
# Cas des enfants agés avec conjoint >= 18 ans
married_aged_kids = (
indivi_without_idfoy &
((indivi.noiper > 0) | (indivi.noimer > 0)) &
(indivi.age >= 18) &
(indivi.noicon != 0)
)
noiconjs = 100 * indivi.idmen + indivi.noicon
quifoy_by_noiconj = indivi.loc[
indivi.noindiv.isin(noiconjs[married_aged_kids]), ['noindiv', 'quifoy']
].set_index('noindiv').dropna().squeeze().to_dict()
is_conj_vous = noiconjs.map(quifoy_by_noiconj) == "vous"
indivi.loc[married_aged_kids & is_conj_vous, 'quifoy'] = "conj"
indivi.loc[married_aged_kids & is_conj_vous, 'idfoy'] = noiconjs[married_aged_kids & is_conj_vous].copy()
log.info("""Il y a {} enfants agés de plus de 25 ans avec conjoint
vivant avec leurs parents qui ne sont pas traités""".format(
married_aged_kids.sum()
)) # Il n'y en a pas en 2009
del married_aged_kids, noiconjs, is_conj_vous
# Colocations
if indivi_without_idfoy.any():
potential_idmens = indivi.loc[indivi_without_idfoy, 'idmen'].copy()
colocs = indivi.loc[indivi.idmen.isin(potential_idmens), ['idmen', 'age', 'quifoy']].copy()
coloc_by_idmen = colocs.groupby('idmen').agg({
'age':
lambda x:
(abs((x.min() - x.max())) < 20) & (x.min() >= 18),
'quifoy':
lambda x:
(x == 'vous').sum() >= 1,
}
)
coloc_dummy_by_idmen = (coloc_by_idmen.age * coloc_by_idmen.quifoy)
coloc_idmens = coloc_dummy_by_idmen.index[coloc_dummy_by_idmen.astype('bool')].tolist()
colocataires = indivi_without_idfoy & indivi.idmen.isin(coloc_idmens)
indivi.loc[colocataires, 'quifoy'] = 'vous'
indivi.loc[colocataires, 'idfoy'] = indivi.loc[colocataires, 'noindiv'].copy()
log.info(u"Il y a {} colocataires".format(
colocataires.sum()
))
del colocataires, coloc_dummy_by_idmen, coloc_by_idmen, coloc_idmens, colocs
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].unique()
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList)
# On met le reste des adultes de plus de 25 ans dans des foyers uniques
other_adults = indivi_without_idfoy & (indivi.age >= 25)
if indivi_without_idfoy.any():
indivi.loc[other_adults, 'quifoy'] = 'vous'
indivi.loc[other_adults, 'idfoy'] = indivi.loc[other_adults, 'noindiv'].copy()
log.info(u"Il y a {} autres adultes seuls à qui l'on crée un foyer individuel".format(
other_adults.sum()
))
del other_adults
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].unique()
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList)
# Cas des enfants jeunes < 25 ans
kids = (
indivi_without_idfoy &
(indivi.age < 25) &
((indivi.noiper > 0) | (indivi.noimer > 0))
)
# On rattache les enfants au foyer de leur pères s'il existe
log.info(u"On traite le cas des {} enfants (noiper ou noimer non nuls) repérés non rattachés".format(
kids.sum()
))
if kids.any():
pere_declarant_potentiel = kids & (indivi.noiper > 0)
indivi['pere_noindiv'] = (100 * indivi.idmen.fillna(0) + indivi.noiper.fillna(0)).astype('int')
pere_noindiv = (
100 * indivi.loc[pere_declarant_potentiel, 'idmen'].fillna(0) +
indivi.loc[pere_declarant_potentiel, 'noiper'].fillna(0)
).astype('int')
idfoy_by_noindiv = indivi.loc[
indivi.noindiv.isin(pere_noindiv), ['noindiv', 'idfoy']
].dropna().astype('int').set_index('noindiv').squeeze().to_dict()
pere_declarant_potentiel_idfoy = indivi['pere_noindiv'].map(idfoy_by_noindiv)
pere_veritable_declarant = pere_declarant_potentiel & pere_declarant_potentiel_idfoy.isin(idfoyList)
indivi.loc[pere_veritable_declarant, 'idfoy'] = (
pere_declarant_potentiel_idfoy[pere_veritable_declarant].astype('int')
)
indivi.loc[pere_veritable_declarant, 'quifoy'] = 'pac'
log.info(u"{} enfants rattachés au père ".format(
pere_veritable_declarant.sum()
))
del pere_declarant_potentiel, pere_declarant_potentiel_idfoy, pere_noindiv, \
pere_veritable_declarant, idfoy_by_noindiv
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].unique()
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList)
kids = (
indivi_without_idfoy &
(indivi.age < 25) &
((indivi.noiper > 0) | (indivi.noimer > 0))
)
log.info(u"Il reste {} enfants (noimer non nuls) repérés non rattachés".format(
kids.sum()
))
# Et de leurs mères sinon
if kids.any():
mere_declarant_potentiel = kids & (indivi.noimer > 0)
indivi['mere_noindiv'] = (100 * indivi.idmen.fillna(0) + indivi.noimer.fillna(0)).astype('int')
mere_noindiv = (
100 * indivi.loc[mere_declarant_potentiel, 'idmen'].fillna(0) +
indivi.loc[mere_declarant_potentiel, 'noimer'].fillna(0)
).astype('int')
idfoy_by_noindiv = indivi.loc[
indivi.noindiv.isin(mere_noindiv), ['noindiv', 'idfoy']
].dropna().astype('int').set_index('noindiv').squeeze().to_dict()
mere_declarant_potentiel_idfoy = indivi['mere_noindiv'].map(idfoy_by_noindiv)
mere_veritable_declarant = mere_declarant_potentiel & mere_declarant_potentiel_idfoy.isin(idfoyList)
indivi.loc[mere_veritable_declarant, 'idfoy'] = (
mere_declarant_potentiel_idfoy[mere_veritable_declarant].astype('int')
)
indivi.loc[mere_veritable_declarant, 'quifoy'] = 'pac'
log.info(u"{} enfants rattachés à la mère".format(
mere_veritable_declarant.sum()
))
del mere_declarant_potentiel, mere_declarant_potentiel_idfoy, mere_noindiv, \
mere_veritable_declarant, idfoy_by_noindiv
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].unique()
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList)
# Enfants avec parents pas indiqués (noimer et noiper = 0)
if indivi_without_idfoy.any():
potential_idmens = indivi.loc[indivi_without_idfoy, 'idmen'].copy()
parents = indivi.loc[indivi.idmen.isin(potential_idmens), [
'idmen', 'age', 'quifoy', 'lpr', 'noiper', 'noimer']].copy()
parents_by_idmen = parents.groupby('idmen').agg({
'quifoy':
lambda quifoy: (quifoy == 'vous').sum() == 1,
}
)
parents_dummy_by_idmen = parents_by_idmen.quifoy.copy()
parents_idmens = parents_dummy_by_idmen.index[
parents_dummy_by_idmen.astype('bool')].tolist()
parents_idfoy_by_idmem = indivi.loc[
indivi.idmen.isin(parents_idmens) & (indivi.quifoy == 'vous'),
['idmen', 'noindiv']].dropna().astype('int').set_index('idmen').squeeze().to_dict()
avec_parents = (
indivi_without_idfoy &
indivi.idmen.isin(parents_idmens) &
(
(indivi.age < 18) |
(
(indivi.age < 25) &
(indivi.sali == 0) &
(indivi.choi == 0) &
(indivi.alr == 0)
)
) &
(indivi.lpr == 4) &
(indivi.noiper == 0) &
(indivi.noimer == 0) &
(indivi.lpr == 4)
)
indivi.loc[avec_parents, 'idfoy'] = (
indivi.loc[avec_parents, 'idmen'].map(parents_idfoy_by_idmem))
indivi.loc[avec_parents, 'quifoy'] = 'pac'
log.info(u"Il y a {} enfants sans noiper ni noimer avec le seul vous du ménage".format(
avec_parents.sum()
))
del parents, parents_by_idmen, parents_dummy_by_idmen, parents_idfoy_by_idmem, parents_idmens
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].unique()
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList)
if indivi_without_idfoy.any():
potential_idmens = indivi.loc[indivi_without_idfoy, 'idmen'].copy()
parents_non_pr = indivi.loc[
indivi.idmen.isin(potential_idmens) & (indivi.quifoy == 'vous'),
['idmen', 'quifoy', 'noindiv', 'lpr']].copy()
parents_by_idmen = parents_non_pr.groupby('idmen').filter(
lambda df: (
((df.quifoy == 'vous').sum() >= 1) &
(df.lpr > 2).any()
)).query('lpr > 2')
parents_idfoy_by_idmem = parents_by_idmen[
['idmen', 'noindiv']
].dropna().astype('int').set_index('idmen').squeeze().to_dict()
avec_parents_non_pr = (
indivi_without_idfoy &
indivi.idmen.isin(parents_idfoy_by_idmem.keys()) &
(indivi.age < 18) &
(indivi.lpr == 4) &
(indivi.noiper == 0) &
(indivi.noimer == 0)
)
indivi.loc[avec_parents_non_pr, 'idfoy'] = (
indivi.loc[avec_parents_non_pr, 'idmen'].map(parents_idfoy_by_idmem))
indivi.loc[avec_parents_non_pr, 'quifoy'] = 'pac'
log.info(u"Il y a {} enfants sans noiper ni noimer avec le seul vous du ménage".format(
avec_parents_non_pr.sum()
))
del parents_non_pr, parents_by_idmen, parents_idfoy_by_idmem, avec_parents_non_pr
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].unique()
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList)
if indivi_without_idfoy.any():
other_enfants = indivi_without_idfoy & (indivi.age < 18)
potential_idmens = indivi.loc[other_enfants, 'idmen'].copy()
declarants = indivi.loc[
indivi.idmen.isin(potential_idmens) & (indivi.quifoy == 'vous'),
['idmen', 'idfoy']].dropna().astype('int').copy()
declarants_by_idmen = declarants.groupby('idmen').agg({
'idfoy': 'max'
}).squeeze().to_dict()
indivi.loc[other_enfants, 'idfoy'] = indivi.loc[other_enfants, 'idmen'].copy().map(declarants_by_idmen)
indivi.loc[other_enfants, 'quifoy'] = 'pac'
log.info(u"Il y a {} autres enfants que l'on met avec un vous du ménage".format(
other_enfants.sum()
))
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].unique()
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList)
if indivi_without_idfoy.any():
other_grands_enfants = indivi_without_idfoy & (indivi.age >= 18)
indivi.loc[other_grands_enfants, 'idfoy'] = indivi.loc[other_grands_enfants, 'noindiv']
indivi.loc[other_grands_enfants, 'quifoy'] = 'vous'
log.info(u"Il y a {} autres grans enfants (>= 18) que l'on met avec un vous du ménage".format(
other_grands_enfants.sum()
))
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].unique()
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList)
temporary_store['indivi_step_06_{}'.format(year)] = indivi
assert not indivi_without_idfoy.any()
log.info(u" 4.2 : On enlève les individus pour lesquels il manque le déclarant")
fip = temporary_store['fipDat_{}'.format(year)]
fip["declar"] = np.nan
fip["agepf"] = np.nan
fip.drop(["actrec", "year", "noidec"], axis = 1, inplace = True)
fip.naia = fip.naia.astype("int32")
fip.rename(
columns = dict(
ident = "idmen",
persfip = "quifoy",
zsali = "sali", # Inclu les salaires non imposables des agents d'assurance
zchoi = "choi",
zrsti = "rsti",
zalri = "alr"),
inplace = True)
is_fip_19_25 = ((year - fip.naia - 1) >= 19) & ((year - fip.naia - 1) < 25)
# TODO: BUT for the time being we keep them in their vous menage, so the following lines are commented out.
# The idmen are of the form 60XXXX; we would use idmen 61XXXX, 62XXXX for the kids over 18 and less than 25.
indivi = concat([indivi, fip.loc[is_fip_19_25].copy()])
temporary_store['indivi_step_06_{}'.format(year)] = indivi
assert not(indivi.noindiv.duplicated().any())
del is_fip_19_25
indivi['age'] = year - indivi.naia - 1
indivi['age_en_mois'] = 12 * indivi.age + 12 - indivi.naim
indivi["quimen"] = 0
assert indivi.lpr.notnull().all()
indivi.loc[indivi.lpr == 1, 'quimen'] = 0
indivi.loc[indivi.lpr == 2, 'quimen'] = 1
indivi.loc[indivi.lpr == 3, 'quimen'] = 2
indivi.loc[indivi.lpr == 4, 'quimen'] = 3
indivi['not_pr_cpr'] = None # Create a new column
indivi.loc[indivi.lpr <= 2, 'not_pr_cpr'] = False
indivi.loc[indivi.lpr > 2, 'not_pr_cpr'] = True
assert indivi.not_pr_cpr.isin([True, False]).all()
log.info(u" 4.3 : Creating non pr=0 and cpr=1 idmen's")
indivi.set_index('noindiv', inplace = True, verify_integrity = True)
test1 = indivi.loc[indivi.not_pr_cpr, ['quimen', 'idmen']].copy()
test1['quimen'] = 2
j = 2
while any(test1.duplicated(['quimen', 'idmen'])):
test1.loc[test1.duplicated(['quimen', 'idmen']), 'quimen'] = j + 1
j += 1
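# test1 now assigns distinct quimen values (2, 3, ...) within each idmen to the
# members other than the PR and CPR; it is merged back into indivi just below.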
print_id(indivi)
indivi.update(test1)
indivi.reset_index(inplace = True)
print_id(indivi)
temporary_store['indivi_step_06_{}'.format(year)] = indivi
gc.collect()
return
@temporary_store_decorator(config_files_directory = config_files_directory, file_name = 'erfs')
def create_totals_second_pass(temporary_store = None, year = None):
assert temporary_store is not None
assert year is not None
log.info(u" 5.1 : Elimination idfoy restant")
# Voiture balai
# On a plein d'idfoy vides, on fait 1 ménage = 1 foyer fiscal
indivi = temporary_store['indivi_step_06_{}'.format(year)]
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].unique()
indivi_without_idfoy = ~indivi.idfoy.isin(idfoyList)
indivi.loc[indivi_without_idfoy, 'quifoy'] = "pac"
indivi.loc[indivi_without_idfoy & (indivi.quimen == 0) & (indivi.age >= 18), 'quifoy'] = "vous"
indivi.loc[indivi_without_idfoy & (indivi.quimen == 0) & (indivi.age >= 18), 'idfoy'] = (
indivi.loc[indivi_without_idfoy, "idmen"].astype('int') * 100 + 51
)
indivi.loc[indivi_without_idfoy & (indivi.quimen == 1) & (indivi.age >= 18), 'quifoy'] = "conj"
del idfoyList
print_id(indivi)
# Sélectionne les variables à garder pour les steps suivants
variables = [
"actrec",
"age",
"age_en_mois",
"chpub",
"encadr",
"idfoy",
"idmen",
"nbsala",
"noi",
"noindiv",
"prosa",
"quelfic",
"quifoy",
"quimen",
"statut",
"titc",
"txtppb",
"wprm",
"rc1rev",
"maahe",
"sali",
"rsti",
"choi",
"alr",
"wprm",
]
assert set(variables).issubset(set(indivi.columns)), \
"Manquent les colonnes suivantes : {}".format(set(variables).difference(set(indivi.columns)))
dropped_columns = [variable for variable in indivi.columns if variable not in variables]
indivi.drop(dropped_columns, axis = 1, inplace = True)
# see http://stackoverflow.com/questions/11285613/selecting-columns
indivi.reset_index(inplace = True)
gc.collect()
# TODO les actrec des fip ne sont pas codées (on le fera à la fin quand on aura rassemblé
# les infos provenant des déclarations)
log.info(u"Etape 6 : Création des variables descriptives")
log.info(u" 6.1 : Variable activité")
log.info(u"Variables présentes; \n {}".format(indivi.columns))
indivi['activite'] = np.nan
indivi.loc[indivi.actrec <= 3, 'activite'] = 0
indivi.loc[indivi.actrec == 4, 'activite'] = 1
indivi.loc[indivi.actrec == 5, 'activite'] = 2
indivi.loc[indivi.actrec == 7, 'activite'] = 3
indivi.loc[indivi.actrec == 8, 'activite'] = 4
indivi.loc[indivi.age <= 13, 'activite'] = 2 # ce sont en fait les actrec=9
log.info("Valeurs prises par la variable activité \n {}".format(indivi['activite'].value_counts(dropna = False)))
# TODO: MBJ problem avec les actrec
# TODO: FIX AND REMOVE
indivi.loc[indivi.actrec.isnull(), 'activite'] = 5
indivi.loc[indivi.titc.isnull(), 'titc'] = 0
assert indivi.titc.notnull().all(), \
u"Problème avec les titc" # On a 420 NaN pour les varaibels statut, titc etc
log.info(u" 6.2 : Variable statut")
indivi.loc[indivi.statut.isnull(), 'statut'] = 0
indivi.statut = indivi.statut.astype('int')
indivi.loc[indivi.statut == 11, 'statut'] = 1
indivi.loc[indivi.statut == 12, 'statut'] = 2
indivi.loc[indivi.statut == 13, 'statut'] = 3
indivi.loc[indivi.statut == 21, 'statut'] = 4
indivi.loc[indivi.statut == 22, 'statut'] = 5
indivi.loc[indivi.statut == 33, 'statut'] = 6
indivi.loc[indivi.statut == 34, 'statut'] = 7
indivi.loc[indivi.statut == 35, 'statut'] = 8
indivi.loc[indivi.statut == 43, 'statut'] = 9
indivi.loc[indivi.statut == 44, 'statut'] = 10
indivi.loc[indivi.statut == 45, 'statut'] = 11
assert indivi.statut.isin(range(12)).all(), u"statut value over range"
log.info("Valeurs prises par la variable statut \n {}".format(
indivi['statut'].value_counts(dropna = False)))
log.info(u" 6.3 : variable txtppb")
indivi.loc[indivi.txtppb.isnull(), 'txtppb'] = 0
assert indivi.txtppb.notnull().all()
indivi.loc[indivi.nbsala.isnull(), 'nbsala'] = 0
indivi.nbsala = indivi.nbsala.astype('int')
indivi.loc[indivi.nbsala == 99, 'nbsala'] = 10
assert indivi.nbsala.isin(range(11)).all()
log.info("Valeurs prises par la variable txtppb \n {}".format(
indivi['txtppb'].value_counts(dropna = False)))
log.info(u" 6.4 : variable chpub et CSP")
indivi.loc[indivi.chpub.isnull(), 'chpub'] = 0
indivi.chpub = indivi.chpub.astype('int')
assert indivi.chpub.isin(range(11)).all()
indivi['cadre'] = 0
indivi.loc[indivi.prosa.isnull(), 'prosa'] = 0
assert indivi.prosa.notnull().all()
log.info("Valeurs prises par la variable encadr \n {}".format(indivi['encadr'].value_counts(dropna = False)))
# encadr : 1=oui, 2=non
indivi.loc[indivi.encadr.isnull(), 'encadr'] = 2
indivi.loc[indivi.encadr == 0, 'encadr'] = 2
assert indivi.encadr.notnull().all()
assert indivi.encadr.isin([1, 2]).all()
indivi.loc[indivi.prosa.isin([7, 8]), 'cadre'] = 1
indivi.loc[(indivi.prosa == 9) & (indivi.encadr == 1), 'cadre'] = 1
assert indivi.cadre.isin(range(2)).all()
log.info(
u"Etape 7: on vérifie qu'il ne manque pas d'info sur les liens avec la personne de référence"
)
log.info(
u"nb de doublons idfoy/quifoy {}".format(len(indivi[indivi.duplicated(subset = ['idfoy', 'quifoy'])]))
)
log.info(u"On crée les n° de personnes à charge dans le foyer fiscal")
assert indivi.idfoy.notnull().all()
print_id(indivi)
indivi['quifoy_bis'] = 2
indivi.loc[indivi.quifoy == 'vous', 'quifoy_bis'] = 0
indivi.loc[indivi.quifoy == 'conj', 'quifoy_bis'] = 1
indivi.loc[indivi.quifoy == 'pac', 'quifoy_bis'] = 2
del indivi['quifoy']
indivi['quifoy'] = indivi.quifoy_bis.copy()
del indivi['quifoy_bis']
print_id(indivi)
pac = indivi.loc[indivi['quifoy'] == 2, ['quifoy', 'idfoy', 'noindiv']].copy()
print_id(pac)
j = 2
while pac.duplicated(['quifoy', 'idfoy']).any():
pac.loc[pac.duplicated(['quifoy', 'idfoy']), 'quifoy'] = j
j += 1
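# Each additional 'pac' within a foyer now carries a distinct quifoy value (2, 3, 4, ...).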
print_id(pac)
indivi = indivi.merge(pac, on = ['noindiv', 'idfoy'], how = "left")
indivi['quifoy'] = indivi['quifoy_x']
indivi['quifoy'] = where(indivi['quifoy_x'] == 2, indivi['quifoy_y'], indivi['quifoy_x'])
del indivi['quifoy_x'], indivi['quifoy_y']
print_id(indivi)
del pac
assert len(indivi[indivi.duplicated(subset = ['idfoy', 'quifoy'])]) == 0, \
u"Il y a {} doublons idfoy/quifoy".format(
len(indivi[indivi.duplicated(subset = ['idfoy', 'quifoy'])])
)
print_id(indivi)
log.info(u"Etape 8 : création des fichiers totaux")
famille = temporary_store['famc_{}'.format(year)]
log.info(u" 8.1 : création de tot2 & tot3")
tot2 = indivi.merge(famille, on = 'noindiv', how = 'inner')
# TODO: MBJ increase in number of menage/foyer when merging with family ...
del famille
control(tot2, debug = True, verbose = True)
assert tot2.quifam.notnull().all()
temporary_store['tot2_{}'.format(year)] = tot2
del indivi
log.info(u" tot2 saved")
tot2 = tot2[tot2.idmen.notnull()].copy()
print_id(tot2)
tot3 = tot2
# TODO: check where they come from
log.info("Avant élimination des doublons noindiv: {}".format(len(tot3)))
tot3 = tot3.drop_duplicates(subset = 'noindiv')
log.info("Après élimination des doublons noindiv: {}".format(len(tot3)))
# Block to remove any unwanted duplicated pair
control(tot3, debug = True, verbose = True)
tot3 = tot3.drop_duplicates(subset = ['idfoy', 'quifoy'])
log.info("Après élimination des doublons idfoy, quifoy: {}".format(len(tot3)))
tot3 = tot3.drop_duplicates(subset = ['idfam', 'quifam'])
log.info("Après élimination des doublons idfam, 'quifam: {}".format(len(tot3)))
tot3 = tot3.drop_duplicates(subset = ['idmen', 'quimen'])
log.info("Après élimination des doublons idmen, quimen: {}".format(len(tot3)))
tot3 = tot3.drop_duplicates(subset = ['noindiv'])
control(tot3)
log.info(u" 8.2 : On ajoute les variables individualisables")
allvars = temporary_store['ind_vars_to_remove_{}'.format(year)]
vars2 = set(tot3.columns).difference(set(allvars))
tot3 = tot3[list(vars2)]
log.info("{}".format(len(tot3)))
assert not(tot3.duplicated(subset = ['noindiv']).any()), "doublon dans tot3['noindiv']"
lg_dup = len(tot3[tot3.duplicated(['idfoy', 'quifoy'])])
assert lg_dup == 0, "{} pairs of idfoy/quifoy in tot3 are duplicated".format(lg_dup)
temporary_store['tot3_{}'.format(year)] = tot3
control(tot3)
del tot2, allvars, tot3, vars2
gc.collect()
log.info(u"tot3 sauvegardé")
@temporary_store_decorator(config_files_directory = config_files_directory, file_name = 'erfs')
def create_final(temporary_store = None, year = None):
assert temporary_store is not None
assert year is not None
log.info(u"création de final")
foy_ind = temporary_store['foy_ind_{}'.format(year)]
tot3 = temporary_store['tot3_{}'.format(year)]
log.info(u"Stats on tot3")
print_id(tot3)
log.info(u"Stats on foy_ind")
print_id(foy_ind)
foy_ind.set_index(['idfoy', 'quifoy'], inplace = True, verify_integrity = True)
tot3.set_index(['idfoy', 'quifoy'], inplace = True, verify_integrity = True)
# tot3 = concat([tot3, foy_ind], join_axes=[tot3.index], axis=1, verify_integrity = True)
# TODO improve this
foy_ind.drop([u'alr', u'rsti', u'sali', u'choi'], axis = 1, inplace = True)
tot3 = tot3.join(foy_ind)
tot3.reset_index(inplace = True)
foy_ind.reset_index(inplace = True)
# tot3 = tot3.drop_duplicates(subset=['idfam', 'quifam'])
control(tot3, verbose=True)
final = tot3.loc[tot3.idmen.notnull(), :].copy()
control(final, verbose=True)
del tot3, foy_ind
gc.collect()
log.info(" loading fip")
sif = temporary_store['sif_{}'.format(year)]
log.info("Columns from sif dataframe: {}".format(sif.columns))
log.info(" update final using fip")
final.set_index('noindiv', inplace = True, verify_integrity = True)
# TODO: IL FAUT UNE METHODE POUR GERER LES DOUBLES DECLARATIONS
# On ne garde que les sif.noindiv qui correspondent à des idfoy == "vous"
# Et on enlève les duplicates
idfoys = final.loc[final.quifoy == 0, "idfoy"]
sif = sif[sif.noindiv.isin(idfoys) & ~(sif.change.isin(['M', 'S', 'Z']))].copy()
sif.drop_duplicates(subset = ['noindiv'], inplace = True)
sif.set_index('noindiv', inplace = True, verify_integrity = True)
final = final.join(sif)
final.reset_index(inplace = True)
control(final, debug=True)
final['caseP'] = final.caseP.fillna(False)
final['caseF'] = final.caseF.fillna(False)
print_id(final)
temporary_store['final_{}'.format(year)] = final
log.info(u"final sauvegardé")
del sif, final
if __name__ == '__main__':
year = 2009
logging.basicConfig(level = logging.INFO, filename = 'step_06.log', filemode = 'w')
create_totals_first_pass(year = year)
create_totals_second_pass(year = year)
create_final(year = year)
log.info(u"étape 06 remise en forme des données terminée")
| agpl-3.0 |
0x0all/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/quiver.py | 69 | 36790 | """
Support for plotting vector fields.
Presently this contains Quiver and Barb. Quiver plots an arrow in the
direction of the vector, with the size of the arrow related to the
magnitude of the vector.
Barbs are like quiver in that they point along a vector, but
the magnitude of the vector is given schematically by the presence of barbs
or flags on the barb.
This will also become a home for things such as standard
deviation ellipses, which can and will be derived very easily from
the Quiver code.
"""
import numpy as np
from numpy import ma
import matplotlib.collections as collections
import matplotlib.transforms as transforms
import matplotlib.text as mtext
import matplotlib.artist as martist
import matplotlib.font_manager as font_manager
from matplotlib.cbook import delete_masked_points
from matplotlib.patches import CirclePolygon
import math
_quiver_doc = """
Plot a 2-D field of arrows.
call signatures::
quiver(U, V, **kw)
quiver(U, V, C, **kw)
quiver(X, Y, U, V, **kw)
quiver(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the arrow locations (default is tail of
arrow; see *pivot* kwarg)
*U*, *V*:
give the *x* and *y* components of the arrow vectors
*C*:
an optional array used to map colors to the arrows
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if len(*X*) and len(*Y*)
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*units*: ['width' | 'height' | 'dots' | 'inches' | 'x' | 'y' ]
arrow units; the arrow dimensions *except for length* are in
multiples of this unit.
* 'width' or 'height': the width or height of the axes
* 'dots' or 'inches': pixels or inches, based on the figure dpi
* 'x' or 'y': *X* or *Y* data units
The arrows scale differently depending on the units. For
'x' or 'y', the arrows get larger as one zooms in; for other
units, the arrow size is independent of the zoom state. For
'width' or 'height', the arrow size increases with the width and
height of the axes, respectively, when the window is resized;
for 'dots' or 'inches', resizing does not change the arrows.
*angles*: ['uv' | 'xy' | array]
With the default 'uv', the arrow aspect ratio is 1, so that
if *U*==*V* the angle of the arrow on the plot is 45 degrees
CCW from the *x*-axis.
With 'xy', the arrow points from (x,y) to (x+u, y+v).
Alternatively, arbitrary angles may be specified as an array
of values in degrees, CCW from the *x*-axis.
*scale*: [ None | float ]
data units per arrow unit, e.g. m/s per plot width; a smaller
scale parameter makes the arrow longer. If *None*, a simple
autoscaling algorithm is used, based on the average vector length
and the number of vectors.
*width*:
shaft width in arrow units; default depends on choice of units,
above, and number of vectors; a typical starting value is about
0.005 times the width of the plot.
*headwidth*: scalar
head width as multiple of shaft width, default is 3
*headlength*: scalar
head length as multiple of shaft width, default is 5
*headaxislength*: scalar
head length at shaft intersection, default is 4.5
*minshaft*: scalar
length below which arrow scales, in units of head length. Do not
set this to less than 1, or small arrows will look terrible!
Default is 1
*minlength*: scalar
minimum length as a multiple of shaft width; if an arrow length
is less than this, plot a dot (hexagon) of this diameter instead.
Default is 1.
*pivot*: [ 'tail' | 'middle' | 'tip' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*.
*color*: [ color | color sequence ]
This is a synonym for the
:class:`~matplotlib.collections.PolyCollection` facecolor kwarg.
If *C* has been set, *color* has no effect.
The defaults give a slightly swept-back arrow; to make the head a
triangle, make *headaxislength* the same as *headlength*. To make the
arrow more pointed, reduce *headwidth* or increase *headlength* and
*headaxislength*. To make the head smaller relative to the shaft,
scale down all the head parameters. You will probably do best to leave
minshaft alone.
linewidths and edgecolors can be used to customize the arrow
outlines. Additional :class:`~matplotlib.collections.PolyCollection`
keyword arguments:
%(PolyCollection)s
""" % martist.kwdocd
_quiverkey_doc = """
Add a key to a quiver plot.
call signature::
quiverkey(Q, X, Y, U, label, **kw)
Arguments:
*Q*:
The Quiver instance returned by a call to quiver.
*X*, *Y*:
The location of the key; additional explanation follows.
*U*:
The length of the key
*label*:
a string with the length and units of the key
Keyword arguments:
*coordinates* = [ 'axes' | 'figure' | 'data' | 'inches' ]
Coordinate system and units for *X*, *Y*: 'axes' and 'figure' are
normalized coordinate systems with 0,0 in the lower left and 1,1
in the upper right; 'data' are the axes data coordinates (used for
the locations of the vectors in the quiver plot itself); 'inches'
is position in the figure in inches, with 0,0 at the lower left
corner.
*color*:
overrides face and edge colors from *Q*.
*labelpos* = [ 'N' | 'S' | 'E' | 'W' ]
Position the label above, below, to the right, to the left of the
arrow, respectively.
*labelsep*:
Distance in inches between the arrow and the label. Default is
0.1
*labelcolor*:
defaults to default :class:`~matplotlib.text.Text` color.
*fontproperties*:
A dictionary with keyword arguments accepted by the
:class:`~matplotlib.font_manager.FontProperties` initializer:
*family*, *style*, *variant*, *size*, *weight*
Any additional keyword arguments are used to override vector
properties taken from *Q*.
The positioning of the key depends on *X*, *Y*, *coordinates*, and
*labelpos*. If *labelpos* is 'N' or 'S', *X*, *Y* give the position
of the middle of the key arrow. If *labelpos* is 'E', *X*, *Y*
positions the head, and if *labelpos* is 'W', *X*, *Y* positions the
tail; in either of these two cases, *X*, *Y* is somewhere in the
middle of the arrow+label key object.
"""
class QuiverKey(martist.Artist):
""" Labelled arrow for use as a quiver plot scale key.
"""
halign = {'N': 'center', 'S': 'center', 'E': 'left', 'W': 'right'}
valign = {'N': 'bottom', 'S': 'top', 'E': 'center', 'W': 'center'}
pivot = {'N': 'mid', 'S': 'mid', 'E': 'tip', 'W': 'tail'}
def __init__(self, Q, X, Y, U, label, **kw):
martist.Artist.__init__(self)
self.Q = Q
self.X = X
self.Y = Y
self.U = U
self.coord = kw.pop('coordinates', 'axes')
self.color = kw.pop('color', None)
self.label = label
self._labelsep_inches = kw.pop('labelsep', 0.1)
self.labelsep = (self._labelsep_inches * Q.ax.figure.dpi)
def on_dpi_change(fig):
self.labelsep = (self._labelsep_inches * fig.dpi)
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
Q.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
self.labelpos = kw.pop('labelpos', 'N')
self.labelcolor = kw.pop('labelcolor', None)
self.fontproperties = kw.pop('fontproperties', dict())
self.kw = kw
_fp = self.fontproperties
#boxprops = dict(facecolor='red')
self.text = mtext.Text(text=label, # bbox=boxprops,
horizontalalignment=self.halign[self.labelpos],
verticalalignment=self.valign[self.labelpos],
fontproperties=font_manager.FontProperties(**_fp))
if self.labelcolor is not None:
self.text.set_color(self.labelcolor)
self._initialized = False
self.zorder = Q.zorder + 0.1
__init__.__doc__ = _quiverkey_doc
def _init(self):
if True: ##not self._initialized:
self._set_transform()
_pivot = self.Q.pivot
self.Q.pivot = self.pivot[self.labelpos]
self.verts = self.Q._make_verts(np.array([self.U]),
np.zeros((1,)))
self.Q.pivot = _pivot
kw = self.Q.polykw
kw.update(self.kw)
self.vector = collections.PolyCollection(self.verts,
offsets=[(self.X,self.Y)],
transOffset=self.get_transform(),
**kw)
if self.color is not None:
self.vector.set_color(self.color)
self.vector.set_transform(self.Q.get_transform())
self._initialized = True
def _text_x(self, x):
if self.labelpos == 'E':
return x + self.labelsep
elif self.labelpos == 'W':
return x - self.labelsep
else:
return x
def _text_y(self, y):
if self.labelpos == 'N':
return y + self.labelsep
elif self.labelpos == 'S':
return y - self.labelsep
else:
return y
def draw(self, renderer):
self._init()
self.vector.draw(renderer)
x, y = self.get_transform().transform_point((self.X, self.Y))
self.text.set_x(self._text_x(x))
self.text.set_y(self._text_y(y))
self.text.draw(renderer)
def _set_transform(self):
if self.coord == 'data':
self.set_transform(self.Q.ax.transData)
elif self.coord == 'axes':
self.set_transform(self.Q.ax.transAxes)
elif self.coord == 'figure':
self.set_transform(self.Q.ax.figure.transFigure)
elif self.coord == 'inches':
self.set_transform(self.Q.ax.figure.dpi_scale_trans)
else:
raise ValueError('unrecognized coordinates')
def set_figure(self, fig):
martist.Artist.set_figure(self, fig)
self.text.set_figure(fig)
def contains(self, mouseevent):
# Maybe the dictionary should allow one to
# distinguish between a text hit and a vector hit.
if (self.text.contains(mouseevent)[0]
or self.vector.contains(mouseevent)[0]):
return True, {}
return False, {}
quiverkey_doc = _quiverkey_doc
class Quiver(collections.PolyCollection):
"""
Specialized PolyCollection for arrows.
The only API method is set_UVC(), which can be used
to change the size, orientation, and color of the
arrows; their locations are fixed when the class is
instantiated. Possibly this method will be useful
in animations.
Much of the work in this class is done in the draw()
method so that as much information as possible is available
about the plot. In subsequent draw() calls, recalculation
is limited to things that might have changed, so there
should be no performance penalty from putting the calculations
in the draw() method.
"""
def __init__(self, ax, *args, **kw):
self.ax = ax
X, Y, U, V, C = self._parse_args(*args)
self.X = X
self.Y = Y
self.XY = np.hstack((X[:,np.newaxis], Y[:,np.newaxis]))
self.N = len(X)
self.scale = kw.pop('scale', None)
self.headwidth = kw.pop('headwidth', 3)
self.headlength = float(kw.pop('headlength', 5))
self.headaxislength = kw.pop('headaxislength', 4.5)
self.minshaft = kw.pop('minshaft', 1)
self.minlength = kw.pop('minlength', 1)
self.units = kw.pop('units', 'width')
self.angles = kw.pop('angles', 'uv')
self.width = kw.pop('width', None)
self.color = kw.pop('color', 'k')
self.pivot = kw.pop('pivot', 'tail')
kw.setdefault('facecolors', self.color)
kw.setdefault('linewidths', (0,))
collections.PolyCollection.__init__(self, [], offsets=self.XY,
transOffset=ax.transData,
closed=False,
**kw)
self.polykw = kw
self.set_UVC(U, V, C)
self._initialized = False
self.keyvec = None
self.keytext = None
def on_dpi_change(fig):
self._new_UV = True # vertices depend on width, span
# which in turn depend on dpi
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
self.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
__init__.__doc__ = """
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s""" % _quiver_doc
def _parse_args(self, *args):
X, Y, U, V, C = [None]*5
args = list(args)
if len(args) == 3 or len(args) == 5:
C = ma.asarray(args.pop(-1)).ravel()
V = ma.asarray(args.pop(-1))
U = ma.asarray(args.pop(-1))
nn = np.shape(U)
nc = nn[0]
nr = 1
if len(nn) > 1:
nr = nn[1]
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
def _init(self):
"""initialization delayed until first draw;
allow time for axes setup.
"""
# It seems that there are not enough event notifications
# available to have this work on an as-needed basis at present.
if True: ##not self._initialized:
trans = self._set_transform()
ax = self.ax
sx, sy = trans.inverted().transform_point(
(ax.bbox.width, ax.bbox.height))
self.span = sx
sn = max(8, min(25, math.sqrt(self.N)))
if self.width is None:
self.width = 0.06 * self.span / sn
def draw(self, renderer):
self._init()
if self._new_UV or self.angles == 'xy':
verts = self._make_verts(self.U, self.V)
self.set_verts(verts, closed=False)
self._new_UV = False
collections.PolyCollection.draw(self, renderer)
def set_UVC(self, U, V, C=None):
self.U = U.ravel()
self.V = V.ravel()
if C is not None:
self.set_array(C.ravel())
self._new_UV = True
def _set_transform(self):
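# Build the affine scale that converts arrow lengths from the requested
# 'units' (x, y, width, height, dots or inches) into display pixels.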
ax = self.ax
if self.units in ('x', 'y'):
if self.units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
else:
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
dx = dx1/dx0
else:
if self.units == 'width':
dx = ax.bbox.width
elif self.units == 'height':
dx = ax.bbox.height
elif self.units == 'dots':
dx = 1.0
elif self.units == 'inches':
dx = ax.figure.dpi
else:
raise ValueError('unrecognized units')
trans = transforms.Affine2D().scale(dx)
self.set_transform(trans)
return trans
def _angles(self, U, V, eps=0.001):
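# Angle of each arrow in display space: transform each tail point and a
# point nudged by eps*(u, v) into pixels and take arctan2 of the difference.
# Used when angles == 'xy' so arrow directions follow the data coordinates.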
xy = self.ax.transData.transform(self.XY)
uv = ma.hstack((U[:,np.newaxis], V[:,np.newaxis])).filled(0)
xyp = self.ax.transData.transform(self.XY + eps * uv)
dxy = xyp - xy
ang = ma.arctan2(dxy[:,1], dxy[:,0])
return ang
def _make_verts(self, U, V):
uv = ma.asarray(U+V*1j)
a = ma.absolute(uv)
if self.scale is None:
sn = max(10, math.sqrt(self.N))
scale = 1.8 * a.mean() * sn / self.span # crude auto-scaling
self.scale = scale
length = a/(self.scale*self.width)
X, Y = self._h_arrows(length)
if self.angles == 'xy':
theta = self._angles(U, V).filled(0)[:,np.newaxis]
elif self.angles == 'uv':
theta = np.angle(ma.asarray(uv[..., np.newaxis]).filled(0))
else:
theta = ma.asarray(self.angles*np.pi/180.0).filled(0)
xy = (X+Y*1j) * np.exp(1j*theta)*self.width
xy = xy[:,:,np.newaxis]
XY = ma.concatenate((xy.real, xy.imag), axis=2)
return XY
def _h_arrows(self, length):
""" length is in arrow width units """
# It might be possible to streamline the code
# and speed it up a bit by using complex (x,y)
# instead of separate arrays; but any gain would be slight.
minsh = self.minshaft * self.headlength
N = len(length)
length = length.reshape(N, 1)
# x, y: normal horizontal arrow
x = np.array([0, -self.headaxislength,
-self.headlength, 0], np.float64)
x = x + np.array([0,1,1,1]) * length
y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
y = np.repeat(y[np.newaxis,:], N, axis=0)
# x0, y0: arrow without shaft, for short vectors
x0 = np.array([0, minsh-self.headaxislength,
minsh-self.headlength, minsh], np.float64)
y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
ii = [0,1,2,3,2,1,0]
X = x.take(ii, 1)
Y = y.take(ii, 1)
Y[:, 3:] *= -1
X0 = x0.take(ii)
Y0 = y0.take(ii)
Y0[3:] *= -1
shrink = length/minsh
X0 = shrink * X0[np.newaxis,:]
Y0 = shrink * Y0[np.newaxis,:]
short = np.repeat(length < minsh, 7, axis=1)
#print 'short', length < minsh
# Now select X0, Y0 if short, otherwise X, Y
X = ma.where(short, X0, X)
Y = ma.where(short, Y0, Y)
if self.pivot[:3] == 'mid':
X -= 0.5 * X[:,3, np.newaxis]
elif self.pivot[:3] == 'tip':
X = X - X[:,3, np.newaxis] #numpy bug? using -= does not
# work here unless we multiply
# by a float first, as with 'mid'.
tooshort = length < self.minlength
if tooshort.any():
# Use a heptagonal dot:
th = np.arange(0,7,1, np.float64) * (np.pi/3.0)
x1 = np.cos(th) * self.minlength * 0.5
y1 = np.sin(th) * self.minlength * 0.5
X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
tooshort = ma.repeat(tooshort, 7, 1)
X = ma.where(tooshort, X1, X)
Y = ma.where(tooshort, Y1, Y)
return X, Y
quiver_doc = _quiver_doc
_barbs_doc = """
Plot a 2-D field of barbs.
call signatures::
barb(U, V, **kw)
barb(U, V, C, **kw)
barb(X, Y, U, V, **kw)
barb(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the barb locations
(default is head of barb; see *pivot* kwarg)
*U*, *V*:
give the *x* and *y* components of the barb shaft
*C*:
an optional array used to map colors to the barbs
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if len(*X*) and len(*Y*)
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*length*:
Length of the barb in points; the other parts of the barb
are scaled against this.
Default is 9
*pivot*: [ 'tip' | 'middle' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*. Default is 'tip'
*barbcolor*: [ color | color sequence ]
Specifies the color all parts of the barb except any flags. This
parameter is analogous to the *edgecolor* parameter for polygons,
which can be used instead. However this parameter will override
facecolor.
*flagcolor*: [ color | color sequence ]
Specifies the color of any flags on the barb. This parameter is
analogous to the *facecolor* parameter for polygons, which can be
used instead. However this parameter will override facecolor. If
this is not set (and *C* has not either) then *flagcolor* will be
set to match *barbcolor* so that the barb has a uniform color. If
*C* has been set, *flagcolor* has no effect.
*sizes*:
A dictionary of coefficients specifying the ratio of a given
feature to the length of the barb. Only those values one wishes to
override need to be included. These features include:
- 'spacing' - space between features (flags, full/half barbs)
- 'height' - height (distance from shaft to top) of a flag or
full barb
- 'width' - width of a flag, twice the width of a full barb
- 'emptybarb' - radius of the circle used for low magnitudes
*fill_empty*:
A flag on whether the empty barbs (circles) that are drawn should
be filled with the flag color. If they are not filled, they will
be drawn such that no color is applied to the center. Default is
False
*rounding*:
A flag to indicate whether the vector magnitude should be rounded
when allocating barb components. If True, the magnitude is
rounded to the nearest multiple of the half-barb increment. If
False, the magnitude is simply truncated to the next lowest
multiple. Default is True
*barb_increments*:
A dictionary of increments specifying values to associate with
different parts of the barb. Only those values one wishes to
override need to be included.
- 'half' - half barbs (Default is 5)
- 'full' - full barbs (Default is 10)
- 'flag' - flags (default is 50)
*flip_barb*:
Either a single boolean flag or an array of booleans. Single
boolean indicates whether the lines and flags should point
opposite to normal for all barbs. An array (which should be the
same size as the other data arrays) indicates whether to flip for
each individual barb. Normal behavior is for the barbs and lines
to point right (comes from wind barbs having these features point
towards low pressure in the Northern Hemisphere.) Default is
False
Barbs are traditionally used in meteorology as a way to plot the speed
and direction of wind observations, but can technically be used to
plot any two dimensional vector quantity. As opposed to arrows, which
give vector magnitude by the length of the arrow, the barbs give more
quantitative information about the vector magnitude by putting slanted
lines or a triangle for various increments in magnitude, as shown
schematically below::
: /\ \\
: / \ \\
: / \ \ \\
: / \ \ \\
: ------------------------------
.. note the double \\ at the end of each line to make the figure
.. render correctly
The largest increment is given by a triangle (or "flag"). After those
come full lines (barbs). The smallest increment is a half line. There
is only, of course, ever at most 1 half line. If the magnitude is
small and only needs a single half-line and no full lines or
triangles, the half-line is offset from the end of the barb so that it
can be easily distinguished from barbs with a single full line. The
magnitude for the barb shown above would nominally be 65, using the
standard increments of 50, 10, and 5.
linewidths and edgecolors can be used to customize the barb.
Additional :class:`~matplotlib.collections.PolyCollection` keyword
arguments:
%(PolyCollection)s
""" % martist.kwdocd
class Barbs(collections.PolyCollection):
'''
Specialized PolyCollection for barbs.
The only API method is :meth:`set_UVC`, which can be used to
change the size, orientation, and color of the arrows. Locations
are changed using the :meth:`set_offsets` collection method.
Possibly this method will be useful in animations.
There is one internal function :meth:`_find_tails` which finds
exactly what should be put on the barb given the vector magnitude.
From there :meth:`_make_barbs` is used to find the vertices of the
polygon to represent the barb based on this information.
'''
#This may be an abuse of polygons here to render what is essentially maybe
#1 triangle and a series of lines. It works fine as far as I can tell
#however.
def __init__(self, ax, *args, **kw):
self._pivot = kw.pop('pivot', 'tip')
self._length = kw.pop('length', 7)
barbcolor = kw.pop('barbcolor', None)
flagcolor = kw.pop('flagcolor', None)
self.sizes = kw.pop('sizes', dict())
self.fill_empty = kw.pop('fill_empty', False)
self.barb_increments = kw.pop('barb_increments', dict())
self.rounding = kw.pop('rounding', True)
self.flip = kw.pop('flip_barb', False)
#Flagcolor and and barbcolor provide convenience parameters for setting
#the facecolor and edgecolor, respectively, of the barb polygon. We
#also work here to make the flag the same color as the rest of the barb
#by default
if None in (barbcolor, flagcolor):
kw['edgecolors'] = 'face'
if flagcolor:
kw['facecolors'] = flagcolor
elif barbcolor:
kw['facecolors'] = barbcolor
else:
#Set to facecolor passed in or default to black
kw.setdefault('facecolors', 'k')
else:
kw['edgecolors'] = barbcolor
kw['facecolors'] = flagcolor
#Parse out the data arrays from the various configurations supported
x, y, u, v, c = self._parse_args(*args)
self.x = x
self.y = y
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
#Make a collection
barb_size = self._length**2 / 4 #Empirically determined
collections.PolyCollection.__init__(self, [], (barb_size,), offsets=xy,
transOffset=ax.transData, **kw)
self.set_transform(transforms.IdentityTransform())
self.set_UVC(u, v, c)
__init__.__doc__ = """
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s""" % _barbs_doc
def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
'''
Find how many of each of the tail pieces is necessary. Flag
specifies the increment for a flag, full for a full barb, and half for
half a barb. Mag should be the magnitude of a vector (i.e. >= 0).
This returns a tuple of:
(*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)
*half_flag* is a boolean whether half of a barb is needed,
since there should only ever be one half on a given
barb. *empty_flag* is an array of flags to easily tell whether
a barb is empty (too low to plot any barbs/flags).
'''
#If rounding, round to the nearest multiple of half, the smallest
#increment
if rounding:
mag = half * (mag / half + 0.5).astype(np.int)
num_flags = np.floor(mag / flag).astype(np.int)
mag = np.mod(mag, flag)
num_barb = np.floor(mag / full).astype(np.int)
mag = np.mod(mag, full)
half_flag = mag >= half
empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))
return num_flags, num_barb, half_flag, empty_flag
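# Worked example (illustrative, not from the original source): with the
# default increments half=5, full=10, flag=50, a magnitude of 65 rounds to
# 65 and decomposes into one flag (50), one full barb (10) and one half
# barb (5), i.e. _find_tails(np.array([65.])) returns counts (1, 1) with
# half_flag True and empty_flag False.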
def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
pivot, sizes, fill_empty, flip):
'''
This function actually creates the wind barbs. *u* and *v*
are components of the vector in the *x* and *y* directions,
respectively.
*nflags*, *nbarbs*, *half_barb* and *empty_flag* are,
respectively, the number of flags, number of barbs, flag for
half a barb, and flag for empty barb, ostensibly obtained
from :meth:`_find_tails`.
*length* is the length of the barb staff in points.
*pivot* specifies the point on the barb around which the
entire barb should be rotated. Right now, valid options are
'tip' and 'middle'.
*sizes* is a dictionary of coefficients specifying the ratio
of a given feature to the length of the barb. These features
include:
- *spacing*: space between features (flags, full/half
barbs)
- *height*: distance from shaft to top of a flag or full
barb
- *width* - width of a flag, twice the width of a full barb
- *emptybarb* - radius of the circle used for low
magnitudes
*fill_empty* specifies whether the circle representing an
empty barb should be filled or not (this changes the drawing
of the polygon).
*flip* is a flag indicating whether the features should be flipped to
the other side of the barb (useful for winds in the southern
hemisphere).
This function returns a list of arrays of vertices, defining a polygon for
each of the wind barbs. These polygons have been rotated to properly
align with the vector direction.
'''
#These control the spacing and size of barb elements relative to the
#length of the shaft
spacing = length * sizes.get('spacing', 0.125)
full_height = length * sizes.get('height', 0.4)
full_width = length * sizes.get('width', 0.25)
empty_rad = length * sizes.get('emptybarb', 0.15)
#Controls y point where to pivot the barb.
pivot_points = dict(tip=0.0, middle=-length/2.)
#Check for flip
if flip: full_height = -full_height
endx = 0.0
endy = pivot_points[pivot.lower()]
#Get the appropriate angle for the vector components. The offset is due
#to the way the barb is initially drawn, going down the y-axis. This
#makes sense in a meteorological mode of thinking since there 0 degrees
#corresponds to north (the y-axis traditionally)
angles = -(ma.arctan2(v, u) + np.pi/2)
#Used for low magnitude. We just get the vertices, so if we make it
#out here, it can be reused. The center set here should put the
#center of the circle at the location(offset), rather than at the
#same point as the barb pivot; this seems more sensible.
circ = CirclePolygon((0,0), radius=empty_rad).get_verts()
if fill_empty:
empty_barb = circ
else:
#If we don't want the empty one filled, we make a degenerate polygon
#that wraps back over itself
empty_barb = np.concatenate((circ, circ[::-1]))
barb_list = []
for index, angle in np.ndenumerate(angles):
#If the vector magnitude is too weak to draw anything, plot an
#empty circle instead
if empty_flag[index]:
#We can skip the transform since the circle has no preferred
#orientation
barb_list.append(empty_barb)
continue
poly_verts = [(endx, endy)]
offset = length
#Add vertices for each flag
for i in range(nflags[index]):
#The spacing that works for the barbs is a little too much for
#the flags, but this only occurs when we have more than 1 flag.
if offset != length: offset += spacing / 2.
poly_verts.extend([[endx, endy + offset],
[endx + full_height, endy - full_width/2 + offset],
[endx, endy - full_width + offset]])
offset -= full_width + spacing
#Add vertices for each barb. These really are lines, but works
#great adding 3 vertices that basically pull the polygon out and
#back down the line
for i in range(nbarbs[index]):
poly_verts.extend([(endx, endy + offset),
(endx + full_height, endy + offset + full_width/2),
(endx, endy + offset)])
offset -= spacing
#Add the vertices for half a barb, if needed
if half_barb[index]:
#If the half barb is the first on the staff, traditionally it is
#offset from the end to make it easy to distinguish from a barb
#with a full one
if offset == length:
poly_verts.append((endx, endy + offset))
offset -= 1.5 * spacing
poly_verts.extend([(endx, endy + offset),
(endx + full_height/2, endy + offset + full_width/4),
(endx, endy + offset)])
#Rotate the barb according the angle. Making the barb first and then
#rotating it made the math for drawing the barb really easy. Also,
#the transform framework makes doing the rotation simple.
poly_verts = transforms.Affine2D().rotate(-angle).transform(
poly_verts)
barb_list.append(poly_verts)
return barb_list
#Taken shamelessly from Quiver
def _parse_args(self, *args):
X, Y, U, V, C = [None]*5
args = list(args)
if len(args) == 3 or len(args) == 5:
C = ma.asarray(args.pop(-1)).ravel()
V = ma.asarray(args.pop(-1))
U = ma.asarray(args.pop(-1))
nn = np.shape(U)
nc = nn[0]
nr = 1
if len(nn) > 1:
nr = nn[1]
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
def set_UVC(self, U, V, C=None):
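# Update the barbs in place: drop masked points, recompute flag/barb/half
# counts from the vector magnitudes, rebuild the polygon vertices and
# refresh the offsets and (optional) color array.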
self.u = ma.asarray(U).ravel()
self.v = ma.asarray(V).ravel()
if C is not None:
c = ma.asarray(C).ravel()
x,y,u,v,c = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v, c)
else:
x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v)
magnitude = np.sqrt(u*u + v*v)
flags, barbs, halves, empty = self._find_tails(magnitude,
self.rounding, **self.barb_increments)
#Get the vertices for each of the barbs
plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
self._length, self._pivot, self.sizes, self.fill_empty, self.flip)
self.set_verts(plot_barbs)
#Set the color array
if C is not None:
self.set_array(c)
#Update the offsets in case the masked data changed
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
self._offsets = xy
def set_offsets(self, xy):
'''
Set the offsets for the barb polygons. This saves the offsets passed in
and masks them as appropriate for the existing U/V
data. *offsets* should be a sequence.
ACCEPTS: sequence of pairs of floats
'''
self.x = xy[:,0]
self.y = xy[:,1]
x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(), self.u,
self.v)
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
collections.PolyCollection.set_offsets(self, xy)
set_offsets.__doc__ = collections.PolyCollection.set_offsets.__doc__
barbs_doc = _barbs_doc
| gpl-3.0 |
OspreyX/trading-with-python | lib/qtpandas.py | 77 | 7937 | '''
Easy integration of DataFrame into pyqt framework
Copyright: Jev Kuznetsov
Licence: BSD
'''
from PyQt4.QtCore import (QAbstractTableModel,Qt,QVariant,QModelIndex,SIGNAL)
from PyQt4.QtGui import (QApplication,QDialog,QVBoxLayout, QHBoxLayout, QTableView, QPushButton,
QWidget,QTableWidget, QHeaderView, QFont,QMenu,QAbstractItemView)
from pandas import DataFrame, Index
import numpy as np # needed at module level by DataFrameModel.setData and testDf
class DataFrameModel(QAbstractTableModel):
''' data model for a DataFrame class '''
def __init__(self,parent=None):
super(DataFrameModel,self).__init__(parent)
self.df = DataFrame()
self.columnFormat = {} # format columns
def setFormat(self,fmt):
"""
set string formatting for the output
example : format = {'close':"%.2f"}
"""
self.columnFormat = fmt
def setDataFrame(self,dataFrame):
self.df = dataFrame
self.signalUpdate()
def signalUpdate(self):
''' tell viewers to update their data (this is full update, not efficient)'''
self.layoutChanged.emit()
def __repr__(self):
return str(self.df)
def setData(self,index,value, role=Qt.EditRole):
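# Coerce the incoming QVariant to the column's dtype (float, int or
# string) before writing the value back into the underlying DataFrame.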
if index.isValid():
row,column = index.row(), index.column()
dtype = self.df.dtypes.tolist()[column] # get column dtype
if np.issubdtype(dtype,np.float):
val,ok = value.toFloat()
elif np.issubdtype(dtype,np.int):
val,ok = value.toInt()
else:
val = value.toString()
ok = True
if ok:
self.df.iloc[row,column] = val
return True
return False
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(
QAbstractTableModel.flags(self, index)|
Qt.ItemIsEditable)
def appendRow(self, index, data=0):
self.df.loc[index,:] = data
self.signalUpdate()
def deleteRow(self, index):
idx = self.df.index[index]
#self.beginRemoveRows(QModelIndex(), index,index)
#self.df = self.df.drop(idx,axis=0)
#self.endRemoveRows()
#self.signalUpdate()
#------------- table display functions -----------------
def headerData(self,section,orientation,role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
try:
return self.df.columns.tolist()[section]
except (IndexError, ):
return QVariant()
elif orientation == Qt.Vertical:
try:
#return self.df.index.tolist()
return str(self.df.index.tolist()[section])
except (IndexError, ):
return QVariant()
def data(self, index, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if not index.isValid():
return QVariant()
col = self.df.ix[:,index.column()] # get a column slice first to get the right data type
elm = col[index.row()]
#elm = self.df.ix[index.row(),index.column()]
if self.df.columns[index.column()] in self.columnFormat.keys():
return QVariant(self.columnFormat[self.df.columns[index.column()]] % elm )
else:
return QVariant(str(elm))
def sort(self,nCol,order):
self.layoutAboutToBeChanged.emit()
if order == Qt.AscendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=True)
elif order == Qt.DescendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=False)
self.layoutChanged.emit()
def rowCount(self, index=QModelIndex()):
return self.df.shape[0]
def columnCount(self, index=QModelIndex()):
return self.df.shape[1]
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("delete row")
Action.triggered.connect(self.deleteRow)
menu.exec_(event.globalPos())
def deleteRow(self):
print "Action triggered from " + self.name
print 'Selected rows:'
for idx in self.selectionModel().selectedRows():
print idx.row()
# self.model.deleteRow(idx.row())
class DataFrameWidget(QWidget):
''' a simple widget for using DataFrames in a gui '''
def __init__(self,name='DataFrameTable1', parent=None):
super(DataFrameWidget,self).__init__(parent)
self.name = name
self.dataModel = DataFrameModel()
self.dataModel.setDataFrame(DataFrame())
self.dataTable = QTableView()
#self.dataTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.dataTable.setSortingEnabled(True)
self.dataTable.setModel(self.dataModel)
self.dataModel.signalUpdate()
#self.dataTable.setFont(QFont("Courier New", 8))
layout = QVBoxLayout()
layout.addWidget(self.dataTable)
self.setLayout(layout)
def setFormat(self,fmt):
""" set non-default string formatting for a column """
for colName, f in fmt.iteritems():
self.dataModel.columnFormat[colName]=f
def fitColumns(self):
self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
def setDataFrame(self,df):
self.dataModel.setDataFrame(df)
def resizeColumnsToContents(self):
self.dataTable.resizeColumnsToContents()
def insertRow(self,index, data=None):
self.dataModel.appendRow(index,data)
#-----------------stand alone test code
def testDf():
''' creates test dataframe '''
data = {'int':[1,2,3],'float':[1./3,2.5,3.5],'string':['a','b','c'],'nan':[np.nan,np.nan,np.nan]}
return DataFrame(data, index=Index(['AAA','BBB','CCC']))[['int','float','string','nan']]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
df = testDf() # make up some data
self.table = DataFrameWidget(parent=self)
self.table.setDataFrame(df)
#self.table.resizeColumnsToContents()
self.table.fitColumns()
self.table.setFormat({'float': '%.2f'})
#buttons
#but_add = QPushButton('Add')
but_test = QPushButton('Test')
but_test.clicked.connect(self.testFcn)
hbox = QHBoxLayout()
#hbox.addself.table(but_add)
hbox.addWidget(but_test)
layout = QVBoxLayout()
layout.addWidget(self.table)
layout.addLayout(hbox)
self.setLayout(layout)
def testFcn(self):
print 'test function'
self.table.insertRow('foo')
if __name__=='__main__':
import sys
import numpy as np
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| bsd-3-clause |
CforED/Machine-Learning | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
X_train = X[:.9 * n_sample]
y_train = y[:.9 * n_sample]
X_test = X[.9 * n_sample:]
y_test = y[.9 * n_sample:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
bahamoot/CaP | cap/devtools/app.py | 2 | 1220 | import os
import cap.plugin.toy.animal
import cap.plugin.toy.extra_animal
import matplotlib.pyplot as plt
from cap.model.som import SOM2D
from cap.settings import TYPE_TRAINING_SAMPLE
from cap.settings import TYPE_TEST_SAMPLE
ANIMAL_WEIGHT_STEP_SIZE = 0.2
ANIMAL_NBH_STEP_SIZE = 8
ANIMAL_MAX_NBH_SIZE = 5
ANIMAL_MAP_ROWS = 10
ANIMAL_MAP_COLS = 10
ANIMAL_RANDOM_SEED = None
def demo_toy_training():
animals = cap.plugin.toy.animal.load_animals()
test_samples = [cap.plugin.toy.extra_animal.load_animals(samples_type=TYPE_TEST_SAMPLE)[8]]
features_size = len(animals[0].features)
model = SOM2D(features_size,
max_nbh_size=9,
nbh_step_size=0.3,
map_rows=17,
map_cols=17,
)
model.train(animals)
model.load_visualize_samples(animals, test_samples)
model.visualize_term()
fig = plt.figure()
ax = plt.subplot2grid((2, 3), (0, 0), colspan=2, rowspan=2)
model.visualize_plt(ax,
29,
plt_style={0: 'r^',
1: 'b*',
},
)
plt.tight_layout()
plt.show()
| gpl-3.0 |
DSLituiev/scikit-learn | examples/tree/unveil_tree_structure.py | 67 | 4824 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight on the
relation between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
"the following tree structure:"
% n_nodes)
for i in range(n_nodes):
if is_leaves[i]:
print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
else:
print("%snode=%s test node: go to node %s if X[:, %s] <= %ss else to "
"node %s."
% (node_depth[i] * "\t",
i,
children_left[i],
feature[i],
threshold[i],
children_right[i],
))
print()
# First let's retrieve the decision path of each sample. The decision_path
# method allows to retrieve the node indicator functions. A non zero element of
# indicator matrix at the position (i, j) indicates that the sample i goes
# through the node j.
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's make it for the sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
# skip the leaf node; only internal (test) nodes carry a decision rule
if leave_id[sample_id] == node_id:
continue
if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
threshold_sign = "<="
else:
threshold_sign = ">"
print("decision id node %s : (X[%s, %s] (= %s) %s %s)"
% (node_id,
sample_id,
feature[node_id],
X_test[sample_id, feature[node_id]],
threshold_sign,
threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
% (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
| bsd-3-clause |
guoxiaoyong/simple-useful | simple_3danim.py | 2 | 1799 | """
A simple example of an animated plot... In 3D!
"""
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
def Gen_RandLine(length, dims=2):
"""
Create a line using a random walk algorithm
length is the number of points for the line.
dims is the number of dimensions the line has.
"""
lineData = np.empty((dims, length))
lineData[:, 0] = np.random.rand(dims)
for index in range(1, length):
# scaling the random numbers by 0.1 so
# movement is small compared to position.
# subtraction by 0.5 is to change the range to [-0.5, 0.5]
# to allow a line to move backwards.
step = ((np.random.rand(dims) - 0.5) * 0.1)
lineData[:, index] = lineData[:, index - 1] + step
return lineData
def update_lines(num, dataLines, lines):
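# Animation callback: reveal the first `num` points of each random-walk line.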
for line, data in zip(lines, dataLines):
# NOTE: there is no .set_data() for 3 dim data...
line.set_data(data[0:2, :num])
line.set_3d_properties(data[2, :num])
return lines
# Attaching 3D axis to the figure
fig = plt.figure()
ax = p3.Axes3D(fig)
# Fifty lines of random 3-D lines
data = [Gen_RandLine(25, 3) for index in range(50)]
# Creating fifty line objects.
# NOTE: Can't pass empty arrays into 3d version of plot()
lines = [ax.plot(dat[0, 0:1], dat[1, 0:1], dat[2, 0:1])[0] for dat in data]
# Setting the axes properties
ax.set_xlim3d([0.0, 1.0])
ax.set_xlabel('X')
ax.set_ylim3d([0.0, 1.0])
ax.set_ylabel('Y')
ax.set_zlim3d([0.0, 1.0])
ax.set_zlabel('Z')
ax.set_title('3D Test')
# Creating the Animation object
line_ani = animation.FuncAnimation(fig, update_lines, 25, fargs=(data, lines),
interval=50, blit=False)
plt.show()
| cc0-1.0 |
gandalfcode/gandalf | tests/paper_tests/adsodtest.py | 1 | 7950 | #==============================================================================
# adsodtest.py
#==============================================================================
from gandalf.analysis.facade import *
import matplotlib.pyplot as plt
from matplotlib import rc
from mpl_toolkits.axes_grid1 import AxesGrid
import time
#rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('font', **{'family': 'normal', 'weight' : 'bold', 'size' : 14})
rc('text', usetex=True)
# Set all plot limits
xmin = -9.9
xmax = 9.9
rhomin = 0.15
rhomax = 1.15
vxmin = -0.15
vxmax = 0.95
umin = 1.45
umax = 2.98
Nleft = 240
Nright = 60
Nunequal = 60
#Nleft = 80
#Nright = 20
# Extract data from Grad-h SPH simulation
gradhsphsim = newsim("adsod-gradhsph.dat")
gradhsphsim.SetParam('Nlattice1[0]',Nleft)
gradhsphsim.SetParam('Nlattice2[0]',Nright)
setupsim()
run()
x0 = get_data('x') #, sim=0, snap=10)
rho0 = get_data('rho') #, sim=0, snap=10)
vx0 = get_data('vx') #, sim=0, snap=10)
u0 = get_data('u') #, sim=0, snap=10)
# Extract data from Grad-h SPH simulation
gradhsphsim_unequal = newsim("adsod-gradhsph.dat")
gradhsphsim_unequal.SetParam('Nlattice1[0]',Nunequal)
gradhsphsim_unequal.SetParam('Nlattice2[0]',Nunequal)
setupsim()
run()
x1 = get_data('x') #, sim=0, snap=10)
rho1 = get_data('rho') #, sim=0, snap=10)
vx1 = get_data('vx') #, sim=0, snap=10)
u1 = get_data('u') #, sim=0, snap=10)
# Extract data from MFV simulation
mfvsim = newsim("adsod-mfv-moving.dat")
mfvsim.SetParam('Nlattice1[0]',Nleft)
mfvsim.SetParam('Nlattice2[0]',Nright)
setupsim()
run()
x2 = get_data('x') #, sim=1, snap=10)
rho2 = get_data('rho') #, sim=1, snap=10)
vx2 = get_data('vx') #, sim=1, snap=10)
u2 = get_data('u') #, sim=1, snap=10)
# Extract data from MFV simulation
mfvsim_unequal = newsim("adsod-mfv-moving.dat")
mfvsim_unequal.SetParam('Nlattice1[0]',Nunequal)
mfvsim_unequal.SetParam('Nlattice2[0]',Nunequal)
setupsim()
run()
x3 = get_data('x') #, sim=1, snap=10)
rho3 = get_data('rho') #, sim=1, snap=10)
vx3 = get_data('vx') #, sim=1, snap=10)
u3 = get_data('u') #, sim=1, snap=10)
# Extract data from MFM simulation
mfmsim = newsim("adsod-mfm-moving.dat")
mfmsim.SetParam("riemann_solver", "exact")
setupsim()
run()
x4 = get_data('x') #, sim=2, snap=10)
rho4 = get_data('rho') #, sim=2, snap=10)
vx4 = get_data('vx') #, sim=2, snap=10)
u4 = get_data('u') #, sim=2, snap=10)
# Extract data from MFM simulation (unequal-mass)
mfmsim_unequal = newsim("adsod-mfm-moving.dat")
mfmsim_unequal.SetParam("riemann_solver", "exact")
mfmsim_unequal.SetParam('Nlattice1[0]',Nunequal)
mfmsim_unequal.SetParam('Nlattice2[0]',Nunequal)
setupsim()
run()
x5 = get_data('x') #, sim=1, snap=10)
rho5 = get_data('rho') #, sim=1, snap=10)
vx5 = get_data('vx') #, sim=1, snap=10)
u5 = get_data('u') #, sim=1, snap=10)
# Extract data from MFM simulation (HLLC Riemann solver)
mfmsim_hllc = newsim("adsod-mfm-moving.dat")
mfmsim_hllc.SetParam("riemann_solver", "hllc")
setupsim()
run()
x6 = get_data('x') #, sim=2, snap=10)
rho6 = get_data('rho') #, sim=2, snap=10)
vx6 = get_data('vx') #, sim=2, snap=10)
u6 = get_data('u') #, sim=2, snap=10)
# Extract data from MFM simulation (unequal-mass, HLLC Riemann solver)
mfmsim_unequal_hllc = newsim("adsod-mfm-moving.dat")
mfmsim_unequal_hllc.SetParam("riemann_solver", "hllc")
mfmsim_unequal_hllc.SetParam('Nlattice1[0]',Nunequal)
mfmsim_unequal_hllc.SetParam('Nlattice2[0]',Nunequal)
setupsim()
run()
x7 = get_data('x') #, sim=1, snap=10)
rho7 = get_data('rho') #, sim=1, snap=10)
vx7 = get_data('vx') #, sim=1, snap=10)
u7 = get_data('u') #, sim=1, snap=10)
# Extract data for analytical solution
rhodata = get_analytical_data("x","rho") #,sim=0,snap=10)
vxdata = get_analytical_data("x","vx") #,sim=0,snap=10)
udata = get_analytical_data("x","u") #,sim=0,snap=10)
# Create matplotlib figure object with shared x-axis
fig, axarr = plt.subplots(3, 3, sharex='col', sharey='row', figsize=(13,9))
fig.subplots_adjust(hspace=0.0001, wspace=0.0001)
fig.subplots_adjust(bottom=0.07, top=0.99, left=0.045, right=0.99)
axarr[0,0].set_ylabel(r"$\rho$")
axarr[0,0].set_xlim([xmin, xmax])
axarr[0,0].set_ylim([rhomin, rhomax])
axarr[0,0].plot(rhodata.x_data, rhodata.y_data, linestyle='-', color="red", label='Exact solution')
axarr[0,0].scatter(x0, rho0, marker='o', facecolors='none', edgecolors='blue', s=10, label='Gradh-SPH, equal-mass')
axarr[0,0].scatter(x1, rho1, color='black', marker='+', s=32, label='Gradh-SPH, unequal-mass')
axarr[0,0].legend(fontsize=10)
axarr[1,0].set_ylabel(r"$v_x$")
axarr[1,0].set_ylim([vxmin, vxmax])
axarr[1,0].plot(vxdata.x_data, vxdata.y_data, linestyle='-', color="red")
axarr[1,0].scatter(x1, vx1, color='black', marker='+', s=32)
axarr[1,0].scatter(x0, vx0, marker='o', facecolors='none', edgecolors='blue', s=10)
axarr[2,0].set_xlabel(r"$x$")
axarr[2,0].set_ylabel(r"$u$")
axarr[2,0].set_ylim([umin, umax])
axarr[2,0].plot(udata.x_data, udata.y_data, linestyle='-', color="red")
axarr[2,0].scatter(x1, u1, color='black', marker='+', s=32)
axarr[2,0].scatter(x0, u0, marker='o', facecolors='none', edgecolors='blue', s=10)
axarr[0,1].set_xlim([xmin, xmax])
axarr[0,1].set_ylim([rhomin, rhomax])
axarr[0,1].plot(rhodata.x_data, rhodata.y_data, linestyle='-', color="red", label='Exact solution')
axarr[0,1].scatter(x2, rho2, marker='o', facecolors='none', edgecolors='blue', s=10, label='MFV, equal-mass')
axarr[0,1].scatter(x3, rho3, color='black', marker='+', s=32, label='MFV, unequal-mass')
axarr[0,1].legend(fontsize=10)
axarr[1,1].set_ylim([vxmin, vxmax])
axarr[1,1].plot(vxdata.x_data, vxdata.y_data, linestyle='-', color="red")
axarr[1,1].scatter(x3, vx3, color='black', marker='+', s=32)
axarr[1,1].scatter(x2, vx2, marker='o', facecolors='none', edgecolors='blue', s=10)
axarr[2,1].set_xlabel(r"$x$")
axarr[2,1].set_ylim([umin, umax])
axarr[2,1].plot(udata.x_data, udata.y_data, linestyle='-', color="red")
axarr[2,1].scatter(x3, u3, color='black', marker='+', s=32)
axarr[2,1].scatter(x2, u2, marker='o', facecolors='none', edgecolors='blue', s=10)
axarr[0,2].set_xlim([xmin, xmax])
axarr[0,2].set_ylim([rhomin, rhomax])
axarr[0,2].plot(rhodata.x_data, rhodata.y_data, linestyle='-', color="red", label='Exact solution')
axarr[0,2].scatter(x4, rho4, marker='o', facecolors='none', edgecolors='blue', s=10, label='MFM, equal-mass')
axarr[0,2].scatter(x5, rho5, color='black', marker='+', s=32, label='MFM, unequal-mass')
axarr[0,2].legend(fontsize=10)
axarr[1,2].set_ylim([vxmin, vxmax])
axarr[1,2].plot(vxdata.x_data, vxdata.y_data, linestyle='-', color="red")
axarr[1,2].scatter(x5, vx5, color='black', marker='+', s=32)
axarr[1,2].scatter(x4, vx4, marker='o', facecolors='none', edgecolors='blue', s=10)
axarr[2,2].set_xlabel(r"$x$")
axarr[2,2].set_ylim([umin, umax])
axarr[2,2].plot(udata.x_data, udata.y_data, linestyle='-', color="red")
axarr[2,2].scatter(x5, u5, color='black', marker='+', s=32)
axarr[2,2].scatter(x4, u4, marker='o', facecolors='none', edgecolors='blue', s=10)
#axarr[0,3].set_xlim([xmin, xmax])
#axarr[0,3].set_ylim([rhomin, rhomax])
#axarr[0,3].plot(rhodata.x_data, rhodata.y_data, linestyle='-', color="red", label='Exact solution')
#axarr[0,3].scatter(x6, rho6, color='black', marker='+', s=32, label='MFM-HLLC, equal-mass')
#axarr[0,3].scatter(x7, rho7, marker='o', facecolors='none', edgecolors='blue', s=10, label='MFM-HLLC, unequal-mass')
#axarr[0,3].legend(fontsize=10)
#axarr[1,3].set_ylim([vxmin, vxmax])
#axarr[1,3].plot(vxdata.x_data, vxdata.y_data, linestyle='-', color="red")
#axarr[1,3].scatter(x6, vx6, color='black', marker='+', s=32)
#axarr[1,3].scatter(x7, vx7, marker='o', facecolors='none', edgecolors='blue', s=10)
#axarr[2,3].set_xlabel(r"$x$")
#axarr[2,3].set_ylim([umin, umax])
#axarr[2,3].plot(udata.x_data, udata.y_data, linestyle='-', color="red")
#axarr[2,3].scatter(x6, u6, color='black', marker='+', s=32)
#axarr[2,3].scatter(x7, u7, marker='o', facecolors='none', edgecolors='blue', s=10)
plt.show()
fig.savefig('adsod.pdf', dpi=50)
| gpl-2.0 |
trankmichael/scikit-learn | sklearn/utils/tests/test_testing.py | 144 | 4121 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
# Check that the nose implementation of assert_greater gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LDA()
tree = DecisionTreeClassifier()
# LDA doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
adelomana/cassandra | conditionedFitness/figureClonal/clonal.3.3.py | 2 | 3270 | import matplotlib,numpy,sys,scipy,pickle
import matplotlib.pyplot
sys.path.append('../lib')
import calculateStatistics
### MAIN
matplotlib.rcParams.update({'font.size':36,'font.family':'Times New Roman','xtick.labelsize':28,'ytick.labelsize':28})
thePointSize=12
jarDir='/Users/adriandelomana/scratch/'
# clonal 3.3
xSignal=numpy.array([[175, 153, 186, 189, 157],[37, 59, 46, 67, 70]])
xNoSignal=numpy.array([[200, 202, 224, 194, 193],[71, 66, 71, 87, 60]])
cf_mu_0, cf_sd_0, pvalue_0 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[25, 28, 19, 18, 16],[0, 9, 4, 9, 1]])
xNoSignal=numpy.array([[24, 16, 29, 17, 23],[4, 7, 5, 3, 4]])
cf_mu_50, cf_sd_50, pvalue_50 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[96, 97, 94, 127, 80],[32, 36, 36, 42, 36]])
xNoSignal=numpy.array([[104, 137, 110, 128, 113],[52, 36, 32, 50, 41]])
cf_mu_100, cf_sd_100, pvalue_100 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[204, 223, 199, 249, 193],[141, 131, 125, 154, 139]])
xNoSignal=numpy.array([[171, 217, 240, 200, 168],[166, 192, 163, 196, 170]])
cf_mu_150, cf_sd_150, pvalue_150 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[197, 216, 224, 219, 208],[181, 182, 186, 179, 116]])
xNoSignal=numpy.array([[261, 227, 229, 188, 236],[179, 169, 174, 183, 164]])
cf_mu_200, cf_sd_200, pvalue_200 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[226, 214, 222, 224, 211],[235, 199, 177, 199, 184]])
xNoSignal=numpy.array([[223, 230, 215, 273, 245],[204, 199, 247, 220, 204]])
cf_mu_250, cf_sd_250, pvalue_250 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[222, 235, 253, 234, 189],[175, 160, 194, 156, 178]])
xNoSignal=numpy.array([[212, 222, 246, 228, 220],[191, 192, 198, 217, 199]])
cf_mu_300, cf_sd_300, pvalue_300 = calculateStatistics.main(xSignal, xNoSignal)
x = [0, 50, 100, 150, 200, 250, 300]
y = [cf_mu_0, cf_mu_50, cf_mu_100, cf_mu_150, cf_mu_200, cf_mu_250, cf_mu_300]
z = [cf_sd_0, cf_sd_50, cf_sd_100, cf_sd_150, cf_sd_200, cf_sd_250, cf_sd_300]
w = [pvalue_0, pvalue_50, pvalue_100, pvalue_150, pvalue_200, pvalue_250, pvalue_300]
matplotlib.pyplot.errorbar(x,y,yerr=z,fmt=':o',color='green',ecolor='green',markeredgecolor='green',capsize=0,ms=thePointSize,mew=0)
for i in range(len(w)):
if y[i] > 0.:
sp=y[i]+z[i]+0.02
else:
sp=y[i]-z[i]-0.02
if w[i] < 0.05 and w[i] >= 0.01:
matplotlib.pyplot.scatter(x[i], sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
if w[i] < 0.01:
matplotlib.pyplot.scatter(x[i]-3, sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
matplotlib.pyplot.scatter(x[i]+3, sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
matplotlib.pyplot.plot([0,300],[0,0],'--',color='black')
matplotlib.pyplot.xlim([-25,325])
matplotlib.pyplot.ylim([-0.4,0.4])
matplotlib.pyplot.yticks([-0.4,-0.2,0,0.2,0.4])
matplotlib.pyplot.xlabel('Generation')
matplotlib.pyplot.ylabel('Conditioned Fitness')
matplotlib.pyplot.tight_layout(pad=0.5)
matplotlib.pyplot.savefig('figure.clonal.3.3.pdf')
# save processed data alternative plotting
trajectory=[x,y,z]
jarFile=jarDir+'clonal.3.3.pickle'
f=open(jarFile,'wb')
pickle.dump(trajectory,f)
f.close()
| gpl-3.0 |
wukan1986/kquant_data | demo_stock/A_1day_000016/E03_merge_000016.py | 1 | 2779 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Given a data directory, generate the corresponding instrument data for the index constituents.
Two cases are handled:
1. Whole-market data, where the relevant symbols are tagged with their index weights.
2. Only instruments that have historically been index constituents are processed; since the data has already been converted in earlier steps, here we only need to select those instruments and process them.
"""
import os
import pandas as pd
from kquant_data.config import __CONFIG_H5_STK_WEIGHT_DIR__, __CONFIG_H5_STK_DIR__, __CONFIG_TDX_STK_DIR__, \
__CONFIG_H5_STK_DIVIDEND_DIR__
from kquant_data.processing.merge import merge_weight, load_index_weight, merge_weight_internal
from kquant_data.stock.symbol import get_symbols_from_wind_code_df
from kquant_data.api import get_datetime
from kquant_data.processing.utils import filter_dataframe, split_into_group
from kquant_data.processing.MergeBar import MergeBar
from kquant_data.stock.stock import read_h5_tdx
class MergeDataStock_1day_000016(MergeBar):
def __init__(self, folder):
super(MergeDataStock_1day_000016, self).__init__(folder)
self.bar_size = 86400
def init_symbols(self):
# No longer taken from the exported symbol list; inferred from the folder contents instead
wind_code = '000016.SH'
path = os.path.join(__CONFIG_H5_STK_WEIGHT_DIR__, wind_code)
df = load_index_weight(path)
wind_codes = pd.DataFrame(list(df.columns), columns=['wind_code'])
df = get_symbols_from_wind_code_df(wind_codes)
self.instruments = df
path = os.path.join(self.folder, 'Symbol.csv')
self.instruments.to_csv(path, index=False)
self.instruments_group = split_into_group(self.instruments, self.group_len)
def init_datetime(self):
df = read_h5_tdx("sh", "000016", 86400, __CONFIG_H5_STK_DIR__, __CONFIG_TDX_STK_DIR__,
__CONFIG_H5_STK_DIVIDEND_DIR__)
df = filter_dataframe(df, 'DateTime', None, None, fields=['DateTime'])
# Saving this is optional
self.datetime = df
super(MergeDataStock_1day_000016, self).init_datetime()
def init_fields(self):
self.fields = ['Open', 'High', 'Low', 'Close', 'Volume', 'Amount', 'backward_factor', 'forward_factor']
def read_data(self, market, code, bar_size):
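# Load the per-symbol daily bars from HDF5; if the file is missing or
# unreadable, fall back to an empty frame with the expected columns.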
h5_path = os.path.join(__CONFIG_H5_STK_DIR__, '1day', market, '%s%s.h5' % (market, code))
try:
df = pd.read_hdf(h5_path)
df = filter_dataframe(df, 'DateTime', None, None, None)
except:
return pd.DataFrame(columns=self.fields + ['DateTime'])
return df
if __name__ == '__main__':
# Build open/high/low/close and other bar data for the SSE 50 constituents
path = os.path.join(__CONFIG_H5_STK_DIR__, "1day_000016.SH")
mdf = MergeDataStock_1day_000016(path)
mdf.merge()
mdf.hmerge()
mdf.clear()
pass
| bsd-2-clause |
datamicroscopes/kernels | bin/plot.py | 1 | 2322 | import sys
import argparse
import os
import json
import numpy as np
import matplotlib.pylab as plt
def draw(obj, outfile):
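# Reshape the benchmark results into (groups x group-size x features) and,
# for each feature, plot time per iteration per entity against the number
# of groups, together with a linear-scaling reference line.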
groups, entities_per_group, features = (
obj['args']['groups'],
obj['args']['entities_per_group'],
obj['args']['features'],
)
results = obj['results']
results = np.array(results).reshape(
(len(groups), len(entities_per_group), len(features)))
groups = np.array(groups, dtype=np.float)
for i in xrange(len(features)):
data = results[:, :, i]
linear = groups * \
(data[0, 0] /
(float(entities_per_group[0]) * groups[0]) / groups[0])
plt.plot(groups, linear, 'k--')
for j in xrange(len(entities_per_group)):
plt.plot(
groups,
data[:, j] / (float(entities_per_group[j]) * groups))
legend = ['linear']
legend.extend(['gsize {}'.format(gsize)
for gsize in entities_per_group])
plt.legend(legend, loc='lower right')
plt.xlabel('groups')
plt.ylabel('time/iteration/entity (sec)')
plt.ylim(ymin=0)
plt.tight_layout()
plt.savefig(outfile)
plt.close()
def main(args):
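# Walk the results directory (optionally syncing it down from a multyvac
# volume first) and render one PDF per JSON result file found.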
parser = argparse.ArgumentParser()
parser.add_argument("--results-dir", required=True)
parser.add_argument("--sync", action='store_true')
parser.add_argument("--volume")
args = parser.parse_args(args)
if args.sync and not args.volume:
raise ValueError("--sync requires --volume")
if args.sync:
import multyvac
vol = multyvac.volume.get(args.volume)
vol.sync_down("", args.results_dir)
for dirpath, _, filenames in os.walk(args.results_dir):
for fname in filenames:
toks = fname.split(".")
if len(toks) != 2 or toks[1] != 'json':
continue
p = os.path.join(dirpath, fname)
outp = os.path.join(dirpath, '{}.pdf'.format(toks[0]))
with open(p, 'r') as fp:
try:
obj = json.load(fp)
except ValueError:
print "skipping file {}".format(p)
continue
draw(obj, outp)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
wasade/qiime | qiime/make_bootstrapped_tree.py | 1 | 1352 | #!/usr/bin/env python
from __future__ import division
__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "[email protected]"
"""takes a tree and bootstrap support file and writes a pdf, colored by
bootstrap support
"""
from matplotlib import use
use('Agg', warn=False)
from cogent.draw.dendrogram import SquareDendrogram
import os.path
import sys
def write_pdf_bootstrap_tree(tree, output_f, hits_dict):
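# Color each tip by its bootstrap support bucket (< 0.25 blue, < 0.5 green,
# < 0.75 yellow, otherwise red); tips missing from hits_dict stay black.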
def f(node):
if not node.Name:
return 'black'
tip_id = node.Name.split('/')[0]
try:
if hits_dict[tip_id] < .25:
return 'blue'
elif hits_dict[tip_id] < .5:
return 'green'
elif hits_dict[tip_id] < .75:
return 'yellow'
elif hits_dict[tip_id] <= 1.1:
return 'red'
return 'black'
except:
return 'black'
t = SquareDendrogram(tree)
# Make output size proportional to the tree size.
width = 8 * len(tree.tips())
height = 8 * len(tree.tips())
if width < 700:
width = 700
if height < 700:
height = 700
t.drawToPDF(output_f, width, height, edge_color_callback=f)
| gpl-2.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/tests/test_msgpack/test_newspec.py | 9 | 2586 | # coding: utf-8
from pandas.msgpack import packb, unpackb, ExtType
def test_str8():
header = b'\xd9'
data = b'x' * 32
b = packb(data.decode(), use_bin_type=True)
assert len(b) == len(data) + 2
assert b[0:2] == header + b'\x20'
assert b[2:] == data
assert unpackb(b) == data
data = b'x' * 255
b = packb(data.decode(), use_bin_type=True)
assert len(b) == len(data) + 2
assert b[0:2] == header + b'\xff'
assert b[2:] == data
assert unpackb(b) == data
def test_bin8():
header = b'\xc4'
data = b''
b = packb(data, use_bin_type=True)
assert len(b) == len(data) + 2
assert b[0:2] == header + b'\x00'
assert b[2:] == data
assert unpackb(b) == data
data = b'x' * 255
b = packb(data, use_bin_type=True)
assert len(b) == len(data) + 2
assert b[0:2] == header + b'\xff'
assert b[2:] == data
assert unpackb(b) == data
def test_bin16():
header = b'\xc5'
data = b'x' * 256
b = packb(data, use_bin_type=True)
assert len(b) == len(data) + 3
assert b[0:1] == header
assert b[1:3] == b'\x01\x00'
assert b[3:] == data
assert unpackb(b) == data
data = b'x' * 65535
b = packb(data, use_bin_type=True)
assert len(b) == len(data) + 3
assert b[0:1] == header
assert b[1:3] == b'\xff\xff'
assert b[3:] == data
assert unpackb(b) == data
def test_bin32():
header = b'\xc6'
data = b'x' * 65536
b = packb(data, use_bin_type=True)
assert len(b) == len(data) + 5
assert b[0:1] == header
assert b[1:5] == b'\x00\x01\x00\x00'
assert b[5:] == data
assert unpackb(b) == data
def test_ext():
def check(ext, packed):
assert packb(ext) == packed
assert unpackb(packed) == ext
check(ExtType(0x42, b'Z'), b'\xd4\x42Z') # fixext 1
check(ExtType(0x42, b'ZZ'), b'\xd5\x42ZZ') # fixext 2
check(ExtType(0x42, b'Z'*4), b'\xd6\x42' + b'Z'*4) # fixext 4
check(ExtType(0x42, b'Z'*8), b'\xd7\x42' + b'Z'*8) # fixext 8
check(ExtType(0x42, b'Z'*16), b'\xd8\x42' + b'Z'*16) # fixext 16
# ext 8
check(ExtType(0x42, b''), b'\xc7\x00\x42')
check(ExtType(0x42, b'Z'*255), b'\xc7\xff\x42' + b'Z'*255)
# ext 16
check(ExtType(0x42, b'Z'*256), b'\xc8\x01\x00\x42' + b'Z'*256)
check(ExtType(0x42, b'Z'*0xffff), b'\xc8\xff\xff\x42' + b'Z'*0xffff)
# ext 32
check(ExtType(0x42, b'Z'*0x10000), b'\xc9\x00\x01\x00\x00\x42' + b'Z'*0x10000)
# needs large memory
#check(ExtType(0x42, b'Z'*0xffffffff),
# b'\xc9\xff\xff\xff\xff\x42' + b'Z'*0xffffffff)
| apache-2.0 |
effigies/mne-python | mne/viz/tests/test_evoked.py | 1 | 3310 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
import matplotlib.pyplot as plt
from mne import io, read_events, Epochs
from mne import pick_types
from mne.channels import read_layout
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.1
n_chan = 6
layout = read_layout('Vectorview-all')
def _get_raw():
return io.Raw(raw_fname, preload=False)
def _get_events():
return read_events(event_name)
def _get_picks(raw):
return pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
def _get_epochs():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
# Use a subset of channels for plotting speed
picks = np.round(np.linspace(0, len(picks) + 1, n_chan)).astype(int)
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
return epochs
def _get_epochs_delayed_ssp():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
reject = dict(mag=4e-12)
epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
proj='delayed', reject=reject)
return epochs_delayed_ssp
def test_plot_evoked():
"""Test plotting of evoked
"""
evoked = _get_epochs().average()
with warnings.catch_warnings(record=True):
evoked.plot(proj=True, hline=[1])
# plot with bad channels excluded
evoked.plot(exclude='bads')
evoked.plot(exclude=evoked.info['bads']) # does the same thing
# test selective updating of dict keys is working.
evoked.plot(hline=[1], units=dict(mag='femto foo'))
evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
evoked_delayed_ssp.plot(proj='interactive')
evoked_delayed_ssp.apply_proj()
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive')
evoked_delayed_ssp.info['projs'] = []
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive')
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive', axes='foo')
evoked.plot_image(proj=True)
# plot with bad channels excluded
evoked.plot_image(exclude='bads')
evoked.plot_image(exclude=evoked.info['bads']) # does the same thing
plt.close('all')
| bsd-3-clause |
lounick/task_scheduling | task_scheduling/op_problem.py | 2 | 9881 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, lounick and decabyte
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of task_scheduling nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Orienteering problem solver
Implementation of an integer linear formulation for maximizing the targets visited by a vehicle under cost constraint.
The vehicle has to start and finish at fixed points (by default the first and the last point of the cost matrix) and it is allowed to skip targets.
Described in:
Vansteenwegen, Pieter, Wouter Souffriau, and Dirk Van Oudheusden. "The orienteering problem: A survey."
European Journal of Operational Research 209.1 (2011): 1-10.
"""
from __future__ import division
import numpy as np
from gurobipy import *
def _callback(model, where):
"""Callback function for the solver
Callback function that adds lazy constraints for the optimisation process. Here it dynamically imposes cardinality
constraints for the vertices in the solution, ensuring that if a path enters a vertex there must be a path exiting.
Parameters
----------
model : object
The gurobi model instance
where : int
Gurobi specific callback variable
Returns
-------
"""
if where == GRB.callback.MIPSOL:
V = set(range(model._n))
idx_start = model._idxStart
# idx_finish = model._idxFinish
# solmat = np.zeros((model._n, model._n))
selected = []
for i in V:
sol = model.cbGetSolution([model._eVars[i, j] for j in V])
selected += [(i, j) for j in V if sol[j] > 0.5]
# solmat[i, :] = sol
if len(selected) <= 1:
return
for k in range(len(selected)):
el = selected[k]
entry = el[0]
if idx_start != entry:
expr1 = quicksum(model._eVars[i, entry] for i in V)
expr2 = quicksum(model._eVars[entry, j] for j in V)
model.cbLazy(expr1, GRB.EQUAL, expr2)
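# Convenience sketch (not required by the formulation itself): total travel
# cost of a route list as returned by op_solver, summing cost[i, j] over
# consecutive visits; the fixed per-node sensing energy is not included here.
def route_cost(route, cost):
    return sum(cost[route[k], route[k + 1]] for k in range(len(route) - 1))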
def op_solver(cost, profit=None, cost_max=None, idx_start=None, idx_finish=None, **kwargs):
"""Orienteering problem solver instance
Cost constrained traveling salesman problem solver for a single vehicle using the Gurobi MILP optimiser.
Parameters
----------
cost : ndarray (n, dims)
        Cost matrix for traveling from point to point. Here it is the time (seconds) needed to go from point a to point b.
profit : Optional[vector]
Profit vector for profit of visiting each point.
cost_max : Optional[double]
Maximum running time of the mission in seconds.
idx_start : Optional[int]
Optional starting point for the tour. If none is provided the first point of the array is chosen.
idx_finish : Optional[int]
Optional ending point of the tour. If none is provided the last point of the array is chosen.
kwargs : Optional[list]
        Optional extra arguments.
Returns
-------
route : list
The calculated route.
profit : double
The profit of the route.
m : object
A gurobi model object.
"""
# Number of points
n = cost.shape[0]
# other params
node_energy = float(kwargs.get('node_energy', 1.0))
# Check for default values
if idx_start is None:
idx_start = 0
if idx_finish is None:
idx_finish = n - 1
if profit is None:
profit = np.ones(n)
if cost_max is None:
cost_max = cost[idx_start, idx_finish]
# Create the vertices set
V = set(range(n))
m = Model()
# Create model variables
e_vars = {}
for i in V:
for j in V:
e_vars[i, j] = m.addVar(vtype=GRB.BINARY, name='e_' + str(i) + '_' + str(j))
m.update()
for i in V:
e_vars[i, i].ub = 0
m.update()
u_vars = {}
for i in V:
u_vars[i] = m.addVar(vtype=GRB.INTEGER, name='u_' + str(i))
m.update()
# Set objective function (0)
expr = 0
for i in V:
for j in V:
if i != idx_start and i != idx_finish:
expr += profit[i] * e_vars[i, j]
m.setObjective(expr, GRB.MAXIMIZE)
m.update()
# Constraints
# Add constraints for the initial and final node (1)
# None enters the starting point
m.addConstr(quicksum(e_vars[j, idx_start] for j in V.difference([idx_start])) == 0, "s_entry")
m.update()
# None exits the finish point
m.addConstr(quicksum(e_vars[idx_finish, j] for j in V.difference([idx_finish])) == 0, "f_exit")
m.update()
# Always exit the starting point
m.addConstr(quicksum(e_vars[idx_start, i] for i in V.difference([idx_start])) == 1, "s_exit")
m.update()
# Always enter the finish point
m.addConstr(quicksum(e_vars[i, idx_finish] for i in V.difference([idx_finish])) == 1, "f_entry")
m.update()
# From all other points someone may exit
for i in V.difference([idx_start, idx_finish]):
m.addConstr(quicksum(e_vars[i, j] for j in V if i != j) <= 1, "v_" + str(i) + "_exit")
m.update()
# To all other points someone may enter
for i in V.difference([idx_start, idx_finish]):
m.addConstr(quicksum(e_vars[j, i] for j in V if i != j) <= 1, "v_" + str(i) + "_entry")
m.update()
# for i in V.difference([idx_start, idx_finish]):
# m.addConstr(quicksum(e_vars[j, i] for j in V if i != j) == quicksum(e_vars[i, j] for j in V if i != j), "v_" + str(i) + "_cardinality")
# m.update()
# Add cost constraints (3)
expr = 0
for i in V:
for j in V:
# add a fixed cost for intermediate nodes (sensing energy)
if i != idx_start and i != idx_finish:
expr += node_energy * e_vars[i, j]
expr += cost[i, j] * e_vars[i, j]
m.addConstr(expr <= cost_max, "max_energy")
m.update()
# Constraint (4)
for i in V:
u_vars[i].lb = 0
u_vars[i].ub = n
m.update()
# Add subtour constraint (5)
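    # (These u_i ordering constraints are the Miller-Tucker-Zemlin subtour-elimination form.)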
for i in V:
for j in V:
m.addConstr(u_vars[i] - u_vars[j] + 1, GRB.LESS_EQUAL, (n - 1)*(1 - e_vars[i, j]),
"sec_" + str(i) + "_" + str(j))
m.update()
m._n = n
m._eVars = e_vars
m._uVars = u_vars
m._idxStart = idx_start
m._idxFinish = idx_finish
m.update()
m.params.OutputFlag = int(kwargs.get('output_flag', 0))
m.params.TimeLimit = float(kwargs.get('time_limit', 60.0))
m.params.MIPGap = float(kwargs.get('mip_gap', 0.0))
m.params.LazyConstraints = 1
m.optimize(_callback)
solution = m.getAttr('X', e_vars)
selected = [(i, j) for i in V for j in V if solution[i, j] > 0.5]
# solmat = np.zeros((n, n))
# for k, v in solution.iteritems():
# solmat[k[0], k[1]] = v
# print("\n")
# print(solmat)
# print(u)
# print(selected)
# print(sum(cost[s[0], s[1]] for s in selected))
route = []
next_city = idx_start
while len(selected) > 0:
for i in range(len(selected)):
if selected[i][0] == next_city:
route.append(next_city)
next_city = selected[i][1]
selected.pop(i)
break
route.append(next_city)
return route, m.objVal, m
def main():
import matplotlib.pyplot as plt
import task_scheduling.utils as tsu
import random
nodes = tsu.generate_nodes(n=100, lb=-100, up=100, dims=2)
cost = tsu.calculate_distances(nodes)
nodes = []
random.seed(42)
nodes.append([0,0])
for i in range(1,6):
for j in range(-2,3):
ni = i
nj = j
# ni = random.uniform(-0.5,0.5) + i
# nj = random.uniform(-0.5,0.5) + j
nodes.append([ni,nj])
nodes.append([6,0])
nodes = np.array(nodes)
cost = tsu.calculate_distances(nodes)
max_cost = [25.5]
for mc in max_cost:
solution, objective, _ = tsu.solve_problem(op_solver, cost, cost_max=mc, output_flag=1, mip_gap=0.0, time_limit=3600)
util = 0
for i in solution:
extras = 0
if i != 0 and i != solution[len(solution)-1]:
for j in range(cost.shape[0]):
if j != i and j not in solution and j != 0 and j != solution[len(solution)-1]:
extras += np.e**(-2*cost[i,j])
util += 1 + extras
print("Utility: {0}".format(util))
fig, ax = tsu.plot_problem(nodes, solution, objective)
plt.show()
if __name__ == '__main__':
main() | bsd-3-clause |
okuta/chainer | examples/glance/glance.py | 8 | 2876 | # Note for contributors:
# This example code is referred to from "Chainer at a Glance" tutorial.
# If this file is to be modified, please also update the line numbers in
# `docs/source/glance.rst` accordingly.
import chainer as ch
from chainer import datasets
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import numpy as np
import matplotlib
matplotlib.use('Agg')
mushroomsfile = 'mushrooms.csv'
data_array = np.genfromtxt(
mushroomsfile, delimiter=',', dtype=str, skip_header=1)
for col in range(data_array.shape[1]):
data_array[:, col] = np.unique(data_array[:, col], return_inverse=True)[1]
X = data_array[:, 1:].astype(np.float32)
Y = data_array[:, 0].astype(np.int32)[:, None]
train, test = datasets.split_dataset_random(
datasets.TupleDataset(X, Y), int(data_array.shape[0] * .7))
train_iter = ch.iterators.SerialIterator(train, 100)
test_iter = ch.iterators.SerialIterator(
test, 100, repeat=False, shuffle=False)
# Network definition
def MLP(n_units, n_out):
layer = ch.Sequential(L.Linear(n_units), F.relu)
model = layer.repeat(2)
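    # repeat(2) stacks two fresh copies of (Linear -> relu), i.e. two hidden layers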
model.append(L.Linear(n_out))
return model
model = L.Classifier(
MLP(44, 1), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy)
# Setup an optimizer
optimizer = ch.optimizers.SGD().setup(model)
# Create the updater, using the optimizer
updater = training.StandardUpdater(train_iter, optimizer, device=-1)
# Set up a trainer
trainer = training.Trainer(updater, (50, 'epoch'), out='result')
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=-1))
# Dump a computational graph from 'loss' variable at the first iteration
# The "main" refers to the target link of the "main" optimizer.
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=(20, 'epoch'))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Save two plot images to the result dir
trainer.extend(
extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', file_name='loss.png'))
trainer.extend(
extensions.PlotReport(
['main/accuracy', 'validation/main/accuracy'],
'epoch', file_name='accuracy.png'))
# Print selected entries of the log to stdout
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
# Run the training
trainer.run()
x, t = test[np.random.randint(len(test))]
predict = model.predictor(x[None]).array
predict = predict[0][0]
if predict >= 0:
print('Predicted Poisonous, Actual ' + ['Edible', 'Poisonous'][t[0]])
else:
print('Predicted Edible, Actual ' + ['Edible', 'Poisonous'][t[0]])
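# Optional follow-up sketch: persist the trained classifier with Chainer's NPZ
# serializer and reload it into a freshly built network for inference; the file
# name 'result/mushrooms.model' is an arbitrary, illustrative choice.
ch.serializers.save_npz('result/mushrooms.model', model)
restored = L.Classifier(
    MLP(44, 1), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy)
restored.predictor(x[None])  # one dummy forward pass so all link shapes are initialized
ch.serializers.load_npz('result/mushrooms.model', restored)
with ch.using_config('train', False), ch.no_backprop_mode():
    print('Restored model score:', restored.predictor(x[None]).array[0][0])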
| mit |
allanino/nupic | nupic/math/roc_utils.py | 49 | 8308 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Utility functions to compute ROC (Receiver Operator Characteristic) curves
and AUC (Area Under the Curve).
The ROCCurve() and AreaUnderCurve() functions are based on the roc_curve()
and auc() functions found in metrics.py module of scikit-learn
(http://scikit-learn.org/stable/). Scikit-learn has a BSD license (3 clause).
Following is the original license/credits statement from the top of the
metrics.py file:
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD Style.
"""
import numpy as np
def ROCCurve(y_true, y_score):
"""compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Parameters
----------
y_true : array, shape = [n_samples]
true binary labels
y_score : array, shape = [n_samples]
target scores, can either be probability estimates of
the positive class, confidence values, or binary decisions.
Returns
-------
fpr : array, shape = [>2]
False Positive Rates
tpr : array, shape = [>2]
True Positive Rates
thresholds : array, shape = [>2]
Thresholds on y_score used to compute fpr and tpr
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
References
----------
http://en.wikipedia.org/wiki/Receiver_operating_characteristic
"""
y_true = np.ravel(y_true)
classes = np.unique(y_true)
# ROC only for binary classification
if classes.shape[0] != 2:
raise ValueError("ROC is defined for binary classification only")
y_score = np.ravel(y_score)
n_pos = float(np.sum(y_true == classes[1])) # nb of true positive
n_neg = float(np.sum(y_true == classes[0])) # nb of true negative
thresholds = np.unique(y_score)
neg_value, pos_value = classes[0], classes[1]
tpr = np.empty(thresholds.size, dtype=np.float) # True positive rate
fpr = np.empty(thresholds.size, dtype=np.float) # False positive rate
# Build tpr/fpr vector
current_pos_count = current_neg_count = sum_pos = sum_neg = idx = 0
signal = np.c_[y_score, y_true]
sorted_signal = signal[signal[:, 0].argsort(), :][::-1]
last_score = sorted_signal[0][0]
for score, value in sorted_signal:
if score == last_score:
if value == pos_value:
current_pos_count += 1
else:
current_neg_count += 1
else:
tpr[idx] = (sum_pos + current_pos_count) / n_pos
fpr[idx] = (sum_neg + current_neg_count) / n_neg
sum_pos += current_pos_count
sum_neg += current_neg_count
current_pos_count = 1 if value == pos_value else 0
current_neg_count = 1 if value == neg_value else 0
idx += 1
last_score = score
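  # for/else: the loop has no break, so this always runs and flushes the counts of the final score group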
else:
tpr[-1] = (sum_pos + current_pos_count) / n_pos
fpr[-1] = (sum_neg + current_neg_count) / n_neg
# hard decisions, add (0,0)
if fpr.shape[0] == 2:
fpr = np.array([0.0, fpr[0], fpr[1]])
tpr = np.array([0.0, tpr[0], tpr[1]])
# trivial decisions, add (0,0) and (1,1)
elif fpr.shape[0] == 1:
fpr = np.array([0.0, fpr[0], 1.0])
tpr = np.array([0.0, tpr[0], 1.0])
return fpr, tpr, thresholds
def AreaUnderCurve(x, y):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
Parameters
----------
x : array, shape = [n]
x coordinates
y : array, shape = [n]
y coordinates
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred)
>>> metrics.auc(fpr, tpr)
0.75
"""
#x, y = check_arrays(x, y)
if x.shape[0] != y.shape[0]:
raise ValueError('x and y should have the same shape'
' to compute area under curve,'
' but x.shape = %s and y.shape = %s.'
% (x.shape, y.shape))
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
# reorder the data points according to the x axis
order = np.argsort(x)
x = x[order]
y = y[order]
h = np.diff(x)
area = np.sum(h * (y[1:] + y[:-1])) / 2.0
return area
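def _areaUnderCurveTrapz(x, y):
  """Cross-check sketch: the same trapezoidal area computed with numpy.trapz
  after sorting by x. Handy for sanity-checking AreaUnderCurve() on small
  inputs; it is not used by the functions above."""
  order = np.argsort(x)
  return np.trapz(y[order], x[order])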
def _printNPArray(x, precision=2):
format = "%%.%df" % (precision)
for elem in x:
print format % (elem),
print
def _test():
"""
This is a toy example, to show the basic functionality:
The dataset is:
actual prediction
-------------------------
0 0.1
0 0.4
1 0.5
1 0.3
1 0.45
Some ROC terminology:
A True Positive (TP) is when we predict TRUE and the actual value is 1.
A False Positive (FP) is when we predict TRUE, but the actual value is 0.
The True Positive Rate (TPR) is TP/P, where P is the total number of actual
positives (3 in this example, the last 3 samples).
The False Positive Rate (FPR) is FP/N, where N is the total number of actual
negatives (2 in this example, the first 2 samples)
Here are the classifications at various choices for the threshold. The
prediction is TRUE if the predicted value is >= threshold and FALSE otherwise.
actual pred 0.50 0.45 0.40 0.30 0.10
---------------------------------------------------------
0 0.1 0 0 0 0 1
0 0.4 0 0 1 1 1
1 0.5 1 1 1 1 1
1 0.3 0 0 0 1 1
1 0.45 0 1 1 1 1
TruePos(TP) 1 2 2 3 3
FalsePos(FP) 0 0 1 1 2
TruePosRate(TPR) 1/3 2/3 2/3 3/3 3/3
FalsePosRate(FPR) 0/2 0/2 1/2 1/2 2/2
The ROC curve is a plot of FPR on the x-axis and TPR on the y-axis. Basically,
one can pick any operating point along this curve to run, the operating point
determined by which threshold you want to use. By changing the threshold, you
tradeoff TP's for FPs.
The more area under this curve, the better the classification algorithm is.
The AreaUnderCurve() function can be used to compute the area under this
curve.
"""
yTrue = np.array([0, 0, 1, 1, 1])
yScore = np.array([0.1, 0.4, 0.5, 0.3, 0.45])
(fpr, tpr, thresholds) = ROCCurve(yTrue, yScore)
print "Actual: ",
_printNPArray(yTrue)
print "Predicted: ",
_printNPArray(yScore)
print
print "Thresholds:",
_printNPArray(thresholds[::-1])
print "FPR(x): ",
_printNPArray(fpr)
print "TPR(y): ",
_printNPArray(tpr)
print
area = AreaUnderCurve(fpr, tpr)
print "AUC: ", area
if __name__=='__main__':
_test()
| agpl-3.0 |
gbrammer/sgas-lens | sgas/other_bands.py | 1 | 2384 | def recalibrate():
"""
Rerun the WFC3 calibration pipeline to flatten the (potentially)
variable ramps
"""
import matplotlib as mpl
mpl.rcParams['backend'] = 'agg'
import glob
import os
import stsci.tools
from sgas import reprocess_wfc3
# In RAW
files=glob.glob('*raw.fits')
reprocess_wfc3.show_ramps_parallel(files, cpu_count=4)
files=glob.glob('*raw.fits')
reprocess_wfc3.reprocess_parallel(files)
def preprocess():
"""
Drizzle and align the other bands
"""
# In Prep
import grizli
import grizli.prep
import os
import glob
import numpy as np
import sgas
# other bands from the Gladders program
files=glob.glob('../RAW/ic2*_flt.fits')
visits, xx = grizli.utils.parse_flt_files(files=files, uniquename=True)
# Alignment list, generated by GBr
radec = os.path.join(sgas.get_data_path(), 'sdssj0851+3331-f160w.radec')
# Copy aligment guess files
os.system('cp {0}/*align_guess .'.format(sgas.get_data_path()))
all_failed = []
Skip=True
# This main loop does all of the alignment and background subtraction
for visit in visits:
if os.path.exists('%s.failed' %(visit['product'])):
all_failed.append(visit)
if (os.path.exists('%s_drz_sci.fits' %(visit['product']))) & (Skip):
continue
print(visit['product'])
try:
status = grizli.prep.process_direct_grism_visit(direct=visit, grism={}, radec=radec, skip_direct=False, align_mag_limits=[14,23], tweak_max_dist=8, tweak_threshold=8, align_tolerance=8, tweak_fit_order=2)
except:
fp = open('%s.failed' %(visit['product']), 'w')
fp.write('\n')
fp.close()
continue
if os.path.exists('%s.failed' %(visit['product'])):
os.remove('%s.failed' %(visit['product']))
# Make both images have the same pixel grid
visits[1]['reference'] = 'sdssj0851+3331-c2i-06-293.0-f125w_drz_sci.fits'
# Drizzle them, North-up and with 0.06" pixels
grizli.prep.drizzle_overlaps(visits, parse_visits=False, pixfrac=0.8, scale=0.06, skysub=False, final_wht_type='IVM', check_overlaps=False)
| mit |
mjirik/lisa | lisa/classification.py | 1 | 2243 | # ! /usr/bin/python
# -*- coding: utf-8 -*-
from loguru import logger
# logger = logging.getLogger()
import numpy as np
class GMMClassifier():
def __init__(self, each_class_params=None, **same_params):
"""
same_params: classifier params for each class are same
each_class_params: is list of dictionary of params for each
class classifier. For example:
[{'covariance_type': 'full'}, {'n_components': 2}])
"""
self.same_params = same_params
self.each_class_params = each_class_params
self.models = []
def fit(self, X_train, y_train):
X_train = np.asarray(X_train)
y_train = np.asarray(y_train)
# from sklearn.mixture import GMM as GaussianMixture
from sklearn.mixture import GaussianMixture
unlabels = range(0, np.max(y_train) + 1)
for lab in unlabels:
if self.each_class_params is not None:
# print 'eacl'
# print self.each_class_params[lab]
model = GaussianMixture(**self.each_class_params[lab])
# print 'po gmm ', model
elif len(self.same_params) > 0:
model = GaussianMixture(**self.same_params)
# print 'ewe ', model
else:
model = GaussianMixture()
X_train_lab = X_train[y_train == lab]
# logger.debug('xtr lab shape ' + str(X_train_lab))
model.fit(X_train_lab)
self.models.insert(lab, model)
def __str__(self):
if self.each_class_params is not None:
return "GMMClassificator(" + str(self.each_class_params) + ')'
else:
return "GMMClassificator(" + str(self.same_params) + ')'
def predict(self, X_test):
X_test = np.asarray(X_test)
logger.debug(str(X_test.shape))
logger.debug(str(X_test))
scores = np.zeros([X_test.shape[0], len(self.models)])
for lab in range(0, len(self.models)):
logger.debug('means shape' + str(self.models[lab].means_.shape))
sc = self.models[lab].score_samples(X_test)
scores[:, lab] = sc
pred = np.argmax(scores, 1)
return pred
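# Minimal usage sketch; the toy features, labels and parameters below are made
# up purely for illustration.
if __name__ == "__main__":
    X_demo = np.array([[0.0], [0.1], [0.2], [5.0], [5.1], [5.2]])
    y_demo = np.array([0, 0, 0, 1, 1, 1])
    clf = GMMClassifier(n_components=1, covariance_type='full')
    clf.fit(X_demo, y_demo)
    print(clf.predict(np.array([[0.05], [5.05]])))  # expected: [0 1]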
| bsd-3-clause |
bzamecnik/sms-tools | lectures/06-Harmonic-model/plots-code/oboe-autocorrelation.py | 1 | 1074 | import essentia.standard as ess
# matplotlib without any blocking GUI
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from smst.utils import audio
(fs, x) = audio.read_wav('../../../sounds/oboe-A4.wav')
M = 500
start = int(.8 * fs)  # integer sample index (float slicing is rejected by newer NumPy)
xp = x[start:start + M] / float(max(x[start:start + M]))
r = ess.AutoCorrelation(normalization='standard')(xp)
r = r / max(r)
peaks = ess.PeakDetection(threshold=.2, interpolate=False, minPosition=.01)(r)
plt.figure(1, figsize=(9, 7))
plt.subplot(211)
plt.plot(np.arange(M) / float(fs), xp, lw=1.5)
plt.axis([0, (M - 1) / float(fs), min(xp), max(xp)])
plt.xlabel('time (sec)')
plt.ylabel('amplitude')
plt.title('x (oboe-A4.wav)')
plt.subplot(212)
plt.plot(np.arange(M) / float(fs), r, 'r', lw=1.5)
plt.plot(peaks[0] * (M - 1) / float(fs), peaks[1], 'x', color='k', markeredgewidth=1.5)
plt.axis([0, (M - 1) / float(fs), min(r), max(r)])
plt.title('autocorrelation function + peaks')
plt.xlabel('lag time (sec)')
plt.ylabel('correlation')
plt.tight_layout()
plt.savefig('oboe-autocorrelation.png')
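# Rough follow-up estimate (assumes the first detected autocorrelation peak
# sits at one fundamental period): convert the normalized peak position to a
# lag in seconds exactly as in the plot above and invert it.
lag_times = peaks[0] * (M - 1) / float(fs)
if len(lag_times) > 0:
    print('estimated f0: %.1f Hz (A4 = 440 Hz)' % (1.0 / lag_times[0]))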
| agpl-3.0 |
EuropeanSocialInnovationDatabase/ESID-main | TextMining/Classifiers/Trainers/NaiveBayesOutputs.py | 1 | 16411 | from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.pipeline import Pipeline
import numpy as np
import pandas as pd
import re
from os import listdir
from os.path import join,isdir
from sklearn.utils import resample
from sklearn.model_selection import cross_val_score
import pickle
from sklearn.utils import resample
class DataSet:
Annotators = []
def __init__(self):
self.Annotators = []
class Annotator:
files = []
documents = []
Name = ""
def __init__(self):
self.files = []
self.documents = []
self.Name = ""
class Document:
Lines = []
DocumentName = ""
DatabaseID = ""
Annotations = []
Text = ""
isSpam = False
Project_Mark_Objective_1A = 0
Project_Mark_Objective_1B = 0
Project_Mark_Objective_1C = 0
Project_Mark_Actors_2A = 0
Project_Mark_Actors_2B = 0
Project_Mark_Actors_2C = 0
Project_Mark_Outputs_3A = 0
Project_Mark_Innovativeness_3A = 0
isProjectObjectiveSatisfied = False
isProjectActorSatisfied = False
isProjectOutputSatisfied = False
isProjectInnovativenessSatisfied = False
isProjectObjectiveSatisfied_predicted = False
isProjectActorSatisfied_predicted = False
isProjectOutputSatisfied_predicted = False
isProjectInnovativenessSatisfied_predicted = False
def __init__(self):
self.Text = ""
self.Lines = []
self.DocumentName = ""
self.DatabaseID = ""
self.Annotations = []
self.isSpam = False
self.Project_Mark_Objective_1A = 0
self.Project_Mark_Objective_1B = 0
self.Project_Mark_Objective_1C = 0
self.Project_Mark_Actors_2A = 0
self.Project_Mark_Actors_2B = 0
self.Project_Mark_Actors_2C = 0
self.Project_Mark_Outputs_3A = 0
        self.Project_Mark_Innovativeness_3A = 0
self.isProjectObjectiveSatisfied = False
self.isProjectActorSatisfied = False
self.isProjectOutputSatisfied = False
self.isProjectInnovativenessSatisfied = False
self.isProjectObjectiveSatisfied_predicted = False
self.isProjectActorSatisfied_predicted = False
self.isProjectOutputSatisfied_predicted = False
self.isProjectInnovativenessSatisfied_predicted = False
class Line:
StartSpan = 0
EndSpan = 0
Text = ""
Sentences = []
Tokens = []
Annotations = []
def __init__(self):
self.StartSpan = 0
self.EndSpan = 0
self.Text = ""
self.Sentences = []
self.Tokens = []
self.Annotations = []
class Sentence:
SentenceText = ""
StartSpan = -1
EndSpan = -1
Annotations = []
def __init__(self):
self.SentenceText = ""
self.StartSpan = -1
self.EndSpan = -1
self.Annotations = []
class Annotation:
FromFile = ""
FromAnnotator = ""
AnnotationText = ""
StartSpan = -1
EndSpan = -1
HighLevelClass = ""
LowLevelClass = ""
data_folder = "../../../Helpers/FullDataset_Alina/"
ds = DataSet()
total_num_spam = 0
sentences = []
total_num_files = 0
# job = aetros.backend.start_job('nikolamilosevic86/GloveModel')
annotators = [f for f in listdir(data_folder) if isdir(join(data_folder, f))]
for ann in annotators:
folder = data_folder + "/" + ann
Annot = Annotator()
Annot.Name = ann
ds.Annotators.append(Annot)
onlyfiles = [f for f in listdir(folder) if (f.endswith(".txt"))]
for file in onlyfiles:
Annot.files.append(data_folder + "/" + ann + '/' + file)
doc = Document()
total_num_files = total_num_files + 1
doc.Lines = []
# doc.Annotations = []
doc.DocumentName = file
Annot.documents.append(doc)
if (file.startswith('a') or file.startswith('t')):
continue
print file
doc.DatabaseID = file.split("_")[1].split(".")[0]
fl = open(data_folder + "/" + ann + '/' + file, 'r')
content = fl.read()
doc.Text = content
lines = content.split('\n')
line_index = 0
for line in lines:
l = Line()
l.StartSpan = line_index
l.EndSpan = line_index + len(line)
l.Text = line
line_index = line_index + len(line) + 1
sentences.append(line)
doc.Lines.append(l)
an = open(data_folder + "/" + ann + '/' + file.replace(".txt", ".ann"), 'r')
annotations = an.readlines()
for a in annotations:
a = re.sub(r'\d+;\d+', '', a).replace(' ', ' ')
split_ann = a.split('\t')
if (split_ann[0].startswith("T")):
id = split_ann[0]
sp_split_ann = split_ann[1].split(' ')
low_level_ann = sp_split_ann[0]
if low_level_ann == "ProjectMark":
continue
span_start = sp_split_ann[1]
span_end = sp_split_ann[2]
ann_text = split_ann[2]
Ann = Annotation()
Ann.AnnotationText = ann_text
Ann.StartSpan = int(span_start)
Ann.EndSpan = int(span_end)
Ann.FromAnnotator = Annot.Name
Ann.FromFile = file
Ann.LowLevelClass = low_level_ann
if (low_level_ann == "SL_Outputs_3a"):
Ann.HighLevelClass = "Outputs"
if (
low_level_ann == "SL_Objective_1a" or low_level_ann == "SL_Objective_1b" or low_level_ann == "SL_Objective_1c"):
Ann.HighLevelClass = "Objectives"
if (
low_level_ann == "SL_Actors_2a" or low_level_ann == "SL_Actors_2b" or low_level_ann == "SL_Actors_2c"):
Ann.HighLevelClass = "Actors"
if (low_level_ann == "SL_Innovativeness_4a"):
Ann.HighLevelClass = "Innovativeness"
doc.Annotations.append(Ann)
for line in doc.Lines:
if line.StartSpan <= Ann.StartSpan and line.EndSpan >= Ann.EndSpan:
line.Annotations.append(Ann)
else:
id = split_ann[0]
sp_split_ann = split_ann[1].split(' ')
mark_name = sp_split_ann[0]
if (len(sp_split_ann) <= 2):
continue
mark = sp_split_ann[2].replace('\n', '')
if (mark_name == "DL_Outputs_3a"):
doc.Project_Mark_Outputs_3A = int(mark)
if int(mark) >= 1:
doc.isProjectOutputSatisfied = True
if (mark_name == "DL_Objective_1a"):
doc.Project_Mark_Objective_1A = int(mark)
if int(mark) >= 1:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Objective_1b" or mark_name == "DL_Objective"):
doc.Project_Mark_Objective_1B = int(mark)
if int(mark) >= 1:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Objective_1c"):
doc.Project_Mark_Objective_1C = int(mark)
if int(mark) >= 1:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Innovativeness_4a" or mark_name == "DL_Innovativeness"):
doc.Project_Mark_Innovativeness_3A = int(mark)
if int(mark) >= 1:
doc.isProjectInnovativenessSatisfied = True
if (mark_name == "DL_Actors_2a" or mark_name == "DL_Actors"):
doc.Project_Mark_Actors_2A = int(mark)
if int(mark) >= 1:
doc.isProjectActorSatisfied = True
if (mark_name == "DL_Actors_2b"):
doc.Project_Mark_Actors_2B = int(mark)
if int(mark) >= 1:
doc.isProjectActorSatisfied = True
if (mark_name == "DL_Actors_2c"):
doc.Project_Mark_Actors_2C = int(mark)
if int(mark) >= 1:
doc.isProjectActorSatisfied = True
if (
doc.Project_Mark_Objective_1A == 0 and doc.Project_Mark_Objective_1B == 0 and doc.Project_Mark_Objective_1C == 0 and doc.Project_Mark_Actors_2A == 0
            and doc.Project_Mark_Actors_2B == 0 and doc.Project_Mark_Actors_2C == 0 and doc.Project_Mark_Outputs_3A == 0
and doc.Project_Mark_Innovativeness_3A == 0):
doc.isSpam = True
total_num_spam = total_num_spam + 1
i = 0
j = i + 1
kappa_files = 0
done_documents = []
num_overlap_spam = 0
num_spam = 0
total_objectives = 0
total_outputs = 0
total_actors = 0
total_innovativeness = 0
ann1_annotations_objectives = []
ann2_annotations_objectives = []
ann1_annotations_actors = []
ann2_annotations_actors = []
ann1_annotations_outputs = []
ann2_annotations_outputs = []
ann1_annotations_innovativeness = []
ann2_annotations_innovativeness = []
match_objectives = 0
match_outputs = 0
match_actors = 0
match_innovativeness = 0
while i < len(ds.Annotators) - 1:
while j < len(ds.Annotators):
annotator1 = ds.Annotators[i]
annotator2 = ds.Annotators[j]
for doc1 in annotator1.documents:
for doc2 in annotator2.documents:
if doc1.DocumentName == doc2.DocumentName and doc1.DocumentName not in done_documents:
done_documents.append(doc1.DocumentName)
line_num = 0
ann1_objective = [0] * len(doc1.Lines)
ann2_objective = [0] * len(doc2.Lines)
ann1_output = [0] * len(doc1.Lines)
ann2_output = [0] * len(doc2.Lines)
ann1_actor = [0] * len(doc1.Lines)
ann2_actor = [0] * len(doc2.Lines)
ann1_innovativeness = [0] * len(doc1.Lines)
ann2_innovativeness = [0] * len(doc2.Lines)
while line_num < len(doc1.Lines):
if len(doc1.Lines[line_num].Annotations) > 0:
for a in doc1.Lines[line_num].Annotations:
if a.HighLevelClass == "Objectives":
ann1_objective[line_num] = 1
total_objectives = total_objectives + 1
if a.HighLevelClass == "Outputs":
ann1_output[line_num] = 1
total_outputs = total_outputs + 1
if a.HighLevelClass == "Actors":
ann1_actor[line_num] = 1
total_actors = total_actors + 1
if a.HighLevelClass == "Innovativeness":
ann1_innovativeness[line_num] = 1
total_innovativeness = total_innovativeness + 1
for a1 in doc2.Lines[line_num].Annotations:
if a1.HighLevelClass == a.HighLevelClass:
if a1.HighLevelClass == "Objectives":
match_objectives = match_objectives + 1
if a1.HighLevelClass == "Outputs":
match_outputs = match_outputs + 1
if a1.HighLevelClass == "Actors":
match_actors = match_actors + 1
if a1.HighLevelClass == "Innovativeness":
match_innovativeness = match_innovativeness + 1
if len(doc2.Lines[line_num].Annotations) > 0:
for a in doc2.Lines[line_num].Annotations:
if a.HighLevelClass == "Objectives":
ann2_objective[line_num] = 1
total_objectives = total_objectives + 1
if a.HighLevelClass == "Outputs":
ann2_output[line_num] = 1
total_outputs = total_outputs + 1
if a.HighLevelClass == "Actors":
ann2_actor[line_num] = 1
total_actors = total_actors + 1
if a.HighLevelClass == "Innovativeness":
ann2_innovativeness[line_num] = 1
total_innovativeness = total_innovativeness + 1
line_num = line_num + 1
ann1_annotations_outputs.extend(ann1_output)
ann2_annotations_outputs.extend(ann2_output)
ann1_annotations_objectives.extend(ann1_objective)
ann2_annotations_objectives.extend(ann2_objective)
ann1_annotations_actors.extend(ann1_actor)
ann2_annotations_actors.extend(ann2_actor)
ann1_annotations_innovativeness.extend(ann1_innovativeness)
ann2_annotations_innovativeness.extend(ann2_innovativeness)
print "Statistics for document:" + doc1.DocumentName
print "Annotators " + annotator1.Name + " and " + annotator2.Name
print "Spam by " + annotator1.Name + ":" + str(doc1.isSpam)
print "Spam by " + annotator2.Name + ":" + str(doc2.isSpam)
if (doc1.isSpam == doc2.isSpam):
num_overlap_spam = num_overlap_spam + 1
if doc1.isSpam:
num_spam = num_spam + 1
if doc2.isSpam:
num_spam = num_spam + 1
kappa_files = kappa_files + 1
j = j + 1
i = i + 1
j = i + 1
print annotators
doc_array = []
text_array = []
objectives = []
actors = []
outputs = []
innovativeness = []
for ann in ds.Annotators:
for doc in ann.documents:
doc_array.append(
[doc.Text, doc.isProjectObjectiveSatisfied, doc.isProjectActorSatisfied, doc.isProjectOutputSatisfied,
doc.isProjectInnovativenessSatisfied])
objectives.append(doc.isProjectObjectiveSatisfied)
actors.append(doc.isProjectActorSatisfied)
outputs.append(doc.isProjectOutputSatisfied)
innovativeness.append(doc.isProjectInnovativenessSatisfied)
text_array.append(doc.Text)
df = pd.DataFrame({'text':text_array,'classa':outputs})
df_majority = df[df.classa==0]
df_minority = df[df.classa==1]
df_minority_upsampled = resample(df_minority,
replace=True, # sample with replacement
n_samples=160, # to match majority class
random_state=83293) # reproducible results
df_upsampled = pd.concat([df_majority, df_minority_upsampled])
# Display new class counts
print df_upsampled.classa.value_counts()
train = text_array[0:int(0.8*len(text_array))]
train_Y = outputs[0:int(0.8*len(actors))]
test = text_array[int(0.8*len(text_array)):]
test_Y = outputs[int(0.8*len(actors)):]
#categories = ['non actor', 'actor']
text_clf = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB()),
])
scores = cross_val_score(text_clf, df_upsampled.text, df_upsampled.classa, cv=10,scoring='f1')
final = 0
for score in scores:
final = final + score
print scores
print "Final:" + str(final/10)
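# Illustrative extra check: evaluate the same pipeline once on the plain 80/20
# hold-out split built above to get a per-class report; the upsampled
# cross-validation scores above remain the headline numbers.
holdout_clf = Pipeline([('vect', CountVectorizer()),
                        ('tfidf', TfidfTransformer()),
                        ('clf', MultinomialNB()),
                        ])
holdout_clf.fit(train, train_Y)
print metrics.classification_report(test_Y, holdout_clf.predict(test))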
text_clf.fit( df_upsampled.text, df_upsampled.classa)
filename = '../Models/naive_bayes_outputs.sav'
pickle.dump(text_clf, open(filename, 'wb')) | gpl-3.0 |
wkrzemien/DIRAC | Core/Utilities/Graphs/BarGraph.py | 5 | 5858 | """ BarGraph represents bar graphs with vertical bars both simple
and stacked.
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
import datetime
from pylab import setp
from matplotlib.patches import Polygon
from matplotlib.dates import date2num
from DIRAC.Core.Utilities.Graphs.PlotBase import PlotBase
from DIRAC.Core.Utilities.Graphs.GraphUtilities import to_timestamp, pixelToPoint, \
PrettyDateLocator, PrettyDateFormatter, \
PrettyScalarFormatter
__RCSID__ = "$Id$"
class BarGraph( PlotBase ):
"""
The BarGraph class is a straightforward bar graph; given a dictionary
of values, it takes the keys as the independent variable and the values
as the dependent variable.
"""
def __init__(self,data,ax,prefs,*args,**kw):
PlotBase.__init__(self,data,ax,prefs,*args,**kw)
if 'span' in self.prefs:
self.width = self.prefs['span']
else:
self.width = 1.0
if self.gdata.key_type == "time":
# Try to guess the time bin span
nKeys = self.gdata.getNumberOfKeys()
self.width = (max(self.gdata.all_keys)-min(self.gdata.all_keys))/(nKeys-1)
def draw( self ):
PlotBase.draw(self)
self.x_formatter_cb(self.ax)
if self.gdata.isEmpty():
return None
tmp_x = []
tmp_y = []
# Evaluate the bar width
width = float(self.width)
if self.gdata.key_type == 'time':
#width = (1 - self.bar_graph_space) * width / 86400.0
width = width / 86400.0
offset = 0
elif self.gdata.key_type == 'string':
self.bar_graph_space = 0.1
width = (1 - self.bar_graph_space) * width
offset = self.bar_graph_space / 2.0
else:
offset = 0
start_plot = 0
end_plot = 0
if "starttime" in self.prefs and "endtime" in self.prefs:
start_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['starttime'])))
end_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['endtime'])))
nKeys = self.gdata.getNumberOfKeys()
tmp_b = []
if 'log_yaxis' in self.prefs:
tmp_b = [0.001]*nKeys
ymin = 0.001
else:
tmp_b = [0.]*nKeys
ymin = 0.
self.polygons = []
self.lines = []
labels = self.gdata.getLabels()
labels.reverse()
# If it is a simple plot, no labels are used
# Evaluate the most appropriate color in this case
if self.gdata.isSimplePlot():
labels = [('SimplePlot',0.)]
color = self.prefs.get('plot_color','Default')
if color.find('#') != -1:
self.palette.setColor('SimplePlot',color)
else:
labels = [(color,0.)]
seq_b = [(self.gdata.max_num_key+width,0.0),(self.gdata.min_num_key,0.0)]
zorder = 0.0
dpi = self.prefs.get('dpi',100)
for label,num in labels:
color = self.palette.getColor(label)
ind = 0
tmp_x = []
tmp_y = []
tmp_t = []
plot_data = self.gdata.getPlotNumData(label)
for key, value, error in plot_data:
if value is None:
value = 0.
tmp_x.append( offset+key )
#tmp_y.append( ymin )
tmp_y.append( 0.001 )
tmp_x.append( offset+key )
tmp_y.append( float(value)+tmp_b[ind] )
tmp_x.append( offset+key+width )
tmp_y.append( float(value)+tmp_b[ind] )
tmp_x.append( offset+key+width )
#tmp_y.append( ymin )
tmp_y.append( 0.001 )
tmp_t.append(float(value)+tmp_b[ind])
ind += 1
seq_t = zip(tmp_x,tmp_y)
seq = seq_t+seq_b
poly = Polygon( seq, facecolor=color, fill=True,
linewidth=pixelToPoint(0.2,dpi),
zorder=zorder)
self.ax.add_patch( poly )
self.polygons.append( poly )
tmp_b = list(tmp_t)
zorder -= 0.1
tight_bars_flag = self.prefs.get('tight_bars',False)
if tight_bars_flag:
setp( self.polygons, linewidth=0. )
#pivots = keys
#for idx in range(len(pivots)):
# self.coords[ pivots[idx] ] = self.bars[idx]
ymax = max(tmp_b)
ymax *= 1.1
if 'log_yaxis' in self.prefs:
ymin = 0.001
else:
      ymin = min( min(tmp_b), 0. )  # lowest stacked bar value or zero, whichever is smaller
ymin *= 1.1
xmax=max(tmp_x)
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ymin = self.prefs.get( 'ymin', ymin )
ymax = self.prefs.get( 'ymax', ymax )
xmin = self.prefs.get( 'xmin', xmin )
xmax = self.prefs.get( 'xmax', xmax )
self.ax.set_xlim( xmin=xmin, xmax=xmax+offset )
self.ax.set_ylim( ymin=ymin, ymax=ymax )
if self.gdata.key_type == 'time':
if start_plot and end_plot:
self.ax.set_xlim( xmin=start_plot, xmax=end_plot)
else:
self.ax.set_xlim( xmin=min(tmp_x), xmax=max(tmp_x))
def x_formatter_cb( self, ax ):
if self.gdata.key_type == "string":
smap = self.gdata.getStringMap()
reverse_smap = {}
for key, val in smap.items():
reverse_smap[val] = key
ticks = smap.values()
ticks.sort()
ax.set_xticks( [i+.5 for i in ticks] )
ax.set_xticklabels( [reverse_smap[i] for i in ticks] )
labels = ax.get_xticklabels()
ax.grid( False )
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ax.set_xlim( xmin=xmin,xmax=len(ticks) )
elif self.gdata.key_type == "time":
#ax.set_xlim( xmin=self.begin_num,xmax=self.end_num )
dl = PrettyDateLocator()
df = PrettyDateFormatter( dl )
ax.xaxis.set_major_locator( dl )
ax.xaxis.set_major_formatter( df )
ax.xaxis.set_clip_on(False)
sf = PrettyScalarFormatter( )
ax.yaxis.set_major_formatter( sf )
#labels = ax.get_xticklabels()
else:
return None
| gpl-3.0 |
TomAugspurger/pandas | pandas/tests/generic/methods/test_reorder_levels.py | 2 | 2804 | import numpy as np
import pytest
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
class TestReorderLevels:
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_reorder_levels(self, klass):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
obj = df if klass is DataFrame else df["A"]
# no change, position
result = obj.reorder_levels([0, 1, 2])
tm.assert_equal(obj, result)
# no change, labels
result = obj.reorder_levels(["L0", "L1", "L2"])
tm.assert_equal(obj, result)
# rotate, position
result = obj.reorder_levels([1, 2, 0])
e_idx = MultiIndex(
levels=[["one", "two", "three"], [0, 1], ["bar"]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
names=["L1", "L2", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
expected = expected if klass is DataFrame else expected["A"]
tm.assert_equal(result, expected)
result = obj.reorder_levels([0, 0, 0])
e_idx = MultiIndex(
levels=[["bar"], ["bar"], ["bar"]],
codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
names=["L0", "L0", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
expected = expected if klass is DataFrame else expected["A"]
tm.assert_equal(result, expected)
result = obj.reorder_levels(["L0", "L0", "L0"])
tm.assert_equal(result, expected)
def test_reorder_levels_swaplevel_equivalence(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
result = ymd.reorder_levels(["month", "day", "year"])
expected = ymd.swaplevel(0, 1).swaplevel(1, 2)
tm.assert_frame_equal(result, expected)
result = ymd["A"].reorder_levels(["month", "day", "year"])
expected = ymd["A"].swaplevel(0, 1).swaplevel(1, 2)
tm.assert_series_equal(result, expected)
result = ymd.T.reorder_levels(["month", "day", "year"], axis=1)
expected = ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
tm.assert_frame_equal(result, expected)
with pytest.raises(TypeError, match="hierarchical axis"):
ymd.reorder_levels([1, 2], axis=1)
with pytest.raises(IndexError, match="Too many levels"):
ymd.index.reorder_levels([1, 2, 3])
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/preprocessing/data.py | 5 | 94481 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from __future__ import division
from itertools import chain, combinations
import numbers
import warnings
from itertools import combinations_with_replacement as combinations_w_r
import numpy as np
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import string_types
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import (check_is_fitted, check_random_state,
FLOAT_DTYPES)
BOUNDS_THRESHOLD = 1e-7
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'QuantileTransformer',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'quantile_transform',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
    StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
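    Examples
    --------
    A short illustrative example; exact output formatting may differ slightly
    across NumPy versions::
        >>> import numpy as np
        >>> from sklearn.preprocessing import scale
        >>> scale(np.array([[1.], [2.], [3.]]))  # doctest: +SKIP
        array([[-1.22474487],
               [ 0.        ],
               [ 1.22474487]])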
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
        Set to False to perform inplace scaling and avoid a copy (if the
        input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
Examples
--------
>>> from sklearn.preprocessing import MinMaxScaler
>>>
>>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
>>> scaler = MinMaxScaler()
>>> print(scaler.fit(data))
MinMaxScaler(copy=True, feature_range=(0, 1))
>>> print(scaler.data_max_)
[ 1. 18.]
>>> print(scaler.transform(data))
[[ 0. 0. ]
[ 0.25 0.25]
[ 0.5 0.5 ]
[ 1. 1. ]]
>>> print(scaler.transform([[2, 2]]))
[[ 1.5 0. ]]
See also
--------
minmax_scale: Equivalent function without the estimator API.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
            raise TypeError("MinMaxScaler does not support sparse input. "
                            "You may consider using MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
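    Examples
    --------
    A short illustrative example; exact output formatting may differ slightly
    across NumPy versions::
        >>> import numpy as np
        >>> from sklearn.preprocessing import minmax_scale
        >>> minmax_scale(np.array([1., 2., 10.]))  # doctest: +SKIP
        array([ 0.        ,  0.11111111,  1.        ])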
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_*
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler
>>>
>>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]
>>> scaler = StandardScaler()
>>> print(scaler.fit(data))
StandardScaler(copy=True, with_mean=True, with_std=True)
>>> print(scaler.mean_)
[ 0.5 0.5]
>>> print(scaler.transform(data))
[[-1. -1.]
[-1. -1.]
[ 1. 1.]
[ 1. 1.]]
>>> print(scaler.transform([[2, 2]]))
[[ 3. 3.]]
See also
--------
scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y='deprecated', copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
copy : bool, optional (default: None)
Copy the input X or not.
Returns
-------
X_tr : array-like, shape [n_samples, n_features]
Transformed array.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
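# Sketch of the incremental API documented above (illustrative only): feeding
# batches through ``partial_fit`` yields the same statistics as a single
# ``fit`` on the concatenated data, because the Chan et al. update is exact.
# The helper name is hypothetical.
def _example_standard_scaler_streaming():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 3))
    streamed = StandardScaler()
    for batch in np.array_split(X, 5):
        streamed.partial_fit(batch)           # updates mean_, var_, n_samples_seen_
    full = StandardScaler().fit(X)
    assert np.allclose(streamed.mean_, full.mean_)
    assert np.allclose(streamed.var_, full.var_)
    return streamed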
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
maxabs_scale: Equivalent function without the estimator API.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature maximum absolute value
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
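# Sketch of the scaler above on sparse input (illustrative only): dividing each
# column by its maximum absolute value never densifies the matrix, so CSR input
# stays CSR. Relies on the module-level scipy.sparse import used in this file.
def _example_maxabs_sparse():
    X = sparse.csr_matrix([[1., 0., -2.], [0., 4., 0.]])
    Xt = MaxAbsScaler().fit_transform(X)      # still sparse, entries in [-1, 1]
    assert sparse.issparse(Xt)
    assert Xt.toarray().min() >= -1.0 and Xt.toarray().max() <= 1.0
    return Xt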
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the ``axis`` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the ``transform``
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This will cause ``transform`` to raise an exception when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quartile, 3rd quartile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
robust_scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
q = np.percentile(X, self.quantile_range, axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X):
"""Center and scale the data.
Can be called on sparse input, provided that ``RobustScaler`` has been
fitted to dense input and ``with_centering=False``.
Parameters
----------
X : {array-like, sparse matrix}
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
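# Sketch contrasting the robust statistics above with mean/std scaling
# (illustrative only): a single extreme outlier barely moves the median/IQR
# based result, so the inliers keep a small magnitude. Helper name hypothetical.
def _example_robust_scaler_outlier():
    X = np.array([[1.], [2.], [3.], [4.], [1000.]])
    Xt = RobustScaler().fit_transform(X)      # center_ = 3.0 (median), scale_ = 2.0 (IQR)
    assert np.abs(Xt[:4]).max() <= 1.0        # inliers stay within one IQR of the median
    return Xt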
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
Center to the median and component-wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to the interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quartile, 3rd quartile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
Returns
-------
self : instance
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
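# Sketch of the bookkeeping above (illustrative only): ``powers_`` and
# ``get_feature_names`` describe exactly which monomials ``transform`` emits.
# The helper name is hypothetical.
def _example_polynomial_feature_names():
    X = np.arange(6).reshape(3, 2)
    poly = PolynomialFeatures(degree=2).fit(X)
    names = poly.get_feature_names()          # ['1', 'x0', 'x1', 'x0^2', 'x0 x1', 'x1^2']
    assert poly.n_output_features_ == len(names) == 6
    return poly.transform(X), names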
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
Returns
-------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Normalized input X.
norms : array, shape [n_samples] if axis=1 else [n_features]
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ('l1', 'l2'):
raise NotImplementedError("return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'")
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
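# Sketch of the function above (illustrative only): after l2 normalization
# every non-zero row has unit Euclidean length, and ``return_norm`` exposes the
# norms that were used. The helper name is hypothetical.
def _example_normalize_l2():
    X = np.array([[3., 4.], [1., 1.]])
    Xn, norms = normalize(X, norm='l2', return_norm=True)
    assert np.allclose(norms, [5., np.sqrt(2.)])
    assert np.allclose(np.linalg.norm(Xn, axis=1), 1.0)
    return Xn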
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering. For instance, the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
normalize: Equivalent function without the estimator API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : array-like
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y='deprecated', copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
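# Sketch of the thresholding helper above (illustrative only): values strictly
# above the threshold become 1, everything else becomes 0. Helper name
# hypothetical.
def _example_binarize():
    X = np.array([[0.2, -1.0, 3.5], [0.0, 0.7, 0.5]])
    Xb = binarize(X, threshold=0.5)
    assert np.array_equal(Xb, [[0., 0., 1.], [0., 1., 0.]])
    return Xb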
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the estimator API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : array-like
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y='deprecated', copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y='deprecated', copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
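# Sketch of the identity stated in the class docstring above (illustrative
# only): for a linear kernel K = X X^T, centering K with KernelCenterer is the
# same as computing the kernel on mean-centered data. Helper name hypothetical.
def _example_kernel_centerer():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    K = np.dot(X, X.T)
    Kc = KernelCenterer().fit_transform(K)
    Xc = X - X.mean(axis=0)
    assert np.allclose(Kc, np.dot(Xc, Xc.T))
    return Kc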
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
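# Sketch of the helper above (illustrative only): the callable is applied to
# the selected columns and the remaining columns are stacked, untouched, to the
# right of the result. Helper name hypothetical.
def _example_transform_selected():
    X = np.array([[1., 10.], [2., 20.]])
    doubled = _transform_selected(X, lambda Z: 2 * Z, selected=[0])
    assert np.allclose(doubled, [[2., 10.], [4., 20.]])
    return doubled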
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and four samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
and n_classes-1.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
# i.e. less than n_values_, using a mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
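# Sketch of the ``handle_unknown`` behaviour described above (illustrative
# only): with 'ignore', categories never seen during ``fit`` simply produce
# all-zero rows in the affected columns instead of raising an error. Helper
# name hypothetical.
def _example_one_hot_unknown():
    enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
    enc.fit([[0], [1], [2]])
    out = enc.transform([[1], [5]])           # the value 5 was never seen in fit
    assert np.array_equal(out, [[0., 1., 0.], [0., 0., 0.]])
    return out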
class QuantileTransformer(BaseEstimator, TransformerMixin):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently.
The cumulative distribution function of a feature is used to project the
original values. Features values of new/unseen data that fall below
or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
n_quantiles : int, optional (default=1000)
Number of quantiles to be computed. It corresponds to the number
of landmarks used to discretize the cumulative distribution function.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
copy : boolean, optional, (default=True)
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
quantiles_ : ndarray, shape (n_quantiles, n_features)
The values corresponding to the quantiles of reference.
references_ : ndarray, shape(n_quantiles, )
Quantiles of references.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import QuantileTransformer
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
>>> qt.fit_transform(X) # doctest: +ELLIPSIS
array([...])
See also
--------
quantile_transform : Equivalent function without the estimator API.
StandardScaler : perform standardization that is faster, but less robust
to outliers.
RobustScaler : perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, n_quantiles=1000, output_distribution='uniform',
ignore_implicit_zeros=False, subsample=int(1e5),
random_state=None, copy=True):
self.n_quantiles = n_quantiles
self.output_distribution = output_distribution
self.ignore_implicit_zeros = ignore_implicit_zeros
self.subsample = subsample
self.random_state = random_state
self.copy = copy
def _dense_fit(self, X, random_state):
"""Compute percentiles for dense matrices.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.
"""
if self.ignore_implicit_zeros:
warnings.warn("'ignore_implicit_zeros' takes effect only with"
" sparse matrix. This parameter has no effect.")
n_samples, n_features = X.shape
# for compatibility issues with numpy <= 1.8.X, references
# need to be a list scaled between 0 and 100
references = (self.references_ * 100).tolist()
self.quantiles_ = []
for col in X.T:
if self.subsample < n_samples:
subsample_idx = random_state.choice(n_samples,
size=self.subsample,
replace=False)
col = col.take(subsample_idx, mode='clip')
self.quantiles_.append(np.percentile(col, references))
self.quantiles_ = np.transpose(self.quantiles_)
def _sparse_fit(self, X, random_state):
"""Compute percentiles for sparse matrices.
Parameters
----------
X : sparse matrix CSC, shape (n_samples, n_features)
The data used to scale along the features axis. The sparse matrix
needs to be nonnegative.
"""
n_samples, n_features = X.shape
# for compatibility issues with numpy <= 1.8.X, references
# need to be a list scaled between 0 and 100
references = list(map(lambda x: x * 100, self.references_))
self.quantiles_ = []
for feature_idx in range(n_features):
column_nnz_data = X.data[X.indptr[feature_idx]:
X.indptr[feature_idx + 1]]
if len(column_nnz_data) > self.subsample:
column_subsample = (self.subsample * len(column_nnz_data) //
n_samples)
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=column_subsample,
dtype=X.dtype)
else:
column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
column_data[:column_subsample] = random_state.choice(
column_nnz_data, size=column_subsample, replace=False)
else:
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=len(column_nnz_data),
dtype=X.dtype)
else:
column_data = np.zeros(shape=n_samples, dtype=X.dtype)
column_data[:len(column_nnz_data)] = column_nnz_data
if not column_data.size:
# if no nnz, an error will be raised for computing the
# quantiles. Force the quantiles to be zeros.
self.quantiles_.append([0] * len(references))
else:
self.quantiles_.append(
np.percentile(column_data, references))
self.quantiles_ = np.transpose(self.quantiles_)
def fit(self, X, y=None):
"""Compute the quantiles used for transforming.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
self : object
Returns self
"""
if self.n_quantiles <= 0:
raise ValueError("Invalid value for 'n_quantiles': %d. "
"The number of quantiles must be at least one."
% self.n_quantiles)
if self.subsample <= 0:
raise ValueError("Invalid value for 'subsample': %d. "
"The number of subsamples must be at least one."
% self.subsample)
if self.n_quantiles > self.subsample:
raise ValueError("The number of quantiles cannot be greater than"
" the number of samples used. Got {} quantiles"
" and {} samples.".format(self.n_quantiles,
self.subsample))
X = self._check_inputs(X)
rng = check_random_state(self.random_state)
# Create the quantiles of reference
self.references_ = np.linspace(0, 1, self.n_quantiles,
endpoint=True)
if sparse.issparse(X):
self._sparse_fit(X, rng)
else:
self._dense_fit(X, rng)
return self
def _transform_col(self, X_col, quantiles, inverse):
"""Private function to transform a single feature"""
if self.output_distribution == 'normal':
output_distribution = 'norm'
else:
output_distribution = self.output_distribution
output_distribution = getattr(stats, output_distribution)
# older versions of scipy do not handle tuple as fill_value
# clipping the value before transform solves the issue
if not inverse:
lower_bound_x = quantiles[0]
upper_bound_x = quantiles[-1]
lower_bound_y = 0
upper_bound_y = 1
else:
lower_bound_x = 0
upper_bound_x = 1
lower_bound_y = quantiles[0]
upper_bound_y = quantiles[-1]
# for inverse transform, match a uniform PDF
X_col = output_distribution.cdf(X_col)
# find index for lower and higher bounds
lower_bounds_idx = (X_col - BOUNDS_THRESHOLD <
lower_bound_x)
upper_bounds_idx = (X_col + BOUNDS_THRESHOLD >
upper_bound_x)
if not inverse:
# Interpolate in one direction and in the other and take the
# mean. This is in case of repeated values in the features
# and hence repeated quantiles
#
# If we don't do this, only one extreme of the duplicated values is
# used (the upper when we do ascending, and the
# lower for descending). We take the mean of these two.
X_col = .5 * (np.interp(X_col, quantiles, self.references_)
- np.interp(-X_col, -quantiles[::-1],
-self.references_[::-1]))
else:
X_col = np.interp(X_col, self.references_, quantiles)
X_col[upper_bounds_idx] = upper_bound_y
X_col[lower_bounds_idx] = lower_bound_y
# for forward transform, match the output PDF
if not inverse:
X_col = output_distribution.ppf(X_col)
# find the value to clip the data to avoid mapping to
# infinity. Clip such that the inverse transform will be
# consistent
clip_min = output_distribution.ppf(BOUNDS_THRESHOLD -
np.spacing(1))
clip_max = output_distribution.ppf(1 - (BOUNDS_THRESHOLD -
np.spacing(1)))
X_col = np.clip(X_col, clip_min, clip_max)
return X_col
def _check_inputs(self, X, accept_sparse_negative=False):
"""Check inputs before fit and transform"""
X = check_array(X, accept_sparse='csc', copy=self.copy,
dtype=[np.float64, np.float32])
# we only accept positive sparse matrices when ignore_implicit_zeros is
# False and when we call fit or transform.
if (not accept_sparse_negative and not self.ignore_implicit_zeros and
(sparse.issparse(X) and np.any(X.data < 0))):
raise ValueError('QuantileTransformer only accepts non-negative'
' sparse matrices.')
# check the output PDF
if self.output_distribution not in ('normal', 'uniform'):
raise ValueError("'output_distribution' has to be either 'normal'"
" or 'uniform'. Got '{}' instead.".format(
self.output_distribution))
return X
def _check_is_fitted(self, X):
"""Check the inputs before transforming"""
check_is_fitted(self, 'quantiles_')
# check that the dimensions of X are consistent with the fitted data
if X.shape[1] != self.quantiles_.shape[1]:
raise ValueError('X does not have the same number of features as'
' the previously fitted data. Got {} instead of'
' {}.'.format(X.shape[1],
self.quantiles_.shape[1]))
def _transform(self, X, inverse=False):
"""Forward and inverse transform.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.
inverse : bool, optional (default=False)
If False, apply forward transform. If True, apply
inverse transform.
Returns
-------
X : ndarray, shape (n_samples, n_features)
Projected data
"""
if sparse.issparse(X):
for feature_idx in range(X.shape[1]):
column_slice = slice(X.indptr[feature_idx],
X.indptr[feature_idx + 1])
X.data[column_slice] = self._transform_col(
X.data[column_slice], self.quantiles_[:, feature_idx],
inverse)
else:
for feature_idx in range(X.shape[1]):
X[:, feature_idx] = self._transform_col(
X[:, feature_idx], self.quantiles_[:, feature_idx],
inverse)
return X
def transform(self, X):
"""Feature-wise transformation of the data.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The projected data.
"""
X = self._check_inputs(X)
self._check_is_fitted(X)
return self._transform(X, inverse=False)
def inverse_transform(self, X):
"""Back-projection to the original space.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The projected data.
"""
X = self._check_inputs(X, accept_sparse_negative=True)
self._check_is_fitted(X)
return self._transform(X, inverse=True)
def quantile_transform(X, axis=0, n_quantiles=1000,
output_distribution='uniform',
ignore_implicit_zeros=False,
subsample=int(1e5),
random_state=None,
copy=False):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently.
    The cumulative distribution function of a feature is used to project the
original values. Features values of new/unseen data that fall below
or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : array-like, sparse matrix
The data to transform.
    axis : int, (default=0)
        Axis used to compute the quantiles along. If 0,
        transform each feature, otherwise (if 1) transform each sample.
n_quantiles : int, optional (default=1000)
Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative distribution function.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
    copy : boolean, optional, (default=False)
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding to the quantiles of reference.
references_ : ndarray, shape(n_quantiles, )
Quantiles of references.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import quantile_transform
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> quantile_transform(X, n_quantiles=10, random_state=0)
... # doctest: +ELLIPSIS
array([...])
See also
--------
QuantileTransformer : Performs quantile-based scaling using the
``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`).
scale : perform standardization that is faster, but less robust
to outliers.
robust_scale : perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
n = QuantileTransformer(n_quantiles=n_quantiles,
output_distribution=output_distribution,
subsample=subsample,
ignore_implicit_zeros=ignore_implicit_zeros,
random_state=random_state,
copy=copy)
if axis == 0:
return n.fit_transform(X)
elif axis == 1:
return n.fit_transform(X.T).T
else:
raise ValueError("axis should be either equal to 0 or 1. Got"
" axis={}".format(axis))
| mit |
datapythonista/pandas | pandas/tests/series/methods/test_unique.py | 4 | 1432 | import numpy as np
from pandas import (
Categorical,
Series,
)
import pandas._testing as tm
class TestUnique:
def test_unique_data_ownership(self):
# it works! GH#1807
Series(Series(["a", "c", "b"]).unique()).sort_values()
def test_unique(self):
# GH#714 also, dtype=float
ser = Series([1.2345] * 100)
ser[::2] = np.nan
result = ser.unique()
assert len(result) == 2
# explicit f4 dtype
ser = Series([1.2345] * 100, dtype="f4")
ser[::2] = np.nan
result = ser.unique()
assert len(result) == 2
def test_unique_nan_object_dtype(self):
# NAs in object arrays GH#714
ser = Series(["foo"] * 100, dtype="O")
ser[::2] = np.nan
result = ser.unique()
assert len(result) == 2
def test_unique_none(self):
# decision about None
ser = Series([1, 2, 3, None, None, None], dtype=object)
result = ser.unique()
expected = np.array([1, 2, 3, None], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_unique_categorical(self):
# GH#18051
cat = Categorical([])
ser = Series(cat)
result = ser.unique()
tm.assert_categorical_equal(result, cat)
cat = Categorical([np.nan])
ser = Series(cat)
result = ser.unique()
tm.assert_categorical_equal(result, cat)
| bsd-3-clause |
stylianos-kampakis/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
endolith/scikit-image | doc/examples/xx_applications/plot_geometric.py | 28 | 3253 | """
===============================
Using geometric transformations
===============================
In this example, we will see how to use geometric transformations in the context
of image processing.
"""
from __future__ import print_function
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage import transform as tf
margins = dict(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
"""
Basics
======
Several different geometric transformation types are supported: similarity,
affine, projective and polynomial.
Geometric transformations can either be created using the explicit parameters
(e.g. scale, shear, rotation and translation) or the transformation matrix:
First we create a transformation using explicit parameters:
"""
tform = tf.SimilarityTransform(scale=1, rotation=math.pi / 2,
translation=(0, 1))
print(tform.params)
"""
Alternatively you can define a transformation by the transformation matrix
itself:
"""
matrix = tform.params.copy()
matrix[1, 2] = 2
tform2 = tf.SimilarityTransform(matrix)
"""
These transformation objects can then be used to apply forward and inverse
coordinate transformations between the source and destination coordinate
systems:
"""
coord = [1, 0]
print(tform2(coord))
print(tform2.inverse(tform(coord)))
"""
Image warping
=============
Geometric transformations can also be used to warp images:
"""
text = data.text()
tform = tf.SimilarityTransform(scale=1, rotation=math.pi / 4,
translation=(text.shape[0] / 2, -100))
rotated = tf.warp(text, tform)
back_rotated = tf.warp(rotated, tform.inverse)
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
fig.subplots_adjust(**margins)
plt.gray()
ax1.imshow(text)
ax1.axis('off')
ax2.imshow(rotated)
ax2.axis('off')
ax3.imshow(back_rotated)
ax3.axis('off')
"""
.. image:: PLOT2RST.current_figure
Parameter estimation
====================
In addition to the basic functionality mentioned above you can also estimate the
parameters of a geometric transformation using the least-squares method.
This can amongst other things be used for image registration or rectification,
where you have a set of control points or homologous/corresponding points in two
images.
Let's assume we want to recognize letters on a photograph which was not taken
from the front but at a certain angle. In the simplest case of a plane paper
surface the letters are projectively distorted. Simple matching algorithms would
not be able to match such symbols. One solution to this problem would be to warp
the image so that the distortion is removed and then apply a matching algorithm:
"""
text = data.text()
src = np.array((
(0, 0),
(0, 50),
(300, 50),
(300, 0)
))
dst = np.array((
(155, 15),
(65, 40),
(260, 130),
(360, 95)
))
tform3 = tf.ProjectiveTransform()
tform3.estimate(src, dst)
warped = tf.warp(text, tform3, output_shape=(50, 300))
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(8, 3))
fig.subplots_adjust(**margins)
plt.gray()
ax1.imshow(text)
ax1.plot(dst[:, 0], dst[:, 1], '.r')
ax1.axis('off')
ax2.imshow(warped)
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
"""
plt.show()
| bsd-3-clause |
mikeireland/pymfe | pymfe/rv.py | 1 | 37463 | """This module/class contains functionality for computing (and plotting) radial
velocities and creating reference spectra for extracted fluxes. This should
ideally remain independent of the extraction method, such that it does not
matter which spectrograph took the data, nor what "Spectrograph" object was
used for extraction.
Most of the code below has been moved from the script "test_rhea2_extract.py".
Work still needs to be done post-refactor to ensure function inputs and outputs
are sensible, their docstrings are informative and they follow the principles of
Object Oriented Programming - such as the Single Responsibility Principle (along
with a general clean up of the code and comments, such as having the code meet
the Python line length guidelines --> the main benefit of which is being able to
keep multiple editors open side by side on smaller screens)
TODO
1) Move extract method to either extract module or rhea
2) Try to separate calculation/processing of data from saving/loading/displaying
3) Tidy up inputs to functions (e.g. cull unnecessary input parameters)
4) Make create_ref_spect() output variances (Median Absolute Deviations)
5) Possibly have dark calibration (for both flats and science frames) in its own
method. This would clean up the existing extract method, removing the need
to check whether darks and flats had been passed in (or varying permutations
of each - e.g. in the case where some of the data has already been dark
corrected, such as the solar data)
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as op
import scipy.interpolate as interp
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy import constants as const
import PyAstronomy.pyasl as pyasl
import opticstools as ot
import pdb
try:
import pyfits
except:
import astropy.io.fits as pyfits
class RadialVelocity():
"""A RadialVelocity object for calculating and plotting RVS and generating
reference spectra.
Unclear if the object needs to be initialised with any parameters at this
stage. Perhaps a file path?
"""
def __init__(self):
"""(Presently empty) constructor.
"""
pass
def rv_shift_resid(self, params, wave, spect, spect_sdev, spline_ref,
return_spect=False):
"""Find the residuals to a fit of a (subsampled)reference spectrum to an
observed spectrum.
The function for parameters p[0] through p[3] is:
.. math::
y(x) = Ref[ wave(x) * (1 - p[0]/c) ] * exp(p[1] * x^2 + p[2] * x + p[3])
Here "Ref" is a function f(wave)
Parameters
----------
        params: array-like
            Model parameters: p[0] is the radial velocity in m/s and p[1]
            through p[3] are the continuum normalisation coefficients, as
            defined in the equation above.
wave: float array
Wavelengths for the observed spectrum.
spect: float array
            The observed spectrum
spect_sdev: float array
standard deviation of the input spectra.
spline_ref: InterpolatedUnivariateSpline instance
For interpolating the reference spectrum
return_spect: boolean
Whether to return the fitted spectrum or the residuals.
wave_ref: float array
The wavelengths of the reference spectrum
ref: float array
The reference spectrum
Returns
-------
resid: float array
The fit residuals
"""
ny = len(spect)
xx = (np.arange(ny)-ny//2)/ny
norm = np.exp(params[1]*xx**2 + params[2]*xx + params[3])
# Lets get this sign correct. A redshift (positive velocity) means that
# a given wavelength for the reference corresponds to a longer
# wavelength for the target, which in turn means that the target
# wavelength has to be interpolated onto shorter wavelengths for the
# reference.
fitted_spect = spline_ref(wave*(1.0 - params[0]/const.c.si.value))*norm
if return_spect:
return fitted_spect
else:
return (fitted_spect - spect)/spect_sdev
def rv_shift_chi2(self, params, wave, spect, spect_sdev, spline_ref):
"""Find the chi-squared for an RV fit. Just a wrapper for rv_shift_resid,
so the docstring is cut and paste!
The function for parameters p[0] through p[3] is:
.. math::
y(x) = Ref[ wave(x) * (1 - p[0]/c) ] * exp(p[1] * x^2 + p[2] * x + p[3])
Here "Ref" is a function f(wave)
Parameters
----------
params:
...
wave: float array
Wavelengths for the observed spectrum.
spect: float array
The observed spectrum
spect_sdev:
...
spline_ref:
...
        return_spect: boolean
            Whether to return the fitted spectrum or the residuals.
wave_ref: float array
The wavelengths of the reference spectrum
ref: float array
The reference spectrum
Returns
-------
chi2:
The fit chi-squared
"""
return np.sum(self.rv_shift_resid(params, wave, spect, spect_sdev, spline_ref)**2)
def rv_shift_jac(self, params, wave, spect, spect_sdev, spline_ref):
r"""Explicit Jacobian function for rv_shift_resid.
This is not a completely analytic solution, but without it there seems to be
numerical instability.
The key equations are:
        .. math:: f(x) = R( \lambda(x) (1 - p_0/c) ) \times \exp(p_1 x^2 + p_2 x + p_3)
g(x) = (f(x) - d(x))/\sigma(x)
\frac{dg}{dp_0}(x) \approx [f(x + 1 m/s) -f(x) ]/\sigma(x)
\frac{dg}{dp_1}(x) = x^2 f(x) / \sigma(x)
\frac{dg}{dp_2}(x) = x f(x) / \sigma(x)
\frac{dg}{dp_3}(x) = f(x) / \sigma(x)
Parameters
----------
params: float array
wave: float array
Wavelengths for the observed spectrum.
spect: float array
The observed spectrum
spect_sdev:
...
spline_ref:
...
Returns
-------
jac:
The Jacobian.
"""
ny = len(spect)
xx = (np.arange(ny)-ny//2)/ny
norm = np.exp(params[1]*xx**2 + params[2]*xx + params[3])
fitted_spect = spline_ref(wave*(1.0 - params[0]/const.c.si.value))*norm
jac = np.empty( (ny,4) )
#The Jacobian is the derivative of fitted_spect/sdev with respect to
#p[0] through p[3]
jac[:,3] = fitted_spect/spect_sdev
jac[:,2] = fitted_spect*xx/spect_sdev
jac[:,1] = fitted_spect*xx**2/spect_sdev
jac[:,0] = (spline_ref(wave*(1.0 - (params[0] + 1.0)/const.c.si.value))*
norm - fitted_spect)/spect_sdev
return jac
def create_ref_spect(self, wave, fluxes, vars, bcors, rebin_fact=2,
gauss_sdev=1.0, med_cut=0.6,gauss_hw=7,threshold=100):
"""Create a reference spectrum from a series of target spectra.
The process is:
1) Re-grid the spectra into a rebin_fact times smaller wavelength grid.
2) The spectra are barycentrically corrected by linear interpolation. Note
that when used on a small data set, typically the spectra will be shifted by
many km/s. For an RV-stable star, the fitting process then needs to find the
opposite of this barycentric velocity.
3) Remove bad (i.e. low flux) files.
4) Median combine the spectra.
5) Convolve the result by a Gaussian to remove high spatial frequency noise. This
can be important when the reference spectrum is created from only a small
number of input spectra, and high-frequency noise can be effectively fitted to
itself.
Parameters
----------
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
rebin_fact: int
Factor by which to rebin.
gauss_sdev:
...
med_cut:
...
gauss_hw:
...
Returns
-------
wave_ref: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel*2+2),
where the wavelength scale has been interpolated.
ref_spect: 2D np.array(float)
Reference spectrum of form (Order, Flux/pixel*2+2),
where the flux scale has been interpolated.
"""
nm = fluxes.shape[1]
ny = fluxes.shape[2]
nf = fluxes.shape[0]
C = const.c.si.value
#Create arrays for our outputs.
wave_ref = np.empty( (nm,rebin_fact*ny + 2) )
ref_spect = np.empty( (nm,rebin_fact*ny + 2) )
#First, rebin everything, using opticstools.utils.regrid_fft
new_shape = (fluxes.shape[1],rebin_fact*fluxes.shape[2])
fluxes_rebin = np.empty( (fluxes.shape[0],fluxes.shape[1],
rebin_fact*fluxes.shape[2]) )
for i in range(nf):
fluxes_rebin[i] = ot.utils.regrid_fft(fluxes[i],new_shape)
#Create the final wavelength grid.
for j in range(nm):
wave_ref[j,1:-1] = np.interp(np.arange(rebin_fact*ny)/rebin_fact,
np.arange(ny),wave[j,:])
#Fill in the end wavelengths, including +/-100 km/s from the ends.
wave_ref[j,-2] = wave_ref[j,-3] + (wave_ref[j,-3]-wave_ref[j,-4])
wave_ref[j,0] = wave_ref[j,1] * (C + 1e5)/C
wave_ref[j,-1] = wave_ref[j,-2] * (C - 1e5)/C
#Barycentric correct. For a positive barycentric velocity, the observer is
#moving towards the star, which means that star is blue-shifted and the
#correct rest-frame spectrum is at longer wavelengths. The interpolation
#below shifts the spectrum to the red, as required.
for i in range(nf):
for j in range(nm):
# Awkwardly, we've extended the wavelength scale by 2 elements,
# but haven't yet extended the fluxes...
ww = wave_ref[j,1:-1]
fluxes_rebin[i,j] = np.interp(ww*(1-bcors[i]/C), ww[::-1],
fluxes_rebin[i,j,::-1])
#!!! New Code. This was already checked and makes no sense.
#Combine the spectra.
flux_meds = np.median(fluxes_rebin,axis=2)
flux_files = np.median(flux_meds,axis=1)
if med_cut > 0:
good_files = np.where(flux_files > med_cut*np.median(flux_files))[0]
else:
good_files = np.arange(len(flux_files),dtype=np.int)
flux_orders = np.median(flux_meds[good_files],axis=0)
flux_norm = fluxes_rebin.copy()
for g in good_files:
for j in range(nm):
flux_norm[g,j,:] /= flux_meds[g,j]
#pdb.set_trace()
#Create a median over files
flux_ref = np.median(flux_norm[good_files],axis=0)
#Multiply this by the median for each order
for j in range(nm):
flux_ref[j] *= flux_orders[j]
#Threshold the data whenever the flux is less than "threshold"
if (threshold > 0):
bad = flux_ref<2*threshold
flux_ref[bad] *= np.maximum(flux_ref[bad]-threshold,0)/threshold
# Create a Gaussian smoothing function for the reference spectrum. This
# is needed to prevent a bias to zero radial velocity, especially in the
# case of few data points.
gg = np.exp(-(np.arange(2*gauss_hw+1)-gauss_hw)**2/2.0/gauss_sdev**2)
gg /= np.sum(gg)
one_order = np.empty(flux_ref.shape[1] + 2*gauss_hw)
for j in range(nm):
one_order[gauss_hw:-gauss_hw] = flux_ref[j,:]
one_order[:gauss_hw] = one_order[gauss_hw]
one_order[-gauss_hw:] = one_order[-gauss_hw-1]
ref_spect[j,:] = np.convolve(one_order, gg,
mode='same')[gauss_hw-1:1-gauss_hw]
return wave_ref, ref_spect
def extract_spectra(self, files, extractor, star_dark=None, flat_files=None,
flat_dark=None, location=('151.2094','-33.865',100.0),
coord=None, do_bcor=True, ra_dec_hr=False):
"""Extract the spectrum from a file, given a dark file, a flat file and
a dark for the flat. The process is:
1) Dark correcting the data and the flat fields.
2) Computing (but not applying) Barycentric corrections.
3) Extracting the data and the flat fields using the extract module, to form
:math:`f_m(x)`, the flux for orders m and dispersion direction pixels x.
4) Normalising the flat fields, so that the median of each order is 1.0.
5) Dividing by the extracted flat field. Uncertainties from the flat field are
added in quadrature.
TODO: Not the neatest implementation, but should account for the fact that
there are no flats or darks for the ThAr frames. Might be worth tidying
up and making the implementation a little more elegant.
Parameters
----------
files: list of strings
            One string for each file. Can be on separate nights - a full
pathname should be given.
star_dark:
flat_files: list of strings.
            One string for each star file. Can be on separate nights - a full
pathname should be given.
flat_dark:
        location: (longitude:string, latitude:string, elevation:float)
The location on Earth where the data were taken.
coord: astropy.coordinates.sky_coordinate.SkyCoord
            The sky coordinates (RA and Dec) of the target
do_bcor: boolean
Flag for whether to do barycentric correction
Returns
-------
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation.
"""
# Initialise list of return values
# Each index represents a single observation
fluxes = []
vars = []
dates = []
bcors = []
#!!! This is dodgy, as files and flat_files should go together in a dict
for ix,file in enumerate(files):
# Dark correct the science and flat frames
# Only if flat/darks have been supplied --> ThAr might not have them
# If not supplied, just use science/reference data
try:
# Dark correct science frames
if len(star_dark) > 0:
data = pyfits.getdata(file) - star_dark
else:
data = pyfits.getdata(file)
# Dark correct flats
if len(flat_files) > 0 and len(flat_dark) > 0:
flat = pyfits.getdata(flat_files[ix]) - flat_dark
elif len(flat_files) > 0:
flat = pyfits.getdata(flat_files[ix])
except:
print('Unable to calibrate file ' + file +
'. Check that format of data arrays are consistent.')
print(pyfits.getdata(file).shape)
print(star_dark.shape)
continue
header = pyfits.getheader(file)
date = Time(header['JD'], format='jd', location=location)
dates.append(date)
# Determine the barycentric correction
if do_bcor:
if not coord:
# Depending on whether the RA and DEC is saved in hours or
# degrees, load and create a SkyCoord object
if ra_dec_hr:
ra_deg = float(header['RA'])*15
else:
ra_deg = float(header['RA'])
dec_deg = float(header['DEC'])
coord = SkyCoord(ra=ra_deg, dec=dec_deg, unit='deg')
if not location:
location=(float(header['LONG']), float(header['LAT']),
float(header['HEIGHT']))
#(obs_long, obs_lat, obs_alt, ra2000, dec2000, jd, debug=False)
#pdb.set_trace()
bcors.append(1e3*pyasl.helcorr(float(location[0]),
float(location[1]),location[2],coord.ra.deg,
coord.dec.deg,date.jd)[0] )
else:
bcors.append(0.0)
# Extract the fluxes and variance for the science and flat frames
print("Extracting spectra from file #", str(ix))
flux, var = extractor.one_d_extract(data=data, rnoise=20.0)
# Continue only when flats have been supplied
# Perform flat field correction and adjust variances
if len(flat_files) > 0:
flat_flux, fvar = extractor.one_d_extract(data=flat,
rnoise=20.0)
for j in range(flat_flux.shape[0]):
medf = np.median(flat_flux[j])
flat_flux[j] /= medf
fvar[j] /= medf**2
#Calculate the variance after dividing by the flat
var = var/flat_flux**2 + fvar * flux**2/flat_flux**4
#Now normalise the flux.
flux /= flat_flux
# Regardless of whether the data has been flat field corrected,
# append to the arrays and continue
fluxes.append(flux[:,:,0])
vars.append(var[:,:,0])
fluxes = np.array(fluxes)
vars = np.array(vars)
bcors = np.array(bcors)
mjds = np.array([d.mjd for d in dates])
return fluxes, vars, bcors, mjds
def calculate_rv_shift(self, wave_ref, ref_spect, fluxes, vars, bcors,
wave,return_fitted_spects=False,bad_threshold=10):
"""Calculates the Radial Velocity of each spectrum
The radial velocity shift of the reference spectrum required
        to match the flux in each order of each input spectrum is calculated.
The input fluxes to this method are flat-fielded data, which are then fitted with
a barycentrically corrected reference spectrum :math:`R(\lambda)`, according to
the following equation:
.. math::
            f(x) = R( \lambda(x) (1 - p_0/c) ) \\times \exp(p_1 x^2 + p_2 x + p_3)
        The first term in this equation is simply the velocity corrected spectrum, based on
        the arc-lamp derived reference wavelength scale :math:`\lambda(x)` for pixel coordinates x.
        The second term in the equation is a continuum normalisation - a shifted Gaussian was
        chosen as a function that is non-zero everywhere. The scipy.optimize.leastsq function is used
        to find the best fitting set of parameters :math:`p_0` through to :math:`p_3`.
The reference spectrum function :math:`R(\lambda)` is created using a wavelength grid
which is over-sampled with respect to the data by a factor of 2. Individual fitted
wavelengths are then found by cubic spline interpolation on this :math:`R_j(\lambda_j)`
discrete grid.
Parameters
----------
wave_ref: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel*2+2),
where the wavelength scale has been interpolated.
ref_spect: 2D np.array(float)
Reference spectrum of form (Order, Flux/pixel*2+2),
where the flux scale has been interpolated.
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
Returns
-------
rvs: 2D np.array(float)
Radial velocities of format (Observation, Order)
rv_sigs: 2D np.array(float)
Radial velocity sigmas of format (Observation, Order)
"""
nm = fluxes.shape[1]
ny = fluxes.shape[2]
nf = fluxes.shape[0]
rvs = np.zeros( (nf,nm) )
rv_sigs = np.zeros( (nf,nm) )
initp = np.zeros(4)
initp[3]=0.5
initp[0]=0.0
spect_sdev = np.sqrt(vars)
fitted_spects = np.empty(fluxes.shape)
for i in range(nf):
# Start with initial guess of no intrinsic RV for the target.
initp[0] = -bcors[i] #!!! New Change
nbad=0
for j in range(nm):
# This is the *only* non-linear interpolation function that
# doesn't take forever
spl_ref = interp.InterpolatedUnivariateSpline(wave_ref[j,::-1],
ref_spect[j,::-1])
args = (wave[j,:], fluxes[i,j,:], spect_sdev[i,j,:], spl_ref)
# Remove edge effects in a slightly dodgy way.
# 20 pixels is about 30km/s.
args[2][:20] = np.inf
args[2][-20:] = np.inf
                the_fit = op.leastsq(self.rv_shift_resid, initp, args=args,
                                     diag=[1e3, 1, 1, 1],
                                     Dfun=self.rv_shift_jac, full_output=True)
#the_fit = op.leastsq(self.rv_shift_resid, initp, args=args,diag=[1e3,1e-6,1e-3,1], full_output=True,epsfcn=1e-9)
#The following line also doesn't work "out of the box".
#the_fit = op.minimize(self.rv_shift_chi2,initp,args=args)
#pdb.set_trace()
#Remove bad points...
resid = self.rv_shift_resid( the_fit[0], *args)
wbad = np.where( np.abs(resid) > bad_threshold)[0]
nbad += len(wbad)
                #More than 20 bad pixels in a single order is *crazy*
if len(wbad)>20:
fitted_spect = self.rv_shift_resid(the_fit[0], *args, return_spect=True)
plt.clf()
plt.plot(args[0], args[1])
plt.plot(args[0][wbad], args[1][wbad],'o')
plt.plot(args[0], fitted_spect)
plt.xlabel("Wavelength")
plt.ylabel("Flux")
#print("Lots of 'bad' pixels. Type c to continue if not a problem")
#pdb.set_trace()
args[2][wbad] = np.inf
                    the_fit = op.leastsq(self.rv_shift_resid, initp, args=args,
                                         diag=[1e3, 1, 1, 1],
                                         Dfun=self.rv_shift_jac,
                                         full_output=True)
#the_fit = op.leastsq(self.rv_shift_resid, initp,args=args, diag=[1e3,1e-6,1e-3,1], full_output=True, epsfcn=1e-9)
#Some outputs for testing
fitted_spects[i,j] = self.rv_shift_resid(the_fit[0], *args, return_spect=True)
if ( np.abs(the_fit[0][0] - bcors[i]) < 1e-4 ):
#pdb.set_trace() #This shouldn't happen, and indicates a problem with the fit.
pass
#Save the fit and the uncertainty.
rvs[i,j] = the_fit[0][0]
try:
rv_sigs[i,j] = np.sqrt(the_fit[1][0,0])
except:
rv_sigs[i,j] = np.NaN
print("Done file {0:d}. Bad spectral pixels: {1:d}".format(i,nbad))
if return_fitted_spects:
return rvs, rv_sigs, fitted_spects
else:
return rvs, rv_sigs
def save_fluxes(self, files, fluxes, vars, bcors, wave, mjds, out_path):
"""Method to save the extracted spectra.
TODO:
Might want to remove the dependence on files (to get the headers) as it
will prevent (or complicate) the saving of the reference spectrum.
Parameters
----------
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation.
out_path: String
The directory to save the extracted fluxes.
"""
# Loop through each extracted spectrum
for i, file in enumerate(files):
#try:
# Extract the header information from the file
header = pyfits.getheader(file)
file_name = file.split("/")[-1].split(".")[0] + "_extracted.fits"
full_path = out_path + file_name
# Save to fits
hl = pyfits.HDUList()
hl.append(pyfits.ImageHDU(fluxes[i], header))
hl.append(pyfits.ImageHDU(vars[i]))
hl.append(pyfits.ImageHDU(wave))
col1 = pyfits.Column(name='bcor', format='D',
array=np.array([bcors[i]]))
col2 = pyfits.Column(name='mjd', format='D',
array=np.array([mjds[i]]))
cols = pyfits.ColDefs([col1, col2])
hl.append(pyfits.new_table(cols))
hl.writeto(full_path, clobber=True)
#except:
#print("Error: Some files may not have been saved.")
#print("Likely due to incompatible array sizes for frames.")
#continue
def save_ref_spect(self, files, ref_spect, vars_ref, wave_ref, bcors, mjds,
out_path, object):
"""Method to save an extracted reference spectrum
Parameters
----------
ref_spect: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars_ref: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
wave_ref: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation used to create ref_spect
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation used to create
ref_spect
out_path: String
The directory to save the reference spectrum
object: String
            The name of the object observed.
"""
header = pyfits.header.Header()
n = str(len(files))
full_path = out_path + "reference_spectrum_" + n + "_" + object +".fits"
# Record which spectra were used to create the reference
for i, file in enumerate(files):
# Extract the file name of each file and store in the header
file_name = file.split("/")[-1].split(".")[0] + "_extracted.fits"
header_name = "COMB" + str(i)
comment = "Combined spectrum #" + str(i)
header[header_name] = (file_name, comment)
# Save to fits
hl = pyfits.HDUList()
hl.append(pyfits.ImageHDU(ref_spect, header))
hl.append(pyfits.ImageHDU(vars_ref[0]))
hl.append(pyfits.ImageHDU(wave_ref))
col1 = pyfits.Column(name='bcor', format='D', array=np.array([bcors[0]]))
col2 = pyfits.Column(name='mjd', format='D',
array=np.array([mjds[0]]))
cols = pyfits.ColDefs([col1, col2])
hl.append(pyfits.new_table(cols))
hl.writeto(full_path, clobber=True)
def load_ref_spect(self, path):
"""Method to load a previously saved reference spectrum
Parameters
----------
path: string
The file path to the saved reference spectrum.
Returns
-------
ref_spect: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars_ref: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
wave_ref: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
bcors_ref: 1D np.array(float)
Barycentric correction for each observation used to create ref_spect
mjds_ref: 1D np.array(float)
Modified Julian Date (MJD) of each observation used to create
ref_spect
"""
hl = pyfits.open(path)
ref_spect = hl[0].data
vars_ref = hl[1].data
wave_ref = hl[2].data
bcors_ref = hl[3].data['bcor'][0]
mjds_ref = hl[3].data['mjd'][0]
hl.close()
return ref_spect, vars_ref, wave_ref, bcors_ref, mjds_ref
def load_fluxes(self, files):
"""Loads previously saved fluxes.
Parameters
----------
files: [string]
String list of filepaths of the saved fluxes
Returns
-------
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation.
"""
fluxes = []
vars = []
wave = []
bcors = []
mjds = []
for f in files:
hl = pyfits.open(f)
fluxes.append(hl[0].data)
vars.append(hl[1].data)
wave = hl[2].data # Only need one (assumption of same instrument)
bcors.append(hl[3].data['bcor'][0])
mjds.append(hl[3].data['mjd'][0])
hl.close()
fluxes = np.array(fluxes)
vars = np.array(vars)
#wave = np.array(hl[2].data)
bcors = np.array(bcors)
mjds = np.array(mjds)
return fluxes, vars, wave, bcors, mjds
def plot_rvs(self, rvs, rv_sigs, mjds, dates, bcors, plot_title):
"""Plots the barycentrically corrected Radial Velocities.
Note:
Not complete.
Parameters
----------
rvs: 2D np.array(float)
Radial velocities of format (Observation, Order)
rv_sigs: 2D np.array(float)
Radial velocity sigmas of format (Observation, Order)
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation.
bcors: 1D np.array(float)
Barycentric correction for each observation.
plot_title: String
Name of the plot
"""
# Dimensions (Number of observations and orders respectively)
nf = rvs.shape[0]
nm = rvs.shape[1]
# Plot the Barycentric corrected RVs. Note that a median over all orders
# is only a first step - a weighted mean is needed.
plt.clf()
rvs += bcors.repeat(nm).reshape( (nf,nm) )
rv_mn, wt_sum = np.average(rvs,axis=1, weights=1.0/rv_sigs**2,
returned=True)
rv_mn_sig = 1.0/np.sqrt(wt_sum)
rv_med1 = np.median(rvs,1)
rv_med2 = np.median(rvs[:,3:20],1)
#plt.plot_date([dates[i].plot_date for i in range(len(dates))], rv_mn)
#plt.errorbar(mjds, rv_mn, yerr=rv_mn_sig,fmt='o')
plt.errorbar(mjds, rv_med2, yerr=rv_mn_sig,fmt='o')
plt.xlabel('Date (MJD)')
plt.ylabel('Barycentric RV (m/s)')
plt.title(plot_title)
plt.plot_date([dates[i].plot_date for i in range(len(dates))], rv_mn)
plt.show()
def save_rvs(self, rvs, rv_sigs, bcor, mjds, bcor_rvs, base_save_path):
"""Method for saving calculated radial velocities and their errors to
csv files.
Parameters
----------
        rvs: 2D np.array(float)
            Radial velocities of format (Observation, Order)
        rv_sigs: 2D np.array(float)
            Radial velocity sigmas of format (Observation, Order)
        bcor: 1D np.array(float)
            Barycentric correction for each observation.
        mjds: 1D np.array(float)
            Modified Julian Date (MJD) of each observation.
        bcor_rvs: 2D np.array(float)
            Barycentrically corrected radial velocities of format
            (Observation, Order)
        base_save_path: string
            The base of each of the csv file paths.
"""
# Dimensions (Number of observations and orders respectively)
nf = rvs.shape[0]
nm = rvs.shape[1]
# Setup save paths
rv_file = base_save_path + "_" + str(rvs.shape[0]) + "_rvs.csv"
rv_sig_file = base_save_path + "_" + str(rvs.shape[0]) + "_rv_sig.csv"
bcor_file = base_save_path + "_" + str(rvs.shape[0]) + "_bcor.csv"
bcor_rv_file = base_save_path + "_" + str(rvs.shape[0]) + "_bcor_rv.csv"
# Headers for each csv
rv_h = "RV in m/s for each order, for each MJD epoch"
rv_sig_h = "RV uncertainties in m/s for each order, for each MJD epoch"
bcor_h = "Barycentric correction in m/s"
bcor_rvs_h = "Barycentrically corrected RVs in m/s"
# Save rvs and errors
np.savetxt(rv_file, np.append(mjds.reshape(nf,1), rvs,axis=1),
fmt="%10.4f" + nm*", %6.1f", header=rv_h)
np.savetxt(rv_sig_file, np.append(mjds.reshape(nf,1),rv_sigs,axis=1),
fmt="%10.4f" + nm*", %6.1f", header=rv_sig_h)
np.savetxt(bcor_file, np.append(mjds.reshape(nf,1),bcor.reshape(nf,1),axis=1),
fmt="%10.4f" + ", %6.1f", header=bcor_h)
np.savetxt(bcor_rv_file, np.append(mjds.reshape(nf,1), bcor_rvs,axis=1),
fmt="%10.4f" + nm*", %6.1f", header=bcor_rvs_h)
def load_rvs(self, rvs_path, rv_sig_path, bcor_path=None):
"""Opens the saved RV, RV sig and bcor csv files and formats the
contents to be easily usable and non-redundant
Parameters
----------
rvs_path: string
File path to the rv csv
rv_sig_path: string
File path to the rv sig csv
bcor_path: string
File path to the bcor csv
Returns
-------
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation.
raw_rvs: 2D np.array(float)
Radial velocities of format (Observation, Order)
raw_rv_sigs: 2D np.array(float)
Radial velocity sigmas of format (Observation, Order)
raw_bcor: 1D np.array(float)
RV barycentric correction for each observation
bcors_rvs: 2D np.array(float)
Barycentrically corrected radial velocity sigmas of format
(Observation, Order)
"""
# Import
rvs = np.loadtxt(rvs_path, delimiter=",")
rv_sig = np.loadtxt(rv_sig_path, delimiter=",")
# Format to remove mjd values from start of each row
mjds = rvs[:,0]
raw_rvs = rvs[:,1:]
raw_rv_sig = rv_sig[:,1:]
# Number of observations and orders respectively
nf = len(mjds)
nm = raw_rvs.shape[1]
# Only deal with barycentric correction if it is passed in
# (It may not be when dealing with ThAr files)
if bcor_path is not None:
bcors = np.loadtxt(bcor_path, delimiter=",")
raw_bcor = bcors[:,1]
bcor_rvs = raw_rvs + raw_bcor.repeat(nm).reshape( (nf, nm) )
return mjds, raw_rvs, raw_rv_sig, raw_bcor, bcor_rvs
else:
return mjds, raw_rvs, raw_rv_sig
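# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the class above): a minimal,
# hypothetical end-to-end workflow using RadialVelocity. The file paths and the
# output prefix are placeholders, and the extracted-flux files are assumed to
# have been written previously with save_fluxes(). It only runs when this
# module is executed directly.
if __name__ == "__main__":
    import glob
    rv_calc = RadialVelocity()
    # Load previously extracted fluxes (hypothetical path).
    extracted_files = sorted(glob.glob("extracted/*_extracted.fits"))
    fluxes, variances, wave, bcors, mjds = rv_calc.load_fluxes(extracted_files)
    # Build a reference spectrum from the observations themselves.
    wave_ref, ref_spect = rv_calc.create_ref_spect(wave, fluxes, variances,
                                                   bcors)
    # Fit the reference spectrum to every order of every observation.
    rvs, rv_sigs = rv_calc.calculate_rv_shift(wave_ref, ref_spect, fluxes,
                                              variances, bcors, wave)
    # Apply the barycentric correction and save the results to csv files.
    nf, nm = rvs.shape
    bcor_rvs = rvs + bcors.repeat(nm).reshape((nf, nm))
    rv_calc.save_rvs(rvs, rv_sigs, bcors, mjds, bcor_rvs, "results/target")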
| mit |
kjordahl/xray | doc/conf.py | 3 | 13424 | # -*- coding: utf-8 -*-
#
# xray documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 6 18:57:54 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
print "python exec:", sys.executable
print "sys.path:", sys.path
try:
import numpy
print "numpy: %s, %s" % (numpy.__version__, numpy.__file__)
except ImportError:
print "no numpy"
try:
import scipy
print "scipy: %s, %s" % (scipy.__version__, scipy.__file__)
except ImportError:
print "no scipy"
try:
import pandas
print "pandas: %s, %s" % (pandas.__version__, pandas.__file__)
except ImportError:
print "no pandas"
try:
import matplotlib
print "matplotlib: %s, %s" % (matplotlib.__version__, matplotlib.__file__)
except ImportError:
print "no matplotlib"
try:
import IPython
print "ipython: %s, %s" % (IPython.__version__, IPython.__file__)
except ImportError:
print "no ipython"
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Monkey patch inspect.findsource to work around a Python bug that manifests on
# RTD. Copied from IPython.core.ultratb.
# Reference: https://github.com/ipython/ipython/issues/1456
import linecache
import re
from inspect import getsourcefile, getfile, getmodule,\
ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode
def findsource(object):
"""Return the entire source file and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of all the lines
in the file and the line number indexes a line in that list. An IOError
is raised if the source code cannot be retrieved.
FIXED version with which we monkeypatch the stdlib to work around a bug."""
file = getsourcefile(object) or getfile(object)
# If the object is a frame, then trying to get the globals dict from its
# module won't work. Instead, the frame object itself has the globals
# dictionary.
globals_dict = None
if inspect.isframe(object):
# XXX: can this ever be false?
globals_dict = object.f_globals
else:
module = getmodule(object, file)
if module:
globals_dict = module.__dict__
lines = linecache.getlines(file, globals_dict)
if not lines:
raise IOError('could not get source code')
if ismodule(object):
return lines, 0
if isclass(object):
name = object.__name__
pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
# make some effort to find the best matching class definition:
# use the one with the least indentation, which is the one
# that's most probably not inside a function definition.
candidates = []
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
# if it's at toplevel, it's already the best one
if lines[i][0] == 'c':
return lines, i
# else add whitespace to candidate list
candidates.append((match.group(1), i))
if candidates:
# this will sort by whitespace, and by line number,
# less whitespace first
candidates.sort()
return lines, candidates[0][1]
else:
raise IOError('could not find class definition')
if ismethod(object):
object = object.__func__
if isfunction(object):
object = object.__code__
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
if not hasattr(object, 'co_firstlineno'):
raise IOError('could not find function definition')
pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
pmatch = pat.match
# fperez - fix: sometimes, co_firstlineno can give a number larger than
# the length of lines, which causes an error. Safeguard against that.
lnum = min(object.co_firstlineno,len(lines))-1
while lnum > 0:
if pmatch(lines[lnum]): break
lnum -= 1
return lines, lnum
raise IOError('could not find code object')
import inspect
inspect.findsource = findsource
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
'sphinx.ext.mathjax',
'numpydoc',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
]
extlinks = {'issue': ('https://github.com/xray/xray/issues/%s', 'GH')}
autosummary_generate = True
numpydoc_class_members_toctree = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'xray'
copyright = u'2014, xray Developers'
import xray
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = xray.version.short_version
# The full version, including alpha/beta/rc tags.
release = xray.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'xraydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'xray.tex', u'xray Documentation',
u'xray Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'xray', u'xray Documentation',
[u'xray Developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'xray', u'xray Documentation',
u'xray Developers', 'xray', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/2.7/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'iris': ('http://scitools.org.uk/iris/docs/latest/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
}
| apache-2.0 |
crisely09/horton | horton/meanfield/scf_diis.py | 1 | 18828 | # -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2016 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''Abstract DIIS code used by the different DIIS implementations'''
import numpy as np
from horton.log import log, timer
from horton.exceptions import NoSCFConvergence
from horton.meanfield.utils import compute_commutator, check_dm
from horton.meanfield.convergence import convergence_error_commutator
__all__ = []
class DIISSCFSolver(object):
'''Base class for all DIIS SCF solvers'''
kind = 'dm' # input/output variable is the density matrix
def __init__(self, DIISHistoryClass, threshold=1e-6, maxiter=128, nvector=6, skip_energy=False, prune_old_states=False):
'''
**Arguments:**
DIISHistoryClass
A DIIS history class.
**Optional arguments:**
maxiter
                The maximum number of iterations. When set to None, the SCF loop
                will go on until convergence is reached.
threshold
The convergence threshold for the wavefunction
skip_energy
When set to True, the final energy is not computed. Note that some
                DIIS variants need to compute the energy anyway. For these methods
this option is irrelevant.
prune_old_states
When set to True, old states are pruned from the history when their
coefficient is zero. Pruning starts at the oldest state and stops
                as soon as a state is encountered with a non-zero coefficient, even
                if some newer states have a zero coefficient.
'''
self.DIISHistoryClass = DIISHistoryClass
self.threshold = threshold
self.maxiter = maxiter
self.nvector = nvector
self.skip_energy = skip_energy
self.prune_old_states = prune_old_states
@timer.with_section('SCF')
def __call__(self, ham, lf, overlap, occ_model, *dms):
'''Find a self-consistent set of density matrices.
**Arguments:**
ham
An effective Hamiltonian.
lf
The linalg factory to be used.
overlap
The overlap operator.
occ_model
Model for the orbital occupations.
dm1, dm2, ...
The initial density matrices. The number of dms must match
ham.ndm.
'''
# Some type checking
if ham.ndm != len(dms):
raise TypeError('The number of initial density matrices does not match the Hamiltonian.')
# Check input density matrices.
for i in xrange(ham.ndm):
check_dm(dms[i], overlap, lf)
occ_model.check_dms(overlap, *dms)
# keep local variables as attributes for inspection/debugging by caller
self._history = self.DIISHistoryClass(lf, self.nvector, ham.ndm, ham.deriv_scale, overlap)
self._focks = [lf.create_two_index() for i in xrange(ham.ndm)]
self._exps = [lf.create_expansion() for i in xrange(ham.ndm)]
if log.do_medium:
log('Starting restricted closed-shell %s-SCF' % self._history.name)
log.hline()
log('Iter Error CN Last nv Method Energy Change')
log.hline()
converged = False
counter = 0
while self.maxiter is None or counter < self.maxiter:
# Construct the Fock operator from scratch if the history is empty:
if self._history.nused == 0:
# feed the latest density matrices in the hamiltonian
ham.reset(*dms)
# Construct the Fock operators
ham.compute_fock(*self._focks)
# Compute the energy if needed by the history
energy = ham.compute_energy() if self._history.need_energy \
else None
# Add the current fock+dm pair to the history
error = self._history.add(energy, dms, self._focks)
# Screen logging
if log.do_high:
log(' DIIS add')
if error < self.threshold:
converged = True
break
if log.do_high:
log.blank()
if log.do_medium:
energy_str = ' '*20 if energy is None else '% 20.13f' % energy
log('%4i %12.5e %2i %20s' % (
counter, error, self._history.nused, energy_str
))
if log.do_high:
log.blank()
fock_interpolated = False
else:
energy = None
fock_interpolated = True
# Take a regular SCF step using the current fock matrix. Then
# construct a new density matrix and fock matrix.
for i in xrange(ham.ndm):
self._exps[i].from_fock(self._focks[i], overlap)
occ_model.assign(*self._exps)
for i in xrange(ham.ndm):
self._exps[i].to_dm(dms[i])
ham.reset(*dms)
energy = ham.compute_energy() if self._history.need_energy else None
ham.compute_fock(*self._focks)
# Add the current (dm, fock) pair to the history
if log.do_high:
log(' DIIS add')
error = self._history.add(energy, dms, self._focks)
# break when converged
if error < self.threshold:
converged = True
break
# Screen logging
if log.do_high:
log.blank()
if log.do_medium:
energy_str = ' '*20 if energy is None else '% 20.13f' % energy
log('%4i %12.5e %2i %20s' % (
counter, error, self._history.nused, energy_str
))
if log.do_high:
log.blank()
# get extra/intra-polated Fock matrix
while True:
# The following method writes the interpolated dms and focks
# in-place.
energy_approx, coeffs, cn, method, error = self._history.solve(dms, self._focks)
# if the error is small on the interpolated state, we have
# converged to a solution that may have fractional occupation
# numbers.
if error < self.threshold:
converged = True
break
#if coeffs[coeffs<0].sum() < -1:
# if log.do_high:
# log(' DIIS (coeffs too negative) -> drop %i and retry' % self._history.stack[0].identity)
# self._history.shrink()
if self._history.nused <= 2:
break
if coeffs[-1] == 0.0:
if log.do_high:
log(' DIIS (last coeff zero) -> drop %i and retry' % self._history.stack[0].identity)
self._history.shrink()
else:
break
if False and len(coeffs) == 2:
dms_tmp = [dm.copy() for dm in dms]
import matplotlib.pyplot as pt
xs = np.linspace(0.0, 1.0, 25)
a, b = self._history._setup_equations()
energies1 = []
energies2 = []
for x in xs:
x_coeffs = np.array([1-x, x])
energies1.append(np.dot(x_coeffs, 0.5*np.dot(a, x_coeffs) - b))
self._history._build_combinations(x_coeffs, dms_tmp, None)
ham.reset(*dms_tmp)
energies2.append(ham.compute_energy())
print x, energies1[-1], energies2[-1]
pt.clf()
pt.plot(xs, energies1, label='est')
pt.plot(xs, energies2, label='ref')
pt.axvline(coeffs[1], color='k')
pt.legend(loc=0)
pt.savefig('diis_test_%05i.png' % counter)
if energy_approx is not None:
energy_change = energy_approx - min(state.energy for state in self._history.stack)
else:
energy_change = None
# log
if log.do_high:
self._history.log(coeffs)
if log.do_medium:
change_str = ' '*10 if energy_change is None else '% 12.7f' % energy_change
log('%4i %10.3e %12.7f %2i %s %12s' % (
counter, cn, coeffs[-1], self._history.nused, method,
change_str
))
if log.do_high:
log.blank()
if self.prune_old_states:
# get rid of old states with zero coeff
for i in xrange(self._history.nused):
if coeffs[i] == 0.0:
if log.do_high:
log(' DIIS insignificant -> drop %i' % self._history.stack[0].identity)
self._history.shrink()
else:
break
# counter
counter += 1
if log.do_medium:
if converged:
log('%4i %12.5e (converged)' % (counter, error))
log.blank()
if not self.skip_energy or self._history.need_energy:
if not self._history.need_energy:
ham.compute_energy()
if log.do_medium:
ham.log()
if not converged:
raise NoSCFConvergence
return counter
def error(self, ham, lf, overlap, *dms):
return convergence_error_commutator(ham, lf, overlap, *dms)
class DIISState(object):
'''A single record (vector) in a DIIS history object.'''
def __init__(self, lf, ndm, work, overlap):
'''
**Arguments:**
lf
The LinalgFactor used to create the two-index operators.
ndm
The number of density matrices (and fock matrices) in one
state.
work
A two index operator to be used as a temporary variable. This
object is allocated by the history object.
overlap
The overlap matrix.
'''
# Not all of these need to be used.
self.ndm = ndm
self.work = work
self.overlap = overlap
self.energy = np.nan
self.normsq = np.nan
self.dms = [lf.create_two_index() for i in xrange(self.ndm)]
self.focks = [lf.create_two_index() for i in xrange(self.ndm)]
self.commutators = [lf.create_two_index() for i in xrange(self.ndm)]
self.identity = None # every state has a different id.
def clear(self):
'''Reset this record.'''
self.energy = np.nan
self.normsq = np.nan
for i in xrange(self.ndm):
self.dms[i].clear()
self.focks[i].clear()
self.commutators[i].clear()
def assign(self, identity, energy, dms, focks):
'''Assign a new state.
**Arguments:**
identity
A unique id for the new state.
energy
The energy of the new state.
           dms
                The density matrices of the new state.
           focks
                The Fock matrices of the new state.
'''
self.identity = identity
self.energy = energy
self.normsq = 0.0
for i in xrange(self.ndm):
self.dms[i].assign(dms[i])
self.focks[i].assign(focks[i])
compute_commutator(dms[i], focks[i], self.overlap, self.work, self.commutators[i])
self.normsq += self.commutators[i].contract_two('ab,ab', self.commutators[i])
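# Illustrative sketch (not used by the solver): the quantity accumulated in
# DIISState.normsq above is the squared Frobenius norm of the DIIS error
# matrix e = F.D.S - S.D.F for each density/Fock pair. Plain numpy arrays
# stand in for the horton two-index objects here, so the sign/ordering
# convention of compute_commutator may differ in detail from this sketch.
def _commutator_error_sketch(dm, fock, overlap):
    '''Return the DIIS error matrix and its squared norm (numpy arrays).'''
    error = np.dot(fock, np.dot(dm, overlap)) - np.dot(overlap, np.dot(dm, fock))
    return error, np.einsum('ab,ab', error, error)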
class DIISHistory(object):
'''A base class of DIIS histories'''
name = None
need_energy = None
def __init__(self, lf, nvector, ndm, deriv_scale, overlap, dots_matrices):
'''
**Arguments:**
lf
The LinalgFactor used to create the two-index operators.
nvector
The maximum size of the history.
ndm
The number of density matrices (and fock matrices) in one
state.
deriv_scale
The deriv_scale attribute of the Effective Hamiltonian
overlap
The overlap matrix.
dots_matrices
Matrices in which dot products will be stored
**Useful attributes:**
           nused
The actual number of vectors in the history.
'''
self.work = lf.create_two_index()
self.stack = [DIISState(lf, ndm, self.work, overlap) for i in xrange(nvector)]
self.ndm = ndm
self.deriv_scale = deriv_scale
self.overlap = overlap
self.dots_matrices = dots_matrices
self.nused = 0
self.idcounter = 0
self.commutator = lf.create_two_index()
def _get_nvector(self):
'''The maximum size of the history'''
return len(self.stack)
nvector = property(_get_nvector)
def log(self, coeffs):
eref = min(state.energy for state in self.stack[:self.nused])
if eref is None:
log(' DIIS history normsq coeff id')
for i in xrange(self.nused):
state = self.stack[i]
log(' DIIS history %12.5e %12.7f %8i' % (state.normsq, coeffs[i], state.identity))
else:
log(' DIIS history normsq energy coeff id')
for i in xrange(self.nused):
state = self.stack[i]
log(' DIIS history %12.5e %12.5e %12.7f %8i' % (state.normsq, state.energy-eref, coeffs[i], state.identity))
log.blank()
def solve(self, dms_output, focks_output):
'''Inter- or extrapolate new density and/or fock matrices.
**Arguments:**
dms_output
                The output for the density matrices. If set to None, this
                argument is ignored.
focks_output
                The output for the Fock matrices. If set to None, this
                argument is ignored.
'''
raise NotImplementedError
def shrink(self):
'''Remove the oldest item from the history'''
self.nused -= 1
state = self.stack.pop(0)
state.clear()
self.stack.append(state)
for dots in self.dots_matrices:
dots[:-1] = dots[1:]
dots[:,:-1] = dots[:,1:]
dots[-1] = np.nan
dots[:,-1] = np.nan
def add(self, energy, dms, focks):
'''Add new state to the history.
**Arguments:**
energy
The energy of the new state.
dms
A list of density matrices of the new state.
focks
                A list of Fock matrices of the new state.
**Returns**: the square root of commutator error for the given pairs
of density and Fock matrices.
'''
if len(dms) != self.ndm or len(focks) != self.ndm:
raise TypeError('The number of density and Fock matrices must match the ndm parameter.')
# There must be a free spot. If needed, make one.
if self.nused == self.nvector:
self.shrink()
# assign dm and fock
state = self.stack[self.nused]
state.assign(self.idcounter, energy, dms, focks)
self.idcounter += 1
# prepare for next iteration
self.nused += 1
return np.sqrt(state.normsq)
def _build_combinations(self, coeffs, dms_output, focks_output):
'''Construct a linear combination of density/fock matrices
**Arguments:**
coeffs
The linear mixing coefficients for the previous SCF states.
dms_output
A list of output density matrices. (Ignored if None)
focks_output
                A list of output Fock matrices. (Ignored if None)
**Returns:** the commutator error, only when both dms_output and
focks_output are given.
'''
if dms_output is not None:
if len(dms_output) != self.ndm:
raise TypeError('The number of density matrices must match the ndm parameter.')
for i in xrange(self.ndm):
dms_stack = [self.stack[j].dms[i] for j in xrange(self.nused)]
self._linear_combination(coeffs, dms_stack, dms_output[i])
if focks_output is not None:
if len(focks_output) != self.ndm:
raise TypeError('The number of Fock matrices must match the ndm parameter.')
for i in xrange(self.ndm):
focks_stack = [self.stack[j].focks[i] for j in xrange(self.nused)]
self._linear_combination(coeffs, focks_stack, focks_output[i])
if not (dms_output is None or focks_output is None):
errorsq = 0.0
for i in xrange(self.ndm):
compute_commutator(dms_output[i], focks_output[i], self.overlap, self.work, self.commutator)
errorsq += self.commutator.contract_two('ab,ab', self.commutator)
return errorsq**0.5
def _linear_combination(self, coeffs, ops, output):
'''Make a linear combination of two-index objects
**Arguments:**
coeffs
The linear mixing coefficients for the previous SCF states.
ops
A list of input operators.
output
The output operator.
'''
output.clear()
for i in xrange(self.nused):
output.iadd(ops[i], factor=coeffs[i])
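# Illustrative sketch (not called anywhere): subclasses implement solve() by
# building the matrix of error-vector dot products B[i, j] = <e_i, e_j> and
# minimizing the norm of the interpolated error under the constraint
# sum(coeffs) == 1, i.e. Pulay's CDIIS. The CDIIS/EDIIS variants actually
# used by horton differ in detail; this is only the textbook linear system.
def _pulay_coefficients_sketch(errors):
    '''Return CDIIS mixing coefficients for a list of numpy error matrices.'''
    n = len(errors)
    b = np.zeros((n + 1, n + 1))
    for i in xrange(n):
        for j in xrange(n):
            b[i, j] = np.einsum('ab,ab', errors[i], errors[j])
    # Lagrange-multiplier row/column enforcing sum(coeffs) == 1
    b[n, :n] = 1.0
    b[:n, n] = 1.0
    rhs = np.zeros(n + 1)
    rhs[n] = 1.0
    coeffs = np.linalg.solve(b, rhs)
    return coeffs[:n]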
| gpl-3.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/test_lib.py | 6 | 9172 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas._libs.lib as lib
import pandas.util.testing as tm
class TestMisc(object):
def test_max_len_string_array(self):
arr = a = np.array(['foo', 'b', np.nan], dtype='object')
        assert lib.max_len_string_array(arr) == 3
# unicode
arr = a.astype('U').astype(object)
        assert lib.max_len_string_array(arr) == 3
# bytes for python3
arr = a.astype('S').astype(object)
        assert lib.max_len_string_array(arr) == 3
# raises
pytest.raises(TypeError,
lambda: lib.max_len_string_array(arr.astype('U')))
def test_fast_unique_multiple_list_gen_sort(self):
keys = [['p', 'a'], ['n', 'd'], ['a', 's']]
gen = (key for key in keys)
expected = np.array(['a', 'd', 'n', 'p', 's'])
out = lib.fast_unique_multiple_list_gen(gen, sort=True)
tm.assert_numpy_array_equal(np.array(out), expected)
gen = (key for key in keys)
expected = np.array(['p', 'a', 'n', 'd', 's'])
out = lib.fast_unique_multiple_list_gen(gen, sort=False)
tm.assert_numpy_array_equal(np.array(out), expected)
class TestIndexing(object):
def test_maybe_indices_to_slice_left_edge(self):
target = np.arange(100)
# slice
indices = np.array([], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
for end in [1, 2, 5, 20, 99]:
for step in [1, 2, 4]:
indices = np.arange(0, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
for case in [[2, 1, 2, 0], [2, 2, 1, 0], [0, 1, 2, 1], [-2, 0, 2],
[2, 0, -2]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_right_edge(self):
target = np.arange(100)
# slice
for start in [0, 2, 5, 20, 97, 98]:
for step in [1, 2, 4]:
indices = np.arange(start, 99, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
indices = np.array([97, 98, 99, 100], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
with pytest.raises(IndexError):
target[indices]
with pytest.raises(IndexError):
target[maybe_slice]
indices = np.array([100, 99, 98, 97], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
with pytest.raises(IndexError):
target[indices]
with pytest.raises(IndexError):
target[maybe_slice]
for case in [[99, 97, 99, 96], [99, 99, 98, 97], [98, 98, 97, 96]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_both_edges(self):
target = np.arange(10)
# slice
for step in [1, 2, 4, 5, 8, 9]:
indices = np.arange(0, 9, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[4, 2, 0, -2], [2, 2, 1, 0], [0, 1, 2, 1]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_middle(self):
target = np.arange(100)
# slice
for start, end in [(2, 10), (5, 25), (65, 97)]:
for step in [1, 2, 4, 20]:
indices = np.arange(start, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
for case in [[14, 12, 10, 12], [12, 12, 11, 10], [10, 11, 12, 11]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_booleans_to_slice(self):
arr = np.array([0, 0, 1, 1, 1, 0, 1], dtype=np.uint8)
result = lib.maybe_booleans_to_slice(arr)
assert result.dtype == np.bool_
result = lib.maybe_booleans_to_slice(arr[:0])
assert result == slice(0, 0)
def test_get_reverse_indexer(self):
indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.int64)
result = lib.get_reverse_indexer(indexer, 5)
expected = np.array([4, 2, 3, 6, 7], dtype=np.int64)
assert np.array_equal(result, expected)
class TestNullObj(object):
_1d_methods = ['isnullobj', 'isnullobj_old']
_2d_methods = ['isnullobj2d', 'isnullobj2d_old']
def _check_behavior(self, arr, expected):
for method in TestNullObj._1d_methods:
result = getattr(lib, method)(arr)
tm.assert_numpy_array_equal(result, expected)
arr = np.atleast_2d(arr)
expected = np.atleast_2d(expected)
for method in TestNullObj._2d_methods:
result = getattr(lib, method)(arr)
tm.assert_numpy_array_equal(result, expected)
def test_basic(self):
arr = np.array([1, None, 'foo', -5.1, pd.NaT, np.nan])
expected = np.array([False, True, False, False, True, True])
self._check_behavior(arr, expected)
def test_non_obj_dtype(self):
arr = np.array([1, 3, np.nan, 5], dtype=float)
expected = np.array([False, False, True, False])
self._check_behavior(arr, expected)
def test_empty_arr(self):
arr = np.array([])
expected = np.array([], dtype=bool)
self._check_behavior(arr, expected)
def test_empty_str_inp(self):
arr = np.array([""]) # empty but not null
expected = np.array([False])
self._check_behavior(arr, expected)
def test_empty_like(self):
# see gh-13717: no segfaults!
arr = np.empty_like([None])
expected = np.array([True])
self._check_behavior(arr, expected)
| mit |
nvictus/hic2cool | hic2cool_extractnorms.py | 1 | 13474 | from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import struct
import zlib
import numpy as np
import h5py
import math
import cooler
import pandas as pd
from collections import OrderedDict
version = 'dummy'
NORMS = ['VC', 'VC_SQRT', 'KR']
# read a NUL-terminated (C-style) string from an open binary file
def readcstr(f):
# buf = bytearray()
buf = b""
while True:
b = f.read(1)
if b is None or b == b"\0":
# return str(buf,encoding="utf-8", errors="strict")
return buf.decode("utf-8", errors="ignore")
else:
buf += b
# buf.append(b)
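# Quick illustration of readcstr (a sketch, not used elsewhere in this
# module): it consumes bytes up to and including the first NUL and returns
# the decoded text, leaving the file position just past the terminator. It
# works on any binary file-like object, not only real files.
def _readcstr_example():
    import io
    buf = io.BytesIO(b'hg19\x00BP\x00')
    return readcstr(buf), readcstr(buf)  # -> ('hg19', 'BP')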
def read_header(infile):
"""
    Takes in a .hic file and returns a dictionary containing information about
    the chromosomes. Keys are chromosome index numbers (0 through # of chroms
    contained in the file) and values are [chr idx (int), chr name (str),
    chrom length (int)]. Also returns the list of bp resolutions, the master
    index used by the file, the genome ID, and the open file object.
"""
req=open(infile, 'rb')
chrs = {}
resolutions = []
magic_string = struct.unpack('<3s', req.read(3))[0]
req.read(1)
if (magic_string != b"HIC"):
print('This does not appear to be a HiC file; magic string is incorrect')
sys.exit()
global version
version = struct.unpack('<i',req.read(4))[0]
masterindex = struct.unpack('<q',req.read(8))[0]
genome = b""
c=req.read(1)
while (c != b'\0'):
genome += c
c=req.read(1)
genome = genome.decode('ascii')
nattributes = struct.unpack('<i',req.read(4))[0]
for x in range(nattributes):
key = readcstr(req)
value = readcstr(req)
nChrs = struct.unpack('<i',req.read(4))[0]
for i in range(0, nChrs):
name = readcstr(req)
length = struct.unpack('<i',req.read(4))[0]
if name and length:
formatted_name = ('chr' + name if ('all' not in name.lower() and
'chr' not in name.lower()) else name)
formatted_name = ('chrM' if formatted_name == 'chrMT' else
formatted_name)
chrs[i] = [i, formatted_name, length]
nBpRes = struct.unpack('<i',req.read(4))[0]
# find bp delimited resolutions supported by the hic file
for x in range(0, nBpRes):
res = struct.unpack('<i',req.read(4))[0]
resolutions.append(res)
return req, chrs, resolutions, masterindex, genome
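# Minimal, self-contained sketch of the header layout read_header expects
# (little-endian struct fields, NUL-terminated strings). All values below are
# made up purely for illustration; a real .hic file carries matrix data and a
# footer after this header. This helper is not called anywhere in the module.
def _read_header_example():
    import os
    import tempfile
    header = b'HIC\x00'
    header += struct.pack('<i', 8)           # file format version
    header += struct.pack('<q', 0)           # master index offset (placeholder)
    header += b'hg19\x00'                    # genome id
    header += struct.pack('<i', 0)           # no attributes
    header += struct.pack('<i', 2)           # two chromosome entries
    header += b'ALL\x00' + struct.pack('<i', 1)
    header += b'1\x00' + struct.pack('<i', 249250621)
    header += struct.pack('<i', 2)           # two bp resolutions
    header += struct.pack('<i', 1000000) + struct.pack('<i', 500000)
    handle, path = tempfile.mkstemp(suffix='.hic')
    with os.fdopen(handle, 'wb') as tmp:
        tmp.write(header)
    req, chrs, resolutions, masteridx, genome = read_header(path)
    req.close()
    os.remove(path)
    # chrs -> {0: [0, 'ALL', 1], 1: [1, 'chr1', 249250621]}
    # resolutions -> [1000000, 500000], genome -> 'hg19'
    return chrs, resolutions, masteridx, genome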
def read_footer(req, master, norm, unit, resolution):
"""
Takes in an open hic file and generates two dictionaries. pair_footer_info
contains the file position of info for any given chromosome pair (formatted
as a string). Chr_footer_info gives chromosome-level size and position info
    relative to the file. This way, this function only has to run once per
    normalization/resolution. The reads whose results are discarded simply
    advance the file position to the correct offset; this parsing logic is
    adapted from straw.
"""
pair_footer_info={}
chr_footer_info={}
req.seek(master)
nBytes = struct.unpack('<i', req.read(4))[0]
nEntries = struct.unpack('<i', req.read(4))[0]
found = False
for i in range(nEntries):
stri = readcstr(req)
fpos = struct.unpack('<q', req.read(8))[0]
sizeinbytes = struct.unpack('<i', req.read(4))[0]
pair_footer_info[stri] = fpos
nExpectedValues = struct.unpack('<i',req.read(4))[0]
for i in range(nExpectedValues):
str_ = readcstr(req)
binSize = struct.unpack('<i',req.read(4))[0]
nValues = struct.unpack('<i',req.read(4))[0]
for j in range(nValues):
v = struct.unpack('<d',req.read(8))[0]
nNormalizationFactors = struct.unpack('<i',req.read(4))[0]
for j in range(nNormalizationFactors):
chrIdx = struct.unpack('<i',req.read(4))[0]
v = struct.unpack('<d',req.read(8))[0]
nExpectedValues = struct.unpack('<i',req.read(4))[0]
for i in range(nExpectedValues):
str_ = readcstr(req)
str_ = readcstr(req)
binSize = struct.unpack('<i',req.read(4))[0]
nValues = struct.unpack('<i',req.read(4))[0]
for j in range(nValues):
v = struct.unpack('<d',req.read(8))[0]
nNormalizationFactors = struct.unpack('<i',req.read(4))[0]
for j in range(nNormalizationFactors):
chrIdx = struct.unpack('<i',req.read(4))[0]
v = struct.unpack('<d',req.read(8))[0]
nEntries = struct.unpack('<i',req.read(4))[0]
for i in range(nEntries):
normtype = readcstr(req)
chrIdx = struct.unpack('<i',req.read(4))[0]
unit1 = readcstr(req)
resolution1 = struct.unpack('<i',req.read(4))[0]
filePosition = struct.unpack('<q',req.read(8))[0]
sizeInBytes = struct.unpack('<i',req.read(4))[0]
if (normtype==norm and unit1==unit and resolution1==resolution):
chr_footer_info[chrIdx] = {'position':filePosition, 'size':sizeInBytes}
return req, pair_footer_info, chr_footer_info
#FUN(fin, entry) Return Norm
def read_normalization_vector(req, entry):
req.seek(entry['position'])
nValues = struct.unpack('<i',req.read(4))[0]
value = []
for i in range(nValues):
d = struct.unpack('<d',req.read(8))[0]
value.append(d)
return value
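# Equivalent reading of a normalization vector with numpy (a sketch only, not
# used by this module): the on-disk layout is a little-endian int32 count
# followed by that many little-endian float64 values, so the loop above can
# be replaced by a single buffered read. Returns an ndarray instead of a list.
def _read_normalization_vector_np(req, entry):
    req.seek(entry['position'])
    n_values = struct.unpack('<i', req.read(4))[0]
    return np.frombuffer(req.read(8 * n_values), dtype='<f8')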
def parse_norm(norm, req, chr1, chr2, unit, binsize, covered_chr_pairs,
pair_footer_info, chr_footer_info, chrom_map):
"""
Adapted from the straw() function in the original straw package.
Mainly, since all chroms are iterated over, the read_header and read_footer
functions were placed outside of straw() and made to be reusable across
any chromosome pair.
    In this norm-extraction variant, its main job is to fill chrom_map with
    the normalization vector of every chromosome in the given chromosome pair.
"""
magic_string = ""
    if (not (norm == "VC" or norm == "VC_SQRT" or norm == "KR")):
        warn_string = ("Norm specified incorrectly, must be one of "
                       "<NONE/VC/VC_SQRT/KR>")
        force_exit(warn_string, req)
    if (not (unit == "BP" or unit == "FRAG")):
        warn_string = "Unit specified incorrectly, must be one of <BP/FRAG>"
        force_exit(warn_string, req)
chr1ind = chr1[0]
chr2ind = chr2[0]
c1pos1 = 0
c1pos2 = int(chr1[2])
c2pos1 = 0
c2pos2 = int(chr2[2])
c1 = min(chr1ind, chr2ind)
c2 = max(chr1ind, chr2ind)
chr_key = str(c1) + "_" + str(c2)
try:
pair_footer_info[chr_key]
except KeyError:
warn_string = (
'ERROR. There is a discrepancy between the chrs declared in the ' +
'infile header and the actual information it contains.\nThe '
'intersection between ' + chr1[1] + ' and ' + chr2[1] +
' could not be found in the file.')
force_exit(warn_string, req)
myFilePos = pair_footer_info[chr_key]
if (norm != "NONE"):
#import ipdb; ipdb.set_trace()
c1Norm = read_normalization_vector(req, chr_footer_info[c1])
c2Norm = read_normalization_vector(req, chr_footer_info[c2])
chrom_map[chr1[1]] = c1Norm
chrom_map[chr2[1]] = c2Norm
covered_chr_pairs.append(chr_key)
def hic2cool_extractnorms(infile, outfile, resolution=0,
exclude_MT=False, command_line=False):
"""
Main function that coordinates the reading of header and footer from infile
and uses that information to parse the hic matrix.
Opens outfile and writes in form of .cool file
Params:
<infile> str .hic filename
<outfile> str .cool output filename
<resolution> int bp bin size. If 0, use all. Defaults to 0.
Final .cool structure will change depending on this param (see README)
    Normalization vectors for all types in NORMS (VC, VC_SQRT and KR) are extracted.
<exclude_MT> bool. If True, ignore MT contacts. Defaults to False.
    <command_line> bool. True if executing from run_hic.py. Causes hic header
        info to be printed to stdout.
"""
from collections import OrderedDict
import cooler
unit = 'BP' # only using base pair unit for now
resolution = int(resolution)
req, used_chrs, resolutions, masteridx, genome = read_header(infile)
chromosomes = [used_chrs[i][1] for i in range(1, len(used_chrs))]
lengths = [used_chrs[i][2] for i in range(1, len(used_chrs))]
chromsizes = pd.Series(index=chromosomes, data=lengths)
if command_line: # print hic header info for command line usage
chr_names = [used_chrs[key][1] for key in used_chrs.keys()]
print('################')
print('### hic2cool ###')
print('################')
print('hic file header info:')
print('Chromosomes: ', chr_names)
print('Resolutions: ', resolutions)
print('Genome: ', genome)
if exclude_MT: # remove chr25, which is MT, if this flag is set
used_chrs.pop(25, None)
# ensure user input binsize is a resolution supported by the hic file
if resolution != 0 and resolution not in resolutions:
error_str = (
'ERROR. Given binsize (in bp) is not a supported resolution in ' +
'this file.\nPlease use 0 (all resolutions) or use one of: ' +
            str(resolutions))
force_exit(error_str, req)
use_resolutions = resolutions if resolution == 0 else [resolution]
cooler_groups = {}
for path in cooler.io.ls(outfile):
binsize = cooler.Cooler(outfile + '::' + path).info['bin-size']
cooler_groups[binsize] = path
print('MCOOL contents:')
print(cooler_groups)
for norm in NORMS:
print('Norm:', norm)
for binsize in use_resolutions:
chrom_map = {}
bins = cooler.binnify(chromsizes, binsize)
req, pair_footer_info, chr_footer_info = read_footer(
req, masteridx, norm, unit, binsize)
covered_chr_pairs = []
for chr_x in used_chrs:
if used_chrs[chr_x][1].lower() == 'all':
continue
for chr_y in used_chrs:
if used_chrs[chr_y][1].lower() == 'all':
continue
c1 = min(chr_x, chr_y)
c2 = max(chr_x, chr_y)
# ensure this is true
# since matrices are upper triangular, no need to cover
# c1-c2 and c2-c1 reciprocally
if str(c1) + "_" + str(c2) in covered_chr_pairs:
continue
parse_norm(
norm,
req,
used_chrs[c1],
used_chrs[c2],
unit,
binsize,
covered_chr_pairs,
pair_footer_info,
chr_footer_info,
chrom_map
)
lengths_in_bins = bins.groupby('chrom').size()
# hic normalization vector lengths have inconsistent lengths...
# truncate appropriately
vector = np.concatenate([
chrom_map[chrom][:lengths_in_bins.loc[chrom]]
for chrom in chromosomes
])
bins[norm] = vector
print('Resolution:', binsize)
print(bins.head())
print('Writing to cool file...')
group_path = cooler_groups[binsize]
cooler.io.append(
outfile + '::' + group_path,
'bins',
{norm: bins[norm].values},
force=True)
req.close()
def force_exit(message, req):
"""
Exit the program due to some error. Print out message and close the given
input files.
"""
req.close()
print(message, file=sys.stderr)
sys.exit()
if __name__ == '__main__':
import argparse
def main():
"""
Execute the program from the command line
Args are:
python hic2cool.py <infile (.hic)> <outfile (.cool)> <resolutions
desired (defaults to all, optionally bp int)> <normalization type
(defaults to 'KR', optionally 'NONE', 'VC', or 'VC_SQRT')>
<exclude MT (default False)>
"""
parser = argparse.ArgumentParser()
parser.add_argument("infile", help=".hic input file")
parser.add_argument("outfile", help=".cool output file")
parser.add_argument("-r", "--resolution",
help="integer bp resolution desired in cooler file. "
"Setting to 0 (default) will use all resolutions. "
"If all resolutions are used, a multi-res .cool file will be "
"created, which has a different hdf5 structure. See the "
"README for more info", type=int, default=0)
parser.add_argument("-e", "--exclude_MT",
help="if used, exclude the mitochondria (MT) from the output",
action="store_true")
args = parser.parse_args()
# these parameters adapted from theaidenlab/straw
# KR is default normalization type and BP is the unit for binsize
hic2cool_extractnorms(
args.infile,
args.outfile,
args.resolution,
#args.normalization,
args.exclude_MT,
True)
main()
| mit |
jpautom/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
    time_ = time() - t
    print("Birch %s as the final step took %0.2f seconds" % (
        info, time_))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
kdebrab/pandas | pandas/tests/io/json/test_pandas.py | 2 | 50233 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from pandas.compat import (range, lrange, StringIO,
OrderedDict, is_platform_32bit)
import os
import numpy as np
from pandas import (Series, DataFrame, DatetimeIndex, Timestamp,
read_json, compat)
from datetime import timedelta
import pandas as pd
import json
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network,
ensure_clean, assert_index_equal)
import pandas.util.testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
_intframe = DataFrame(dict((k, v.astype(np.int64))
for k, v in compat.iteritems(_seriesd)))
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ['bah'] * 5 + ['bar'] * 5 + ['baz'] * \
5 + ['foo'] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name='E')
_cat_frame['E'] = list(reversed(cat))
_cat_frame['sort'] = np.arange(len(_cat_frame), dtype='int64')
_mixed_frame = _frame.copy()
class TestPandasContainer(object):
@pytest.fixture(scope="function", autouse=True)
def setup(self, datapath):
self.dirpath = datapath("io", "json", "data")
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty_series = Series([], index=[])
self.empty_frame = DataFrame({})
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.dirpath
del self.ts
del self.series
del self.objSeries
del self.empty_series
del self.empty_frame
del self.frame
del self.frame2
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self):
df = DataFrame([['a', 'b'], ['c', 'd']],
index=['index " 1', 'index / 2'],
columns=['a \\ b', 'y / z'])
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split'))
assert_frame_equal(df, read_json(df.to_json(orient='columns'),
orient='columns'))
assert_frame_equal(df, read_json(df.to_json(orient='index'),
orient='index'))
df_unser = read_json(df.to_json(orient='records'), orient='records')
assert_index_equal(df.columns, df_unser.columns)
tm.assert_numpy_array_equal(df.values, df_unser.values)
def test_frame_non_unique_index(self):
df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 1],
columns=['x', 'y'])
pytest.raises(ValueError, df.to_json, orient='index')
pytest.raises(ValueError, df.to_json, orient='columns')
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split'))
unser = read_json(df.to_json(orient='records'), orient='records')
tm.assert_index_equal(df.columns, unser.columns)
tm.assert_almost_equal(df.values, unser.values)
unser = read_json(df.to_json(orient='values'), orient='values')
tm.assert_numpy_array_equal(df.values, unser.values)
def test_frame_non_unique_columns(self):
df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 2],
columns=['x', 'x'])
pytest.raises(ValueError, df.to_json, orient='index')
pytest.raises(ValueError, df.to_json, orient='columns')
pytest.raises(ValueError, df.to_json, orient='records')
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split', dtype=False))
unser = read_json(df.to_json(orient='values'), orient='values')
tm.assert_numpy_array_equal(df.values, unser.values)
# GH4377; duplicate columns not processing correctly
df = DataFrame([['a', 'b'], ['c', 'd']], index=[
1, 2], columns=['x', 'y'])
result = read_json(df.to_json(orient='split'), orient='split')
assert_frame_equal(result, df)
def _check(df):
result = read_json(df.to_json(orient='split'), orient='split',
convert_dates=['x'])
assert_frame_equal(result, df)
for o in [[['a', 'b'], ['c', 'd']],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp('20130101'), 3.5],
[Timestamp('20130102'), 4.5]]]:
_check(DataFrame(o, index=[1, 2], columns=['x', 'x']))
def test_frame_from_json_to_json(self):
def _check_orient(df, orient, dtype=None, numpy=False,
convert_axes=True, check_dtype=True, raise_ok=None,
sort=None, check_index_type=True,
check_column_type=True, check_numpy_dtype=False):
if sort is not None:
df = df.sort_values(sort)
else:
df = df.sort_index()
# if we are not unique, then check that we are raising ValueError
# for the appropriate orients
if not df.index.is_unique and orient in ['index', 'columns']:
pytest.raises(
ValueError, lambda: df.to_json(orient=orient))
return
if (not df.columns.is_unique and
orient in ['index', 'columns', 'records']):
pytest.raises(
ValueError, lambda: df.to_json(orient=orient))
return
dfjson = df.to_json(orient=orient)
try:
unser = read_json(dfjson, orient=orient, dtype=dtype,
numpy=numpy, convert_axes=convert_axes)
except Exception as detail:
if raise_ok is not None:
if isinstance(detail, raise_ok):
return
raise
if sort is not None and sort in unser.columns:
unser = unser.sort_values(sort)
else:
unser = unser.sort_index()
if dtype is False:
check_dtype = False
if not convert_axes and df.index.dtype.type == np.datetime64:
unser.index = DatetimeIndex(
unser.index.values.astype('i8') * 1e6)
if orient == "records":
# index is not captured in this orientation
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
tm.assert_index_equal(df.columns, unser.columns,
exact=check_column_type)
elif orient == "values":
# index and cols are not captured in this orientation
if numpy is True and df.shape == (0, 0):
assert unser.shape[0] == 0
else:
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
elif orient == "split":
# index and col labels might not be strings
unser.index = [str(i) for i in unser.index]
unser.columns = [str(i) for i in unser.columns]
if sort is None:
unser = unser.sort_index()
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
else:
if convert_axes:
tm.assert_frame_equal(df, unser, check_dtype=check_dtype,
check_index_type=check_index_type,
check_column_type=check_column_type)
else:
tm.assert_frame_equal(df, unser, check_less_precise=False,
check_dtype=check_dtype)
def _check_all_orients(df, dtype=None, convert_axes=True,
raise_ok=None, sort=None, check_index_type=True,
check_column_type=True):
# numpy=False
if convert_axes:
_check_orient(df, "columns", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "records", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "split", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "index", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "values", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "columns", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "records", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "split", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "index", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "values", dtype=dtype,
convert_axes=False, sort=sort)
# numpy=True and raise_ok might be not None, so ignore the error
if convert_axes:
_check_orient(df, "columns", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "records", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "split", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "index", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "values", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "columns", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "records", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "split", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "index", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "values", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
# basic
_check_all_orients(self.frame)
assert self.frame.to_json() == self.frame.to_json(orient="columns")
_check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
_check_all_orients(self.intframe, dtype=False)
# big one
# index and columns are strings as all unserialised JSON object keys
# are assumed to be strings
biggie = DataFrame(np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)])
_check_all_orients(biggie, dtype=False, convert_axes=False)
# dtypes
_check_all_orients(DataFrame(biggie, dtype=np.float64),
dtype=np.float64, convert_axes=False)
_check_all_orients(DataFrame(biggie, dtype=np.int), dtype=np.int,
convert_axes=False)
_check_all_orients(DataFrame(biggie, dtype='U3'), dtype='U3',
convert_axes=False, raise_ok=ValueError)
# categorical
_check_all_orients(self.categorical, sort='sort', raise_ok=ValueError)
# empty
_check_all_orients(self.empty_frame, check_index_type=False,
check_column_type=False)
# time series data
_check_all_orients(self.tsframe)
# mixed data
index = pd.Index(['a', 'b', 'c', 'd', 'e'])
data = {'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': [True, False, True, False, True]}
df = DataFrame(data=data, index=index)
_check_orient(df, "split", check_dtype=False)
_check_orient(df, "records", check_dtype=False)
_check_orient(df, "values", check_dtype=False)
_check_orient(df, "columns", check_dtype=False)
# index oriented is problematic as it is read back in in a transposed
# state, so the columns are interpreted as having mixed data and
# given object dtypes.
# force everything to have object dtype beforehand
_check_orient(df.transpose().transpose(), "index", dtype=False)
def test_frame_from_json_bad_data(self):
pytest.raises(ValueError, read_json, StringIO('{"key":b:a:d}'))
# too few indices
json = StringIO('{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
pytest.raises(ValueError, read_json, json,
orient="split")
# too many columns
json = StringIO('{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
pytest.raises(AssertionError, read_json, json,
orient="split")
# bad key
json = StringIO('{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
with tm.assert_raises_regex(ValueError,
r"unexpected key\(s\): badkey"):
read_json(json, orient="split")
def test_frame_from_json_nones(self):
df = DataFrame([[1, 2], [4, 5, 6]])
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
df = DataFrame([['1', '2'], ['4', '5', '6']])
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert unser[2][0] is None
unser = read_json(df.to_json(), convert_axes=False, dtype=False)
assert unser['2']['0'] is None
unser = read_json(df.to_json(), numpy=False)
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), numpy=False, dtype=False)
assert unser[2][0] is None
unser = read_json(df.to_json(), numpy=False,
convert_axes=False, dtype=False)
assert unser['2']['0'] is None
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = np.inf
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert np.isnan(unser[2][0])
df.loc[0, 2] = np.NINF
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert np.isnan(unser[2][0])
@pytest.mark.skipif(is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_frame_to_json_float_precision(self):
df = pd.DataFrame([dict(a_float=0.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=1.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":2.0}}'
df = pd.DataFrame([dict(a_float=-1.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":-2.0}}'
df = pd.DataFrame([dict(a_float=0.995)])
encoded = df.to_json(double_precision=2)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=0.9995)])
encoded = df.to_json(double_precision=3)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=0.99999999999999944)])
encoded = df.to_json(double_precision=15)
assert encoded == '{"a_float":{"0":1.0}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
pytest.raises(ValueError, df.to_json, orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=['jim', 'joe'])
assert not df._is_mixed_type
assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
check_index_type=False)
# GH 7445
result = pd.DataFrame({'test': []}, index=[]).to_json(orient='columns')
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=['jim', 'joe'])
df['joe'] = df['joe'].astype('i8')
assert df._is_mixed_type
assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
check_index_type=False)
def test_frame_mixedtype_orient(self): # GH10289
vals = [[10, 1, 'foo', .1, .01],
[20, 2, 'bar', .2, .02],
[30, 3, 'baz', .3, .03],
[40, 4, 'qux', .4, .04]]
df = DataFrame(vals, index=list('abcd'),
columns=['1st', '2nd', '3rd', '4th', '5th'])
assert df._is_mixed_type
right = df.copy()
for orient in ['split', 'index', 'columns']:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient='records')
left = read_json(inp, orient='records', convert_axes=False)
assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient='values')
left = read_json(inp, orient='values', convert_axes=False)
assert_frame_equal(left, right)
def test_v12_compat(self):
df = DataFrame(
[[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478]],
columns=['A', 'B', 'C', 'D'],
index=pd.date_range('2000-01-03', '2000-01-07'))
df['date'] = pd.Timestamp('19920106 18:21:32.12')
df.iloc[3, df.columns.get_loc('date')] = pd.Timestamp('20130101')
df['modified'] = df['date']
df.iloc[1, df.columns.get_loc('modified')] = pd.NaT
v12_json = os.path.join(self.dirpath, 'tsframe_v012.json')
df_unser = pd.read_json(v12_json)
assert_frame_equal(df, df_unser)
df_iso = df.drop(['modified'], axis=1)
v12_iso_json = os.path.join(self.dirpath, 'tsframe_iso_v012.json')
df_unser_iso = pd.read_json(v12_iso_json)
assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range('20000101', periods=10, freq='H')
df_mixed = DataFrame(OrderedDict(
float_1=[-0.92077639, 0.77434435, 1.25234727, 0.61485564,
-0.60316077, 0.24653374, 0.28668979, -2.51969012,
0.95748401, -1.02970536],
int_1=[19680418, 75337055, 99973684, 65103179, 79373900,
40314334, 21290235, 4991321, 41903419, 16008365],
str_1=['78c608f1', '64a99743', '13d2ff52', 'ca7f4af2', '97236474',
'bde7e214', '1a6bde47', 'b1190be5', '7a669144', '8d64d068'],
float_2=[-0.0428278, -1.80872357, 3.36042349, -0.7573685,
-0.48217572, 0.86229683, 1.08935819, 0.93898739,
-0.03030452, 1.43366348],
str_2=['14f04af9', 'd085da90', '4bcfac83', '81504caf', '2ffef4a9',
'08e2f5c4', '07e1af03', 'addbd4a7', '1f6a09ba', '4bfc4d87'],
int_2=[86967717, 98098830, 51927505, 20372254, 12601730, 20884027,
34193846, 10561746, 24867120, 76131025]
), index=index)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype('unicode')
df_roundtrip = pd.read_json(df_mixed.to_json(orient='split'),
orient='split')
assert_frame_equal(df_mixed, df_roundtrip,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
by_blocks=True,
check_exact=True)
def test_frame_nonprintable_bytes(self):
# GH14256: failing column caused segfaults, if it is not the last one
class BinaryThing(object):
def __init__(self, hexed):
self.hexed = hexed
if compat.PY2:
self.binary = hexed.decode('hex')
else:
self.binary = bytes.fromhex(hexed)
def __str__(self):
return self.hexed
hexed = '574b4454ba8c5eb4f98a8f45'
binthing = BinaryThing(hexed)
# verify the proper conversion of printable content
df_printable = DataFrame({'A': [binthing.hexed]})
assert df_printable.to_json() == \
'{{"A":{{"0":"{hex}"}}}}'.format(hex=hexed)
# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({'A': [binthing]})
with pytest.raises(OverflowError):
df_nonprintable.to_json()
# the same with multiple columns threw segfaults
df_mixed = DataFrame({'A': [binthing], 'B': [1]},
columns=['A', 'B'])
with pytest.raises(OverflowError):
df_mixed.to_json()
# default_handler should resolve exceptions for non-string types
assert df_nonprintable.to_json(default_handler=str) == \
'{{"A":{{"0":"{hex}"}}}}'.format(hex=hexed)
assert df_mixed.to_json(default_handler=str) == \
'{{"A":{{"0":"{hex}"}},"B":{{"0":1}}}}'.format(hex=hexed)
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
df = pd.DataFrame({'bar' * 100000: [1], 'foo': [1337]})
assert df.to_json() == \
'{{"{bar}":{{"0":1}},"foo":{{"0":1337}}}}'.format(
bar=('bar' * 100000))
def test_series_non_unique_index(self):
s = Series(['a', 'b'], index=[1, 1])
pytest.raises(ValueError, s.to_json, orient='index')
assert_series_equal(s, read_json(s.to_json(orient='split'),
orient='split', typ='series'))
unser = read_json(s.to_json(orient='records'),
orient='records', typ='series')
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_from_json_to_json(self):
def _check_orient(series, orient, dtype=None, numpy=False,
check_index_type=True):
series = series.sort_index()
unser = read_json(series.to_json(orient=orient),
typ='series', orient=orient, numpy=numpy,
dtype=dtype)
unser = unser.sort_index()
if orient == "records" or orient == "values":
assert_almost_equal(series.values, unser.values)
else:
if orient == "split":
assert_series_equal(series, unser,
check_index_type=check_index_type)
else:
assert_series_equal(series, unser, check_names=False,
check_index_type=check_index_type)
def _check_all_orients(series, dtype=None, check_index_type=True):
_check_orient(series, "columns", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "records", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "split", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "index", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "values", dtype=dtype)
_check_orient(series, "columns", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "records", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "split", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "index", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "values", dtype=dtype, numpy=True,
check_index_type=check_index_type)
# basic
_check_all_orients(self.series)
assert self.series.to_json() == self.series.to_json(orient="index")
objSeries = Series([str(d) for d in self.objSeries],
index=self.objSeries.index,
name=self.objSeries.name)
_check_all_orients(objSeries, dtype=False)
# empty_series has empty index with object dtype
# which cannot be revert
assert self.empty_series.index.dtype == np.object_
_check_all_orients(self.empty_series, check_index_type=False)
_check_all_orients(self.ts)
# dtype
s = Series(lrange(6), index=['a', 'b', 'c', 'd', 'e', 'f'])
_check_all_orients(Series(s, dtype=np.float64), dtype=np.float64)
_check_all_orients(Series(s, dtype=np.int), dtype=np.int)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
pytest.raises(ValueError, s.to_json, orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ='series', precise_float=True)
assert_series_equal(result, s, check_index_type=False)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
assert_frame_equal(result, df, check_index_type=False,
check_column_type=False)
def test_typ(self):
s = Series(lrange(6), index=['a', 'b', 'c',
'd', 'e', 'f'], dtype='int64')
result = read_json(s.to_json(), typ=None)
assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
assert_frame_equal(result, df)
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['A', 'B', 'C'])
result = read_json(df.to_json())
assert_frame_equal(result, df)
def test_path(self):
with ensure_clean('test.json') as path:
for df in [self.frame, self.frame2, self.intframe, self.tsframe,
self.mixed_frame]:
df.to_json(path)
read_json(path)
def test_axis_dates(self):
# frame
json = self.tsframe.to_json()
result = read_json(json)
assert_frame_equal(result, self.tsframe)
# series
json = self.ts.to_json()
result = read_json(json, typ='series')
assert_series_equal(result, self.ts, check_names=False)
assert result.name is None
def test_convert_dates(self):
# frame
df = self.tsframe.copy()
df['date'] = Timestamp('20130101')
json = df.to_json()
result = read_json(json)
assert_frame_equal(result, df)
df['foo'] = 1.
json = df.to_json(date_unit='ns')
result = read_json(json, convert_dates=False)
expected = df.copy()
expected['date'] = expected['date'].values.view('i8')
expected['foo'] = expected['foo'].astype('int64')
assert_frame_equal(result, expected)
# series
ts = Series(Timestamp('20130101'), index=self.ts.index)
json = ts.to_json()
result = read_json(json, typ='series')
assert_series_equal(result, ts)
def test_convert_dates_infer(self):
# GH10747
from pandas.io.json import dumps
infer_words = ['trade_time', 'date', 'datetime', 'sold_at',
'modified', 'timestamp', 'timestamps']
for infer_word in infer_words:
data = [{'id': 1, infer_word: 1036713600000}, {'id': 2}]
expected = DataFrame([[1, Timestamp('2002-11-08')], [2, pd.NaT]],
columns=['id', infer_word])
result = read_json(dumps(data))[['id', infer_word]]
assert_frame_equal(result, expected)
def test_date_format_frame(self):
df = self.tsframe.copy()
def test_w_date(date, date_unit=None):
df['date'] = Timestamp(date)
df.iloc[1, df.columns.get_loc('date')] = pd.NaT
df.iloc[5, df.columns.get_loc('date')] = pd.NaT
if date_unit:
json = df.to_json(date_format='iso', date_unit=date_unit)
else:
json = df.to_json(date_format='iso')
result = read_json(json)
assert_frame_equal(result, df)
test_w_date('20130101 20:43:42.123')
test_w_date('20130101 20:43:42', date_unit='s')
test_w_date('20130101 20:43:42.123', date_unit='ms')
test_w_date('20130101 20:43:42.123456', date_unit='us')
test_w_date('20130101 20:43:42.123456789', date_unit='ns')
pytest.raises(ValueError, df.to_json, date_format='iso',
date_unit='foo')
def test_date_format_series(self):
def test_w_date(date, date_unit=None):
ts = Series(Timestamp(date), index=self.ts.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format='iso', date_unit=date_unit)
else:
json = ts.to_json(date_format='iso')
result = read_json(json, typ='series')
assert_series_equal(result, ts)
test_w_date('20130101 20:43:42.123')
test_w_date('20130101 20:43:42', date_unit='s')
test_w_date('20130101 20:43:42.123', date_unit='ms')
test_w_date('20130101 20:43:42.123456', date_unit='us')
test_w_date('20130101 20:43:42.123456789', date_unit='ns')
ts = Series(Timestamp('20130101 20:43:42.123'), index=self.ts.index)
pytest.raises(ValueError, ts.to_json, date_format='iso',
date_unit='foo')
def test_date_unit(self):
df = self.tsframe.copy()
df['date'] = Timestamp('20130101 20:43:42')
dl = df.columns.get_loc('date')
df.iloc[1, dl] = Timestamp('19710101 20:43:42')
df.iloc[2, dl] = Timestamp('21460101 20:43:42')
df.iloc[4, dl] = pd.NaT
for unit in ('s', 'ms', 'us', 'ns'):
json = df.to_json(date_format='epoch', date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r'''{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}'''
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list('AB'))
dfj2['date'] = Timestamp('20130101')
dfj2['ints'] = lrange(5)
dfj2['bools'] = True
dfj2.index = pd.date_range('20130101', periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={'ints': np.int64, 'bools': np.bool_})
assert_frame_equal(result, result)
def test_misc_example(self):
# parsing unordered input fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\[u?'a', u?'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with tm.assert_raises_regex(AssertionError, error_msg):
assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
@network
def test_round_trip_exception_(self):
# GH 3867
csv = 'https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv'
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
assert_frame_equal(result.reindex(
index=df.index, columns=df.columns), df)
@network
def test_url(self):
url = 'https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5' # noqa
result = read_json(url, convert_dates=True)
for c in ['created_at', 'closed_at', 'updated_at']:
assert result[c].dtype == 'datetime64[ns]'
def test_timedelta(self):
converter = lambda x: pd.to_timedelta(x, unit='ms')
s = Series([timedelta(23), timedelta(seconds=5)])
assert s.dtype == 'timedelta64[ns]'
result = pd.read_json(s.to_json(), typ='series').apply(converter)
assert_series_equal(result, s)
s = Series([timedelta(23), timedelta(seconds=5)],
index=pd.Index([0, 1]))
assert s.dtype == 'timedelta64[ns]'
result = pd.read_json(s.to_json(), typ='series').apply(converter)
assert_series_equal(result, s)
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
assert frame[0].dtype == 'timedelta64[ns]'
assert_frame_equal(frame, pd.read_json(frame.to_json())
.apply(converter))
frame = DataFrame({'a': [timedelta(days=23), timedelta(seconds=5)],
'b': [1, 2],
'c': pd.date_range(start='20130101', periods=2)})
result = pd.read_json(frame.to_json(date_unit='ns'))
result['a'] = pd.to_timedelta(result.a, unit='ns')
result['c'] = pd.to_datetime(result.c)
assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
frame = DataFrame({'a': [timedelta(23), pd.Timestamp('20130101')]},
dtype=object)
expected = DataFrame({'a': [pd.Timedelta(frame.a[0]).value,
pd.Timestamp(frame.a[1]).value]})
result = pd.read_json(frame.to_json(date_unit='ns'),
dtype={'a': 'int64'})
assert_frame_equal(result, expected, check_index_type=False)
def test_default_handler(self):
value = object()
frame = DataFrame({'a': [7, value]})
expected = DataFrame({'a': [7, str(value)]})
result = pd.read_json(frame.to_json(default_handler=str))
assert_frame_equal(expected, result, check_index_type=False)
def test_default_handler_indirect(self):
from pandas.io.json import dumps
def default(obj):
if isinstance(obj, complex):
return [('mathjs', 'Complex'),
('re', obj.real),
('im', obj.imag)]
return str(obj)
df_list = [9, DataFrame({'a': [1, 'STR', complex(4, -5)],
'b': [float('nan'), None, 'N/A']},
columns=['a', 'b'])]
expected = ('[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
'["re",4.0],["im",-5.0]],"N\\/A"]]]')
assert dumps(df_list, default_handler=default,
orient="values") == expected
def test_default_handler_numpy_unsupported_dtype(self):
# GH12554 to_json raises 'Unhandled numpy dtype 15'
df = DataFrame({'a': [1, 2.3, complex(4, -5)],
'b': [float('nan'), None, complex(1.2, 0)]},
columns=['a', 'b'])
expected = ('[["(1+0j)","(nan+0j)"],'
'["(2.3+0j)","(nan+0j)"],'
'["(4-5j)","(1.2+0j)"]]')
assert df.to_json(default_handler=str, orient="values") == expected
def test_default_handler_raises(self):
def my_handler_raises(obj):
raise TypeError("raisin")
pytest.raises(TypeError,
DataFrame({'a': [1, 2, object()]}).to_json,
default_handler=my_handler_raises)
pytest.raises(TypeError,
DataFrame({'a': [1, 2, complex(4, -5)]}).to_json,
default_handler=my_handler_raises)
def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
df["B"] = df["A"]
expected = df.to_json()
df["B"] = df["A"].astype('category')
assert expected == df.to_json()
s = df["A"]
sc = df["B"]
assert s.to_json() == sc.to_json()
def test_datetime_tz(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
tz_range = pd.date_range('20130101', periods=3, tz='US/Eastern')
tz_naive = tz_range.tz_convert('utc').tz_localize(None)
df = DataFrame({
'A': tz_range,
'B': pd.date_range('20130101', periods=3)})
df_naive = df.copy()
df_naive['A'] = tz_naive
expected = df_naive.to_json()
assert expected == df.to_json()
stz = Series(tz_range)
s_naive = Series(tz_naive)
assert stz.to_json() == s_naive.to_json()
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
df.loc[:8] = np.nan
sdf = df.to_sparse()
expected = df.to_json()
assert expected == sdf.to_json()
s = pd.Series(np.random.randn(10))
s.loc[:8] = np.nan
ss = s.to_sparse()
expected = s.to_json()
assert expected == ss.to_json()
def test_tz_is_utc(self):
from pandas.io.json import dumps
exp = '"2013-01-10T05:00:00.000Z"'
ts = Timestamp('2013-01-10 05:00:00Z')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
ts = Timestamp('2013-01-10 00:00:00', tz='US/Eastern')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
ts = Timestamp('2013-01-10 00:00:00-0500')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
def test_tz_range_is_utc(self):
from pandas.io.json import dumps
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
dfexp = ('{"DT":{'
'"0":"2013-01-01T05:00:00.000Z",'
'"1":"2013-01-02T05:00:00.000Z"}}')
tz_range = pd.date_range('2013-01-01 05:00:00Z', periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
tz_range = pd.date_range('2013-01-01 00:00:00', periods=2,
tz='US/Eastern')
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
tz_range = pd.date_range('2013-01-01 00:00:00-0500', periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
def test_read_inline_jsonl(self):
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_read_s3_jsonl(self, s3_resource):
# GH17200
result = read_json('s3n://pandas-test/items.jsonl', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_read_local_jsonl(self):
# GH17200
with ensure_clean('tmp_items.json') as path:
with open(path, 'w') as infile:
infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
result = read_json(path, lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_read_jsonl_unicode_chars(self):
# GH15132: non-ascii unicode characters
# \u201d == RIGHT DOUBLE QUOTATION MARK
# simulate file handle
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
json = StringIO(json)
result = read_json(json, lines=True)
expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]],
columns=['a', 'b'])
assert_frame_equal(result, expected)
# simulate string
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
result = read_json(json, lines=True)
expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]],
columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_read_json_large_numbers(self):
# GH18842
json = '{"articleId": "1404366058080022500245"}'
json = StringIO(json)
result = read_json(json, typ="series")
expected = Series(1.404366e+21, index=['articleId'])
assert_series_equal(result, expected)
json = '{"0": {"articleId": "1404366058080022500245"}}'
json = StringIO(json)
result = read_json(json)
expected = DataFrame(1.404366e+21, index=['articleId'], columns=[0])
assert_frame_equal(result, expected)
def test_to_jsonl(self):
# GH9180
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.to_json(orient="records", lines=True)
expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
assert result == expected
df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b'])
result = df.to_json(orient="records", lines=True)
expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
assert result == expected
assert_frame_equal(pd.read_json(result, lines=True), df)
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]],
columns=["a\\", 'b'])
result = df.to_json(orient="records", lines=True)
expected = ('{"a\\\\":"foo\\\\","b":"bar"}\n'
'{"a\\\\":"foo\\"","b":"bar"}')
assert result == expected
assert_frame_equal(pd.read_json(result, lines=True), df)
def test_latin_encoding(self):
if compat.PY2:
tm.assert_raises_regex(
TypeError, r'\[unicode\] is not implemented as a table column')
return
# GH 13774
pytest.skip("encoding not implemented in .to_json(), "
"xref #13774")
values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'a', b'b', b'c'],
[b'EE, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
[b'', b'a', b'b', b'c'],
[b'\xf8\xfc', b'a', b'b', b'c'],
[b'A\xf8\xfc', b'', b'a', b'b', b'c'],
[np.nan, b'', b'b', b'c'],
[b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
def _try_decode(x, encoding='latin-1'):
try:
return x.decode(encoding)
except AttributeError:
return x
# not sure how to remove latin-1 from code in python 2 and 3
values = [[_try_decode(x) for x in y] for y in values]
examples = []
for dtype in ['category', object]:
for val in values:
examples.append(Series(val, dtype=dtype))
def roundtrip(s, encoding='latin-1'):
with ensure_clean('test.json') as path:
s.to_json(path, encoding=encoding)
retr = read_json(path, encoding=encoding)
assert_series_equal(s, retr, check_categorical=False)
for s in examples:
roundtrip(s)
def test_data_frame_size_after_to_json(self):
# GH15344
df = DataFrame({'a': [str(1)]})
size_before = df.memory_usage(index=True, deep=True).sum()
df.to_json()
size_after = df.memory_usage(index=True, deep=True).sum()
assert size_before == size_after
@pytest.mark.parametrize('data, expected', [
(DataFrame([[1, 2], [4, 5]], columns=['a', 'b']),
{'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
(DataFrame([[1, 2], [4, 5]], columns=['a', 'b']).rename_axis('foo'),
{'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
(DataFrame([[1, 2], [4, 5]], columns=['a', 'b'],
index=[['a', 'b'], ['c', 'd']]),
{'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
(Series([1, 2, 3], name='A'),
{'name': 'A', 'data': [1, 2, 3]}),
(Series([1, 2, 3], name='A').rename_axis('foo'),
{'name': 'A', 'data': [1, 2, 3]}),
(Series([1, 2], name='A', index=[['a', 'b'], ['c', 'd']]),
{'name': 'A', 'data': [1, 2]}),
])
def test_index_false_to_json_split(self, data, expected):
# GH 17394
# Testing index=False in to_json with orient='split'
result = data.to_json(orient='split', index=False)
result = json.loads(result)
assert result == expected
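        # For illustration (not part of the original test): with orient='split' and index=False,
        # DataFrame([[1, 2], [4, 5]], columns=['a', 'b']).to_json(orient='split', index=False)
        # yields roughly '{"columns":["a","b"],"data":[[1,2],[4,5]]}', i.e. the "index" entry is dropped.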
@pytest.mark.parametrize('data', [
(DataFrame([[1, 2], [4, 5]], columns=['a', 'b'])),
(DataFrame([[1, 2], [4, 5]], columns=['a', 'b']).rename_axis('foo')),
(DataFrame([[1, 2], [4, 5]], columns=['a', 'b'],
index=[['a', 'b'], ['c', 'd']])),
(Series([1, 2, 3], name='A')),
(Series([1, 2, 3], name='A').rename_axis('foo')),
(Series([1, 2], name='A', index=[['a', 'b'], ['c', 'd']])),
])
def test_index_false_to_json_table(self, data):
# GH 17394
# Testing index=False in to_json with orient='table'
result = data.to_json(orient='table', index=False)
result = json.loads(result)
expected = {
'schema': pd.io.json.build_table_schema(data, index=False),
'data': DataFrame(data).to_dict(orient='records')
}
assert result == expected
@pytest.mark.parametrize('orient', [
'records', 'index', 'columns', 'values'
])
def test_index_false_error_to_json(self, orient):
# GH 17394
# Testing error message from to_json with index=False
df = pd.DataFrame([[1, 2], [4, 5]], columns=['a', 'b'])
with tm.assert_raises_regex(ValueError, "'index=False' is only "
"valid when 'orient' is "
"'split' or 'table'"):
df.to_json(orient=orient, index=False)
| bsd-3-clause |
cybercomgroup/Big_Data | Cloudera/Code/million_song_dataset/Spark_scripts/spark_visualisehottnessbyartist.py | 1 | 1183 | from pyspark import SparkConf
from pyspark import SparkContext
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
import random
#To run: PYSPARK_PYTHON=/opt/cloudera/parcels/Anaconda/bin/python spark-submit spark_visualisehottnessbyartist.py /user/cloudera/song/song_final.csv
def rddToPand(RDD):
header = "temp"
first = True
data = []
# Convert unicode to ascii
for x in RDD.collect():
if first:
first = False
header = x.encode("ascii").split(',')
else:
data.append(tuple(x.encode("ascii").split(',')))
return pd.DataFrame.from_records(data, columns = header)
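# Usage sketch (illustrative only, mirrors what the script does below):
#   lines_rdd = sc.textFile(str(sys.argv[1]))   # header line followed by comma-separated data rows
#   pdf = rddToPand(lines_rdd)                  # -> pandas DataFrame using that header for column names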
def test(row):
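    # blank out every field of the given row except indices 3 and 5 (helper, not called in this script)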
for x in range(0, row.count()):
if x!=3 and x!=5:
row[x]=''
return row
# Init Spark
conf = SparkConf()
conf.setMaster('yarn-client')
conf.setAppName('artisthotness-job')
sc = SparkContext(conf=conf)
rdd = sc.textFile(str(sys.argv[1]))
mapped = rdd.map(lambda line: line.split(',')).map(lambda row: row[3])
mapped2 = rdd.map(lambda line: line.split(',')).map(lambda row: row[5])
maps = mapped.zip(mapped2)  # pair the two columns element-wise; join() would require key/value pair RDDs
df = rddToPand(mapped)
file = open('visualise.txt', 'w')
file.write(str(mapped2.take(10)))
file.close()
| gpl-3.0 |
jangorecki/h2o-3 | h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_binomial_large.py | 4 | 116214 | from __future__ import print_function
import sys
sys.path.insert(1, "../../../")
import random
import os
import math
import numpy as np
import h2o
import time
from builtins import range
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.grid.grid_search import H2OGridSearch
from scipy import stats
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
class TestGLMBinomial:
"""
This class is created to test the GLM algo with Binomial family. In this case, the relationship
between the response Y and predictor vector X is assumed to be
Prob(Y = 1|X) = exp(W^T * X + E)/(1+exp(W^T * X + E)) where E is unknown Gaussian noise. We
generate random data set using the exact formula. To evaluate the H2O GLM Model, we run the sklearn
logistic regression with the same data sets and compare the performance of the two. If they are close
enough within a certain tolerance, we declare the H2O model working. When regularization and other
parameters are enabled, we can evaluate H2O GLM model performance by comparing the logloss/accuracy
from H2O model and to the H2O model generated without regularization. As long as they do not deviate
too much, we consider the H2O model performance satisfactory.
    In particular, I have written 7 tests in the hope of exercising as many parameter settings of the GLM
    algo with Binomial distribution as possible. Tomas has requested 2 tests to be added to test his new
    feature of missing_values_handling with predictors containing both categorical and real value columns.
    Here is a list of all test descriptions:
test1_glm_no_regularization(): sklearn logistic regression model is built.
H2O GLM is built for Binomial family with the same random data sets. We observe
the weights, confusion matrices from the two models. We compare the logloss, prediction
accuracy from the two models to determine if H2O GLM model shall pass the test.
test2_glm_lambda_search(): test lambda search with alpha set to 0.5 per Tomas's
    suggestion. Make sure the logloss and prediction accuracy generated here are comparable in
value to H2O GLM with no regularization.
test3_glm_grid_search_over_params(): test grid search over
various alpha values while lambda is set to be the best value obtained
from test 2. Cross validation with k=5 and random assignment is enabled
as well. The best model performance hopefully will generate logloss and
prediction accuracies close to H2O with no regularization in test 1.
test4_glm_remove_collinear_columns(): test parameter remove_collinear_columns=True
with lambda set to best lambda from test 2, alpha set to best alpha from Gridsearch
    and solver set to the one which generates the smallest validation logloss. The same dataset
    is used here except that we randomly choose predictor columns to repeat and scale.
    Make sure the logloss and prediction accuracies generated here are comparable in value
    to the H2O GLM model with no regularization.
test5_missing_values(): Test parameter missing_values_handling="MeanImputation" with
only real value predictors. The same data sets as before is used. However, we
go into the predictor matrix and randomly decide to replace a value with
nan and create missing values. Sklearn logistic regression model is built using the
data set where we have imputed the missing values. This Sklearn model will be used to
compare our H2O models with.
test6_enum_missing_values(): Test parameter missing_values_handling="MeanImputation" with
mixed predictors (categorical/real value columns). We first generate a data set that
contains a random number of columns of categorical and real value columns. Next, we
encode the categorical columns. Then, we generate the random data set using the formula
as before. Next, we go into the predictor matrix and randomly
decide to change a value to be nan and create missing values. Again, we build a Sklearn
logistic regression model and compare our H2O model with it.
    test7_missing_enum_values_lambda_search(): Test parameter missing_values_handling="MeanImputation" with
    mixed predictors (categorical/real value columns) and with lambda search set to True.
We use the same prediction data with missing values from test6. Next, we encode the categorical columns using
true one hot encoding since Lambda-search will be enabled with alpha set to 0.5. Since the encoding
is different in this case from test6, we will build a brand new Sklearn logistic regression model and
compare the best H2O model logloss/prediction accuracy with it.
"""
# parameters set by users, change with care
max_col_count = 50 # set maximum values of train/test row and column counts
max_col_count_ratio = 500 # set max row count to be multiples of col_count to avoid overfitting
min_col_count_ratio = 100 # set min row count to be multiples of col_count to avoid overfitting
###### for debugging
# max_col_count = 5 # set maximum values of train/test row and column counts
# max_col_count_ratio = 50 # set max row count to be multiples of col_count to avoid overfitting
# min_col_count_ratio = 10
max_p_value = 2 # set maximum predictor value
min_p_value = -2 # set minimum predictor value
max_w_value = 2 # set maximum weight value
min_w_value = -2 # set minimum weight value
enum_levels = 5 # maximum number of levels for categorical variables not counting NAs
class_method = 'probability' # can be 'probability' or 'threshold', control how discrete response is generated
test_class_method = 'probability' # for test data set
margin = 0.0 # only used when class_method = 'threshold'
test_class_margin = 0.2 # for test data set
family = 'binomial' # this test is for Binomial GLM
curr_time = str(round(time.time()))
# parameters denoting filenames of interested that store training/validation/test data sets
training_filename = family+"_"+curr_time+"_training_set.csv"
training_filename_duplicate = family+"_"+curr_time+"_training_set_duplicate.csv"
training_filename_nans = family+"_"+curr_time+"_training_set_NA.csv"
training_filename_enum = family+"_"+curr_time+"_training_set_enum.csv"
training_filename_enum_true_one_hot = family+"_"+curr_time+"_training_set_enum_trueOneHot.csv"
training_filename_enum_nans = family+"_"+curr_time+"_training_set_enum_NAs.csv"
training_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_training_set_enum_NAs_trueOneHot.csv"
validation_filename = family+"_"+curr_time+"_validation_set.csv"
validation_filename_enum = family+"_"+curr_time+"_validation_set_enum.csv"
validation_filename_enum_true_one_hot = family+"_"+curr_time+"_validation_set_enum_trueOneHot.csv"
validation_filename_enum_nans = family+"_"+curr_time+"_validation_set_enum_NAs.csv"
validation_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_validation_set_enum_NAs_trueOneHot.csv"
test_filename = family+"_"+curr_time+"_test_set.csv"
test_filename_duplicate = family+"_"+curr_time+"_test_set_duplicate.csv"
test_filename_nans = family+"_"+curr_time+"_test_set_NA.csv"
test_filename_enum = family+"_"+curr_time+"_test_set_enum.csv"
test_filename_enum_true_one_hot = family+"_"+curr_time+"_test_set_enum_trueOneHot.csv"
test_filename_enum_nans = family+"_"+curr_time+"_test_set_enum_NAs.csv"
test_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_test_set_enum_NAs_trueOneHot.csv"
weight_filename = family+"_"+curr_time+"_weight.csv"
weight_filename_enum = family+"_"+curr_time+"_weight_enum.csv"
total_test_number = 7 # total number of tests being run for GLM Binomial family
ignored_eps = 1e-15 # if p-values < than this value, no comparison is performed, only for Gaussian
    allowed_diff = 0.1          # tolerance of comparison for logloss/prediction accuracy, okay to be loose
                                # since the conditions used to run the two models are different
duplicate_col_counts = 5 # maximum number of times to duplicate a column
duplicate_threshold = 0.2 # for each column, a coin is tossed to see if we duplicate that column or not
duplicate_max_scale = 2 # maximum scale factor for duplicated columns
nan_fraction = 0.2 # denote maximum fraction of NA's to be inserted into a column
# System parameters, do not change. Dire consequences may follow if you do
current_dir = os.path.dirname(os.path.realpath(sys.argv[1])) # directory of this test file
    enum_col = 0                # number of categorical columns in the predictors, randomly set later
    enum_level_vec = []         # vector containing number of levels for each categorical column
    noise_std = 0               # standard deviation of the Gaussian noise added to the response
train_row_count = 0 # training data row count, randomly generated later
train_col_count = 0 # training data column count, randomly generated later
class_number = 2 # actual number of classes existed in data set, randomly generated later
data_type = 2 # determine data type of data set and weight, 1: integers, 2: real
# parameters denoting filenames with absolute paths
training_data_file = os.path.join(current_dir, training_filename)
training_data_file_duplicate = os.path.join(current_dir, training_filename_duplicate)
training_data_file_nans = os.path.join(current_dir, training_filename_nans)
training_data_file_enum = os.path.join(current_dir, training_filename_enum)
training_data_file_enum_true_one_hot = os.path.join(current_dir, training_filename_enum_true_one_hot)
training_data_file_enum_nans = os.path.join(current_dir, training_filename_enum_nans)
training_data_file_enum_nans_true_one_hot = os.path.join(current_dir, training_filename_enum_nans_true_one_hot)
validation_data_file = os.path.join(current_dir, validation_filename)
validation_data_file_enum = os.path.join(current_dir, validation_filename_enum)
validation_data_file_enum_true_one_hot = os.path.join(current_dir, validation_filename_enum_true_one_hot)
validation_data_file_enum_nans = os.path.join(current_dir, validation_filename_enum_nans)
validation_data_file_enum_nans_true_one_hot = os.path.join(current_dir, validation_filename_enum_nans_true_one_hot)
test_data_file = os.path.join(current_dir, test_filename)
test_data_file_duplicate = os.path.join(current_dir, test_filename_duplicate)
test_data_file_nans = os.path.join(current_dir, test_filename_nans)
test_data_file_enum = os.path.join(current_dir, test_filename_enum)
test_data_file_enum_true_one_hot = os.path.join(current_dir, test_filename_enum_true_one_hot)
test_data_file_enum_nans = os.path.join(current_dir, test_filename_enum_nans)
test_data_file_enum_nans_true_one_hot = os.path.join(current_dir, test_filename_enum_nans_true_one_hot)
weight_data_file = os.path.join(current_dir, weight_filename)
weight_data_file_enum = os.path.join(current_dir, weight_filename_enum)
# store template model performance values for later comparison
test1_model = None # store template model for later comparison
test1_model_metrics = None # store template model test metrics for later comparison
best_lambda = 0.0 # store best lambda obtained using lambda search
test_name = "pyunit_glm_binomial.py" # name of this test
sandbox_dir = "" # sandbox directory where we are going to save our failed test data sets
# store information about training data set, validation and test data sets that are used
# by many tests. We do not want to keep loading them for each set in the hope of
# saving time. Trading off memory and speed here.
x_indices = [] # store predictor indices in the data set
y_index = [] # store response index in the data set
training_data = [] # store training data set
test_data = [] # store test data set
valid_data = [] # store validation data set
training_data_grid = [] # store combined training and validation data set for cross validation
best_alpha = 0.5 # store best alpha value found
    best_grid_logloss = -1      # store lowest validation logloss found from grid search
test_failed_array = [0]*total_test_number # denote test results for all tests run. 1 error, 0 pass
test_num = 0 # index representing which test is being run
duplicate_col_indices = [] # denote column indices when column duplication is applied
duplicate_col_scales = [] # store scaling factor for all columns when duplication is applied
noise_var = noise_std*noise_std # Binomial noise variance
test_failed = 0 # count total number of tests that have failed
sklearn_class_weight = {} # used to make sure Sklearn will know the correct number of classes
def __init__(self):
self.setup()
def setup(self):
"""
This function performs all initializations necessary:
1. generates all the random values for our dynamic tests like the Binomial
noise std, column count and row count for training data set;
2. generate the training/validation/test data sets with only real values;
3. insert missing values into training/valid/test data sets.
        4. taking the training/valid/test data sets, randomly duplicate certain columns,
        where each duplicated column is repeated a random number of times and randomly scaled;
5. generate the training/validation/test data sets with predictors containing enum
and real values as well***.
6. insert missing values into the training/validation/test data sets with predictors
containing enum and real values as well
*** according to Tomas, when working with mixed predictors (contains both enum/real
value columns), the encoding used is different when regularization is enabled or disabled.
When regularization is enabled, true one hot encoding is enabled to encode the enum
values to binary bits. When regularization is disabled, a reference level plus one hot encoding
is enabled when encoding the enum values to binary bits. One data set is generated
when we work with mixed predictors.
"""
# clean out the sandbox directory first
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# randomly set Binomial noise standard deviation as a fraction of actual predictor standard deviation
self.noise_std = random.uniform(0, math.sqrt(pow((self.max_p_value - self.min_p_value), 2) / 12))
self.noise_var = self.noise_std*self.noise_std
# randomly determine data set size in terms of column and row counts
self.train_col_count = random.randint(3, self.max_col_count) # account for enum columns later
self.train_row_count = int(round(self.train_col_count*random.uniform(self.min_col_count_ratio,
self.max_col_count_ratio)))
# # DEBUGGING setup_data, remember to comment them out once done.
# self.train_col_count = 3
# self.train_row_count = 500
# end DEBUGGING
# randomly set number of enum and real columns in the data set
self.enum_col = random.randint(1, self.train_col_count-1)
# randomly set number of levels for each categorical column
self.enum_level_vec = np.random.random_integers(2, self.enum_levels-1, [self.enum_col, 1])
# generate real value weight vector and training/validation/test data sets for GLM
pyunit_utils.write_syn_floating_point_dataset_glm(self.training_data_file,
self.validation_data_file,
self.test_data_file, self.weight_data_file,
self.train_row_count, self.train_col_count, self.data_type,
self.max_p_value, self.min_p_value, self.max_w_value,
self.min_w_value, self.noise_std, self.family,
self.train_row_count, self.train_row_count,
class_number=self.class_number,
class_method=[self.class_method, self.class_method,
self.test_class_method],
class_margin=[self.margin, self.margin,
self.test_class_margin])
# randomly generate the duplicated and scaled columns
(self.duplicate_col_indices, self.duplicate_col_scales) = \
pyunit_utils.random_col_duplication(self.train_col_count, self.duplicate_threshold,
self.duplicate_col_counts, True, self.duplicate_max_scale)
# apply the duplication and scaling to training and test set
# need to add the response column to the end of duplicated column indices and scale
dup_col_indices = self.duplicate_col_indices
dup_col_indices.append(self.train_col_count)
dup_col_scale = self.duplicate_col_scales
dup_col_scale.append(1.0)
# print out duplication information for easy debugging
print("duplication column and duplication scales are: ")
print(dup_col_indices)
print(dup_col_scale)
pyunit_utils.duplicate_scale_cols(dup_col_indices, dup_col_scale, self.training_data_file,
self.training_data_file_duplicate)
pyunit_utils.duplicate_scale_cols(dup_col_indices, dup_col_scale, self.test_data_file,
self.test_data_file_duplicate)
# insert NAs into training/test data sets
pyunit_utils.insert_nan_in_data(self.training_data_file, self.training_data_file_nans, self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.test_data_file, self.test_data_file_nans, self.nan_fraction)
# generate data sets with enum as well as real values
pyunit_utils.write_syn_mixed_dataset_glm(self.training_data_file_enum,
self.training_data_file_enum_true_one_hot,
self.validation_data_file_enum,
self.validation_data_file_enum_true_one_hot,
self.test_data_file_enum, self.test_data_file_enum_true_one_hot,
self.weight_data_file_enum, self.train_row_count, self.train_col_count,
self.max_p_value, self.min_p_value, self.max_w_value, self.min_w_value,
self.noise_std, self.family, self.train_row_count,
self.train_row_count, self.enum_col, self.enum_level_vec,
class_number=self.class_number,
class_method=[self.class_method,
self.class_method,
self.test_class_method],
class_margin=[self.margin, self.margin, self.test_class_margin])
# insert NAs into data set with categorical columns
pyunit_utils.insert_nan_in_data(self.training_data_file_enum, self.training_data_file_enum_nans,
self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.validation_data_file_enum, self.validation_data_file_enum_nans,
self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.test_data_file_enum, self.test_data_file_enum_nans,
self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.training_data_file_enum_true_one_hot,
self.training_data_file_enum_nans_true_one_hot, self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.validation_data_file_enum_true_one_hot,
self.validation_data_file_enum_nans_true_one_hot, self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.test_data_file_enum_true_one_hot,
self.test_data_file_enum_nans_true_one_hot,
self.nan_fraction)
# only preload data sets that will be used for multiple tests and change the response to enums
self.training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file))
# set indices for response and predictor columns in data set for H2O GLM model to use
self.y_index = self.training_data.ncol-1
self.x_indices = list(range(self.y_index))
# added the round() so that this will work on win8.
self.training_data[self.y_index] = self.training_data[self.y_index].round().asfactor()
# check to make sure all response classes are represented, otherwise, quit
if self.training_data[self.y_index].nlevels()[0] < self.class_number:
print("Response classes are not represented in training dataset.")
sys.exit(0)
self.valid_data = h2o.import_file(pyunit_utils.locate(self.validation_data_file))
self.valid_data[self.y_index] = self.valid_data[self.y_index].round().asfactor()
self.test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file))
self.test_data[self.y_index] = self.test_data[self.y_index].round().asfactor()
# make a bigger training set for grid search by combining data from validation data set
self.training_data_grid = self.training_data.rbind(self.valid_data)
        # set up sklearn class weights of all ones; used only to make sure sklearn knows the correct number of classes
for ind in range(self.class_number):
self.sklearn_class_weight[ind] = 1.0
# save the training data files just in case the code crashed.
pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)
def teardown(self):
"""
        This function performs teardown after the dynamic test is completed. If all tests
        passed, it will delete all generated data sets since they can be quite large. Otherwise,
        it will move the training/validation/test data sets into an Rsandbox directory so that
        we can re-run the failed tests.
"""
remove_files = []
# create Rsandbox directory to keep data sets and weight information
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# Do not want to save all data sets. Only save data sets that are needed for failed tests
if sum(self.test_failed_array[0:4]):
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
pyunit_utils.move_files(self.sandbox_dir, self.validation_data_file, self.validation_filename)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
else: # remove those files instead of moving them
remove_files.append(self.training_data_file)
remove_files.append(self.validation_data_file)
remove_files.append(self.test_data_file)
if sum(self.test_failed_array[0:6]):
pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file, self.weight_filename)
else:
remove_files.append(self.weight_data_file)
if self.test_failed_array[3]:
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_duplicate, self.test_filename_duplicate)
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_duplicate,
self.training_filename_duplicate)
else:
remove_files.append(self.training_data_file_duplicate)
remove_files.append(self.test_data_file_duplicate)
if self.test_failed_array[4]:
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_nans, self.training_filename_nans)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_nans, self.test_filename_nans)
else:
remove_files.append(self.training_data_file_nans)
remove_files.append(self.test_data_file_nans)
if self.test_failed_array[5]:
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_enum_nans,
self.training_filename_enum_nans)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_enum_nans, self.test_filename_enum_nans)
pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file_enum, self.weight_filename_enum)
else:
remove_files.append(self.training_data_file_enum_nans)
remove_files.append(self.training_data_file_enum)
remove_files.append(self.test_data_file_enum_nans)
remove_files.append(self.test_data_file_enum)
remove_files.append(self.validation_data_file_enum_nans)
remove_files.append(self.validation_data_file_enum)
remove_files.append(self.weight_data_file_enum)
if self.test_failed_array[6]:
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_enum_nans_true_one_hot,
self.training_filename_enum_nans_true_one_hot)
pyunit_utils.move_files(self.sandbox_dir, self.validation_data_file_enum_nans_true_one_hot,
self.validation_filename_enum_nans_true_one_hot)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_enum_nans_true_one_hot,
self.test_filename_enum_nans_true_one_hot)
pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file_enum, self.weight_filename_enum)
else:
remove_files.append(self.training_data_file_enum_nans_true_one_hot)
remove_files.append(self.training_data_file_enum_true_one_hot)
remove_files.append(self.validation_data_file_enum_nans_true_one_hot)
remove_files.append(self.validation_data_file_enum_true_one_hot)
remove_files.append(self.test_data_file_enum_nans_true_one_hot)
remove_files.append(self.test_data_file_enum_true_one_hot)
        if not(self.test_failed):  # all tests have passed. Delete sandbox if it was not wiped before
pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, False)
        # remove only the csv files generated by this test; the test directory is shared, so do not sweep it wholesale
if len(remove_files) > 0:
for file in remove_files:
pyunit_utils.remove_files(file)
def test1_glm_no_regularization(self):
"""
In this test, a sklearn logistic regression model and a H2O GLM are built for Binomial family with the same
random data sets. We observe the weights, confusion matrices from the two models. We compare the logloss,
prediction accuracy from the two models to determine if H2O GLM model shall pass the test.
"""
print("*******************************************************************************************")
print("Test1: build H2O GLM with Binomial with no regularization.")
h2o.cluster_info()
# training result from python Sklearn logistic regression model
(p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
self.sklearn_binomial_result(self.training_data_file, self.test_data_file, False, False)
# build our H2O model
self.test1_model = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0)
self.test1_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data)
# calculate test metrics
self.test1_model_metrics = self.test1_model.model_performance(test_data=self.test_data)
num_test_failed = self.test_failed # used to determine if the current test has failed
# print out comparison results for weight/logloss/prediction accuracy
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(self.test1_model,
self.test1_model_metrics,
self.family, "\nTest1 Done!",
compare_att_str=[
"\nComparing intercept and "
"weights ....",
"\nComparing logloss from training "
"dataset ....",
"\nComparing logloss from"
" test dataset ....",
"\nComparing confusion matrices from "
"training dataset ....",
"\nComparing confusion matrices from "
"test dataset ...",
"\nComparing accuracy from training "
"dataset ....",
"\nComparing accuracy from test "
"dataset ...."],
h2o_att_str=[
"H2O intercept and weights: \n",
"H2O logloss from training dataset: ",
"H2O logloss from test dataset",
"H2O confusion matrix from training "
"dataset: \n",
"H2O confusion matrix from test"
" dataset: \n",
"H2O accuracy from training dataset: ",
"H2O accuracy from test dataset: "],
template_att_str=[
"Sklearn intercept and weights: \n",
"Sklearn logloss from training "
"dataset: ",
"Sklearn logloss from test dataset: ",
"Sklearn confusion matrix from"
" training dataset: \n",
"Sklearn confusion matrix from test "
"dataset: \n",
"Sklearn accuracy from training "
"dataset: ",
"Sklearn accuracy from test "
"dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ "
"too much!",
"Logloss from test dataset differ too "
"much!", "", "",
"Accuracies from training dataset "
"differ too much!",
"Accuracies from test dataset differ "
"too much!"],
att_str_success=[
"Intercept and weights are close"
" enough!",
"Logloss from training dataset are "
"close enough!",
"Logloss from test dataset are close "
"enough!", "", "",
"Accuracies from training dataset are "
"close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
template_params=[
p_weights, p_logloss_train, p_cm_train,
p_accuracy_training, p_logloss_test,
p_cm_test, p_accuracy_test],
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test1_glm_no_regularization",
num_test_failed, self.test_failed)
self.test_num += 1 # update test index
def test2_glm_lambda_search(self):
"""
This test is used to test the lambda search. Recall that lambda search enables efficient and
automatic search for the optimal value of the lambda parameter. When lambda search is enabled,
GLM will first fit a model with maximum regularization and then keep decreasing it until
over-fitting occurs. The resulting model is based on the best lambda value. According to Tomas,
set alpha = 0.5 and enable validation but not cross-validation.
"""
print("*******************************************************************************************")
print("Test2: tests the lambda search.")
h2o.cluster_info()
# generate H2O model with lambda search enabled
model_h2o_0p5 = H2OGeneralizedLinearEstimator(family=self.family, lambda_search=True, alpha=0.5,
lambda_min_ratio=1e-20)
model_h2o_0p5.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data,
validation_frame=self.valid_data)
# get best lambda here
self.best_lambda = pyunit_utils.get_train_glm_params(model_h2o_0p5, 'best_lambda')
# get test performance here
h2o_model_0p5_test_metrics = model_h2o_0p5.model_performance(test_data=self.test_data)
num_test_failed = self.test_failed
# print out comparison results for our H2O GLM and test1 H2O model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o_0p5, h2o_model_0p5_test_metrics,
self.family, "\nTest2 Done!",
test_model=self.test1_model,
test_model_metric=self.test1_model_metrics,
compare_att_str=[
"\nComparing intercept and"
" weights ....",
"\nComparing logloss from training "
"dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from "
"training dataset ....",
"\nComparing confusion matrices from "
"test dataset ...",
"\nComparing accuracy from training "
"dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O lambda search intercept and "
"weights: \n",
"H2O lambda search logloss from"
" training dataset: ",
"H2O lambda search logloss from test "
"dataset",
"H2O lambda search confusion matrix "
"from training dataset: \n",
"H2O lambda search confusion matrix "
"from test dataset: \n",
"H2O lambda search accuracy from "
"training dataset: ",
"H2O lambda search accuracy from test"
" dataset: "],
template_att_str=[
"H2O test1 template intercept and"
" weights: \n",
"H2O test1 template logloss from "
"training dataset: ",
"H2O test1 template logloss from "
"test dataset: ",
"H2O test1 template confusion"
" matrix from training dataset: \n",
"H2O test1 template confusion"
" matrix from test dataset: \n",
"H2O test1 template accuracy from "
"training dataset: ",
"H2O test1 template accuracy from"
" test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ "
"too much!",
"Logloss from test dataset differ too"
" much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close "
"enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, False, True, True, True,
True, True],
just_print=[True, False, False, True, True,
True, False],
failed_test_number=self.test_failed,
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test2_glm_lambda_search",
num_test_failed, self.test_failed)
self.test_num += 1
def test3_glm_grid_search(self):
"""
This test is used to test GridSearch with the following parameters:
1. Lambda = best_lambda value from test2
2. alpha = [0 0.5 0.99]
3. cross-validation with k = 5, fold_assignment = "Random"
We will look at the best results from the grid search and compare it with H2O model built in test 1.
:return: None
"""
print("*******************************************************************************************")
print("Test3: explores various parameter settings in training the GLM using GridSearch using solver ")
h2o.cluster_info()
hyper_parameters = {'alpha': [0, 0.5, 0.99]} # set hyper_parameters for grid search
# train H2O GLM model with grid search
model_h2o_gridsearch = \
H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, Lambda=self.best_lambda, nfolds=5,
fold_assignment='Random'), hyper_parameters)
model_h2o_gridsearch.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data_grid)
# print out the model sequence ordered by the best validation logloss values, thanks Ludi!
temp_model = model_h2o_gridsearch.sort_by("logloss(xval=True)")
        # obtain the model ID of the best model (with smallest validation logloss) and use that for our evaluation
best_model_id = temp_model['Model Id'][0]
self.best_grid_logloss = temp_model['logloss(xval=True)'][0]
self.best_alpha = model_h2o_gridsearch.get_hyperparams(best_model_id)
best_model = h2o.get_model(best_model_id)
best_model_test_metrics = best_model.model_performance(test_data=self.test_data)
num_test_failed = self.test_failed
# print out comparison results for our H2O GLM with H2O model from test 1
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(best_model, best_model_test_metrics,
self.family,
"\nTest3 " + " Done!",
test_model=self.test1_model,
test_model_metric=self.test1_model_metrics,
compare_att_str=[
"\nComparing intercept and"
" weights ....",
"\nComparing logloss from training "
"dataset ....",
"\nComparing logloss from test dataset"
" ....",
"\nComparing confusion matrices from "
"training dataset ....",
"\nComparing confusion matrices from "
"test dataset ...",
"\nComparing accuracy from training "
"dataset ....",
"\nComparing accuracy from test "
" sdataset ...."],
h2o_att_str=[
"H2O grid search intercept and "
"weights: \n",
"H2O grid search logloss from training"
" dataset: ",
"H2O grid search logloss from test "
"dataset",
"H2O grid search confusion matrix from"
" training dataset: \n",
"H2O grid search confusion matrix from"
" test dataset: \n",
"H2O grid search accuracy from"
" training dataset: ",
"H2O grid search accuracy from test "
"dataset: "],
template_att_str=[
"H2O test1 template intercept and"
" weights: \n",
"H2O test1 template logloss from"
" training dataset: ",
"H2O test1 template logloss from"
" test dataset: ",
"H2O test1 template confusion"
" matrix from training dataset: \n",
"H2O test1 template confusion"
" matrix from test dataset: \n",
"H2O test1 template accuracy from"
" training dataset: ",
"H2O test1 template accuracy from"
" test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ"
" too much!",
"Logloss from test dataset differ too"
" much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close"
" enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[
True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test_glm_grid_search_over_params",
num_test_failed, self.test_failed)
self.test_num += 1
def test4_glm_remove_collinear_columns(self):
"""
        With the best parameters obtained from the test 3 grid search, we will train a GLM
        with duplicated columns and enable remove_collinear_columns to see if the
algorithm catches the duplicated columns. We will compare the results with test
1 results.
"""
print("*******************************************************************************************")
print("Test4: test the GLM remove_collinear_columns.")
h2o.cluster_info()
# read in training data sets with duplicated columns
training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_duplicate))
test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_duplicate))
y_index = training_data.ncol-1
x_indices = list(range(y_index))
# change response variable to be categorical
training_data[y_index] = training_data[y_index].round().asfactor()
test_data[y_index] = test_data[y_index].round().asfactor()
# train H2O model with remove_collinear_columns=True
model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=self.best_lambda, alpha=self.best_alpha,
remove_collinear_columns=True)
model_h2o.train(x=x_indices, y=y_index, training_frame=training_data)
print("Best lambda is {0}, best alpha is {1}".format(self.best_lambda, self.best_alpha))
# evaluate model over test data set
model_h2o_metrics = model_h2o.model_performance(test_data=test_data)
num_test_failed = self.test_failed
# print out comparison results our H2O GLM and test1 H2O model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o, model_h2o_metrics, self.family,
"\nTest3 Done!",
test_model=self.test1_model,
test_model_metric=self.test1_model_metrics,
compare_att_str=[
"\nComparing intercept and weights"
" ....",
"\nComparing logloss from training "
"dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from"
" training dataset ....",
"\nComparing confusion matrices from"
" test dataset ...",
"\nComparing accuracy from training"
" dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O remove_collinear_columns "
"intercept and weights: \n",
"H2O remove_collinear_columns"
" logloss from training dataset: ",
"H2O remove_collinear_columns"
" logloss from test dataset",
"H2O remove_collinear_columns"
" confusion matrix from "
"training dataset: \n",
"H2O remove_collinear_columns"
" confusion matrix from"
" test dataset: \n",
"H2O remove_collinear_columns"
" accuracy from"
" training dataset: ",
"H2O remove_collinear_columns"
" accuracy from test"
" dataset: "],
template_att_str=[
"H2O test1 template intercept and"
" weights: \n",
"H2O test1 template logloss from"
" training dataset: ",
"H2O test1 template logloss from"
" test dataset: ",
"H2O test1 template confusion"
" matrix from training dataset: \n",
"H2O test1 template confusion"
" matrix from test dataset: \n",
"H2O test1 template accuracy from"
" training dataset: ",
"H2O test1 template accuracy from"
" test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ"
" too much!",
"Logloss from test dataset differ too"
" much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close"
" enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test4_glm_remove_collinear_columns",
num_test_failed, self.test_failed)
self.test_num += 1
def test5_missing_values(self):
"""
Test parameter missing_values_handling="MeanImputation" with
only real value predictors. The same data sets as before is used. However, we
go into the predictor matrix and randomly decide to replace a value with
nan and create missing values. Sklearn logistic regression model is built using the
data set where we have imputed the missing values. This Sklearn model will be used to
compare our H2O models with.
"""
print("*******************************************************************************************")
print("Test5: test the GLM with imputation of missing values with column averages.")
h2o.cluster_info()
# training result from python sklearn
(p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
self.sklearn_binomial_result(self.training_data_file_nans, self.test_data_file_nans, False, False)
# import training set and test set
training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_nans))
test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_nans))
# change the response columns to be categorical
training_data[self.y_index] = training_data[self.y_index].round().asfactor()
test_data[self.y_index] = test_data[self.y_index].round().asfactor()
# train H2O models with missing_values_handling="MeanImputation"
model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0,
missing_values_handling="MeanImputation")
model_h2o.train(x=self.x_indices, y=self.y_index, training_frame=training_data)
# calculate H2O model performance with test data set
h2o_model_test_metrics = model_h2o.model_performance(test_data=test_data)
num_test_failed = self.test_failed
# print out comparison results our H2O GLM and Sklearn model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o, h2o_model_test_metrics,
self.family, "\nTest5 Done!",
compare_att_str=[
"\nComparing intercept and weights"
" ....",
"\nComparing logloss from training"
" dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from"
" training dataset ....",
"\nComparing confusion matrices from"
" test dataset ...",
"\nComparing accuracy from training"
" dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O missing values intercept and"
" weights: \n",
"H2O missing values logloss from"
" training dataset: ",
"H2O missing values logloss from"
" test dataset",
"H2O missing values confusion matrix"
" from training dataset: \n",
"H2O missing values confusion matrix"
" from test dataset: \n",
"H2O missing values accuracy from"
" training dataset: ",
"H2O missing values accuracy from"
" test dataset: "],
template_att_str=[
"Sklearn missing values intercept"
" and weights: \n",
"Sklearn missing values logloss from"
" training dataset: ",
"Sklearn missing values logloss from"
" test dataset: ",
"Sklearn missing values confusion"
" matrix from training dataset: \n",
"Sklearn missing values confusion"
" matrix from test dataset: \n",
"Sklearn missing values accuracy"
" from training dataset: ",
"Sklearn missing values accuracy"
" from test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ"
" too much!",
"Logloss from test dataset differ"
" too much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close "
"enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[
True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
template_params=[
p_weights, p_logloss_train, p_cm_train,
p_accuracy_training, p_logloss_test,
p_cm_test, p_accuracy_test],
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if tests have failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test5_missing_values",
num_test_failed, self.test_failed)
self.test_num += 1
def test6_enum_missing_values(self):
"""
Test parameter missing_values_handling="MeanImputation" with
mixed predictors (categorical/real value columns). We first generate a data set that
contains a random number of categorical and real value columns. Next, we
encode the categorical columns. Then, we generate the random data set using the same formula
as before. Next, we go into the predictor matrix and randomly
replace values with nan to create missing values. Again, we build a Sklearn
logistic regression model and compare our H2O models with it.
"""
# no regularization in this case, use reference level plus one-hot-encoding
print("*******************************************************************************************")
print("Test6: test the GLM with enum/real values.")
h2o.cluster_info()
# training result from python sklearn
(p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
self.sklearn_binomial_result(self.training_data_file_enum_nans, self.test_data_file_enum_nans, True, False)
# import training set and test set with missing values
training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_enum_nans))
test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_enum_nans))
# change the categorical data using .asfactor()
for ind in range(self.enum_col):
training_data[ind] = training_data[ind].round().asfactor()
test_data[ind] = test_data[ind].round().asfactor()
num_col = training_data.ncol
y_index = num_col - 1
x_indices = list(range(y_index))
# change response variables to be categorical
training_data[y_index] = training_data[y_index].round().asfactor()
# check to make sure all response classes are represented, otherwise, quit
if training_data[y_index].nlevels()[0] < self.class_number:
print("Response classes are not represented in training dataset.")
sys.exit(0)
test_data[y_index] = test_data[y_index].round().asfactor()
# generate H2O model
model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0,
missing_values_handling="MeanImputation")
model_h2o.train(x=x_indices, y=y_index, training_frame=training_data)
h2o_model_test_metrics = model_h2o.model_performance(test_data=test_data)
num_test_failed = self.test_failed
# print out comparison results for our H2O GLM and the Sklearn model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o, h2o_model_test_metrics,
self.family, "\nTest6 Done!",
compare_att_str=[
"\nComparing intercept and "
"weights ....",
"\nComparing logloss from training"
" dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from"
" training dataset ....",
"\nComparing confusion matrices from"
" test dataset ...",
"\nComparing accuracy from training"
" dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O with enum/real values, "
"no regularization and missing values"
" intercept and weights: \n",
"H2O with enum/real values, no "
"regularization and missing values"
" logloss from training dataset: ",
"H2O with enum/real values, no"
" regularization and missing values"
" logloss from test dataset",
"H2O with enum/real values, no"
" regularization and missing values"
" confusion matrix from training"
" dataset: \n",
"H2O with enum/real values, no"
" regularization and missing values"
" confusion matrix from test"
" dataset: \n",
"H2O with enum/real values, no"
" regularization and missing values "
"accuracy from training dataset: ",
"H2O with enum/real values, no "
"regularization and missing values"
" accuracy from test dataset: "],
template_att_str=[
"Sklearn missing values intercept "
"and weights: \n",
"Sklearn with enum/real values, no"
" regularization and missing values"
" logloss from training dataset: ",
"Sklearn with enum/real values, no "
"regularization and missing values"
" logloss from test dataset: ",
"Sklearn with enum/real values, no "
"regularization and missing values "
"confusion matrix from training"
" dataset: \n",
"Sklearn with enum/real values, no "
"regularization and missing values "
"confusion matrix from test "
"dataset: \n",
"Sklearn with enum/real values, no "
"regularization and missing values "
"accuracy from training dataset: ",
"Sklearn with enum/real values, no "
"regularization and missing values "
"accuracy from test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ"
" too much!",
"Logloss from test dataset differ too"
" much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close"
" enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[
True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
template_params=[
p_weights, p_logloss_train, p_cm_train,
p_accuracy_training, p_logloss_test,
p_cm_test, p_accuracy_test],
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
h2o.cluster_info()
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test6_enum_missing_values",
num_test_failed, self.test_failed)
self.test_num += 1
def test7_missing_enum_values_lambda_search(self):
"""
Test parameter missing_values_handling="MeanImputation" with
mixed predictors (categorical/real value columns) and with lambda search enabled.
We use the same predictors with missing values as in test6. Next, we encode the categorical columns using
true one-hot encoding since lambda search will be enabled with alpha set to 0.5. Since the encoding
differs from that of test6, we build a brand new Sklearn logistic regression model and
compare the best H2O model's logloss/prediction accuracy with it.
"""
# perform lambda_search, regularization and one hot encoding.
print("*******************************************************************************************")
print("Test7: test the GLM with imputation of missing enum/real values under lambda search.")
h2o.cluster_info()
# training result from python sklearn
(p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
self.sklearn_binomial_result(self.training_data_file_enum_nans,
self.test_data_file_enum_nans_true_one_hot, True, True,
validation_data_file=self.validation_data_file_enum_nans_true_one_hot)
# import training set and test set with missing values and true one hot encoding
training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_enum_nans_true_one_hot))
validation_data = h2o.import_file(pyunit_utils.locate(self.validation_data_file_enum_nans_true_one_hot))
test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_enum_nans_true_one_hot))
# change the categorical data using .asfactor()
for ind in range(self.enum_col):
training_data[ind] = training_data[ind].round().asfactor()
validation_data[ind] = validation_data[ind].round().asfactor()
test_data[ind] = test_data[ind].round().asfactor()
num_col = training_data.ncol
y_index = num_col - 1
x_indices = list(range(y_index))
# change response column to be categorical
training_data[y_index] = training_data[y_index].round().asfactor()
# check to make sure all response classes are represented, otherwise, quit
if training_data[y_index].nlevels()[0] < self.class_number:
print("Response classes are not represented in training dataset.")
sys.exit(0)
validation_data[y_index] = validation_data[y_index].round().asfactor()
test_data[y_index] = test_data[y_index].round().asfactor()
# train H2O model
model_h2o_0p5 = H2OGeneralizedLinearEstimator(family=self.family, lambda_search=True, alpha=0.5,
lambda_min_ratio=1e-20, missing_values_handling="MeanImputation")
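# lambda_min_ratio=1e-20 lets the lambda sequence extend down to an almost unpenalized model, so
# the search is able to approach the (essentially unregularized) Sklearn reference fit if that is
# what minimizes the validation metric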
model_h2o_0p5.train(x=x_indices, y=y_index, training_frame=training_data, validation_frame=validation_data)
h2o_model_0p5_test_metrics = model_h2o_0p5.model_performance(test_data=test_data)
num_test_failed = self.test_failed
# print out comparison results for our H2O GLM and the Sklearn model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o_0p5, h2o_model_0p5_test_metrics,
self.family, "\nTest7 Done!",
compare_att_str=[
"\nComparing intercept and "
"weights ....",
"\nComparing logloss from training"
" dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from"
" training dataset ....",
"\nComparing confusion matrices from"
" test dataset ...",
"\nComparing accuracy from training"
" dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O with enum/real values, lamba "
"search and missing values intercept"
" and weights: \n",
"H2O with enum/real values, lamba "
"search and missing values logloss "
"from training dataset: ",
"H2O with enum/real values, lamba "
"search and missing values logloss "
"from test dataset",
"H2O with enum/real values, lamba "
"search and missing values confusion "
"matrix from training dataset: \n",
"H2O with enum/real values, lamba "
"search and missing values confusion "
"matrix from test dataset: \n",
"H2O with enum/real values, lamba "
"search and missing values accuracy "
"from training dataset: ",
"H2O with enum/real values, lamba "
"search and missing values accuracy "
"from test dataset: "],
template_att_str=[
"Sklearn with enum/real values, lamba"
" search and missing values intercept"
" and weights: \n",
"Sklearn with enum/real values, lamba"
" search and missing values logloss "
"from training dataset: ",
"Sklearn with enum/real values, lamba"
" search and missing values logloss "
"from test dataset: ",
"Sklearn with enum/real values, lamba"
" search and missing values confusion"
" matrix from training dataset: \n",
"Sklearn with enum/real values, lamba"
" search and missing values confusion"
" matrix from test dataset: \n",
"Sklearn with enum/real values, lamba"
" search and missing values accuracy"
" from training dataset: ",
"Sklearn with enum/real values, lamba"
" search and missing values accuracy"
" from test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ "
"too much!",
"Logloss from test dataset differ too"
" much!", "", "", "Accuracies from"
" training dataset differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close "
"enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[
True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
template_params=[
p_weights, p_logloss_train, p_cm_train,
p_accuracy_training, p_logloss_test,
p_cm_test, p_accuracy_test],
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += \
pyunit_utils.show_test_results("test7_missing_enum_values_lambda_search", num_test_failed, self.test_failed)
self.test_num += 1
def sklearn_binomial_result(self, training_data_file, test_data_file, has_categorical, true_one_hot,
validation_data_file=""):
"""
This function will generate a Sklearn logistic regression model using the same data sets we have used to build
our H2O models. The purpose here is to be able to compare the performance of H2O
models with the Sklearn model built here. This is useful in cases where theoretical solutions
do not exist. If the data contains missing values, mean imputation is applied to the data set before
a Sklearn model is built. In addition, if there are enum columns in predictors and also missing values,
the same encoding and missing value imputation method used by H2O is applied to the data set before we build
the Sklearn model.
:param training_data_file: string storing training data set filename with directory path.
:param test_data_file: string storing test data set filename with directory path.
:param has_categorical: bool indicating if the data set contains mixed predictors (both enum and real)
:param true_one_hot: bool. True: true one-hot encoding is used. False: reference level plus one-hot encoding
is used.
:param validation_data_file: optional string, denoting validation file so that we can concatenate
training and validation data sets into a big training set since H2O model is using a training
and a validation data set.
:return: a tuple containing the weights, logloss, confusion matrix, prediction accuracy calculated on training
data set and test data set respectively.
"""
# read in the training data into a matrix
training_data_xy = np.asmatrix(np.genfromtxt(training_data_file, delimiter=',', dtype=None))
test_data_xy = np.asmatrix(np.genfromtxt(test_data_file, delimiter=',', dtype=None))
if len(validation_data_file) > 0: # validation data set exist and add it to training_data
temp_data_xy = np.asmatrix(np.genfromtxt(validation_data_file, delimiter=',', dtype=None))
training_data_xy = np.concatenate((training_data_xy, temp_data_xy), axis=0)
# if predictor contains categorical data, perform encoding of enums to binary bits
# for missing categorical enums, a new level is created for the nans
if has_categorical:
training_data_xy = pyunit_utils.encode_enum_dataset(training_data_xy, self.enum_level_vec, self.enum_col,
true_one_hot, np.any(np.isnan(training_data_xy)))  # flag whether nans are present
test_data_xy = pyunit_utils.encode_enum_dataset(test_data_xy, self.enum_level_vec, self.enum_col,
true_one_hot, np.any(np.isnan(training_data_xy)))
# replace missing values for real value columns with column mean before proceeding for training/test data sets
if np.isnan(training_data_xy).any():
inds = np.where(np.isnan(training_data_xy))
col_means = np.asarray(np.nanmean(training_data_xy, axis=0))[0]
training_data_xy[inds] = np.take(col_means, inds[1])
if np.isnan(test_data_xy).any():
# replace the nans in the test set with the column means computed from the training set
inds = np.where(np.isnan(test_data_xy))
test_data_xy = pyunit_utils.replace_nan_with_mean(test_data_xy, inds, col_means)
# now the data is ready to be massaged into a format that sklearn can use
(response_y, x_mat) = pyunit_utils.prepare_data_sklearn_multinomial(training_data_xy)
(t_response_y, t_x_mat) = pyunit_utils.prepare_data_sklearn_multinomial(test_data_xy)
# train the sklearn model
sklearn_model = LogisticRegression(class_weight=self.sklearn_class_weight)
sklearn_model = sklearn_model.fit(x_mat, response_y)
# grab the performance metrics on training data set
accuracy_training = sklearn_model.score(x_mat, response_y)
weights = sklearn_model.coef_
p_response_y = sklearn_model.predict(x_mat)
log_prob = sklearn_model.predict_log_proba(x_mat)
logloss_training = self.logloss_sklearn(response_y, log_prob)
cm_train = metrics.confusion_matrix(response_y, p_response_y)
# grab the performance metrics on the test data set
p_response_y = sklearn_model.predict(t_x_mat)
log_prob = sklearn_model.predict_log_proba(t_x_mat)
logloss_test = self.logloss_sklearn(t_response_y, log_prob)
cm_test = metrics.confusion_matrix(t_response_y, p_response_y)
accuracy_test = metrics.accuracy_score(t_response_y, p_response_y)
return weights, logloss_training, cm_train, accuracy_training, logloss_test, cm_test, accuracy_test
def logloss_sklearn(self, true_y, log_prob):
"""
This function calculates the average logloss for the Sklearn model given the true response (true_y) and the log
probabilities (log_prob).
:param true_y: array denoting the true class label
:param log_prob: matrix containing the log of Prob(Y=0) and Prob(Y=1)
:return: average logloss.
"""
(num_row, num_class) = log_prob.shape
logloss = 0.0
for ind in range(num_row):
logloss += log_prob[ind, int(true_y[ind])]
return -1.0 * logloss / num_row
def test_glm_binomial():
"""
Instantiate the TestGLMBinomial class and perform the tests specified for the GLM
Binomial family.
:return: None
"""
test_glm_binomial = TestGLMBinomial()
test_glm_binomial.test1_glm_no_regularization()
test_glm_binomial.test2_glm_lambda_search()
test_glm_binomial.test3_glm_grid_search()
test_glm_binomial.test4_glm_remove_collinear_columns()
test_glm_binomial.test5_missing_values()
test_glm_binomial.test6_enum_missing_values()
test_glm_binomial.test7_missing_enum_values_lambda_search()
test_glm_binomial.teardown()
sys.stdout.flush()
if test_glm_binomial.test_failed: # exit with error if any tests have failed
sys.exit(1)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_glm_binomial)
else:
test_glm_binomial()
| apache-2.0 |