repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
BiaDarkia/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 5 | 10219 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils import check_random_state
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V_mahalanobis = rng.rand(3, 3)
V_mahalanobis = np.dot(V_mahalanobis, V_mahalanobis.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=rng.random_sample(DIMENSION)),
'wminkowski': dict(p=3, w=rng.random_sample(DIMENSION)),
'mahalanobis': dict(V=V_mahalanobis)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
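# advanced indexing below picks, for each query row of Y, the distances at
# its k nearest indices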
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
rng = check_random_state(0)
X = rng.random_sample((40, DIMENSION))
Y = rng.random_sample((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
rng = check_random_state(0)
X = rng.random_sample((40, 10)).round(0)
Y = rng.random_sample((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
rng = check_random_state(0)
X = (4 * rng.random_sample((40, 10))).round(0)
Y = (4 * rng.random_sample((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
rng = check_random_state(0)
X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
rng = check_random_state(0)
X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
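# e.g. for the gaussian branch this reference density is
# kernel_norm(h, d, 'gaussian') * sum_j exp(-||y - x_j||^2 / (2 h^2)),
# which BallTree.kernel_density is expected to match within atol/rtol below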
def check_results(kernel, h, atol, rtol, breadth_first, bt, Y, dens_true):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
def test_ball_tree_kde(n_samples=100, n_features=3):
rng = check_random_state(0)
X = rng.random_sample((n_samples, n_features))
Y = rng.random_sample((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first, bt, Y, dens_true)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
rng = check_random_state(0)
x_in = rng.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
rng = check_random_state(0)
X = rng.random_sample((n_samples, n_features))
Y = rng.random_sample((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
rng = check_random_state(0)
X = rng.random_sample((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
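# push twice as many (distance, index) pairs as the heap holds per row;
# the heap should retain only the n_nbrs smallest distances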
for row in range(n_pts):
d_in = rng.random_sample(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = rng.random_sample(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = rng.random_sample((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
rng = check_random_state(0)
X = 2 * np.pi * rng.random_sample((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
hopgal/data-science-from-scratch | code/clustering.py | 60 | 6438 | from __future__ import division
from linear_algebra import squared_distance, vector_mean, distance
import math, random
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
class KMeans:
"""performs k-means clustering"""
def __init__(self, k):
self.k = k # number of clusters
self.means = None # means of clusters
def classify(self, input):
"""return the index of the cluster closest to the input"""
return min(range(self.k),
key=lambda i: squared_distance(input, self.means[i]))
def train(self, inputs):
self.means = random.sample(inputs, self.k)
assignments = None
while True:
# Find new assignments
new_assignments = map(self.classify, inputs)
# If no assignments have changed, we're done.
if assignments == new_assignments:
return
# Otherwise keep the new assignments,
assignments = new_assignments
for i in range(self.k):
i_points = [p for p, a in zip(inputs, assignments) if a == i]
# avoid divide-by-zero if i_points is empty
if i_points:
self.means[i] = vector_mean(i_points)
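# minimal usage sketch (illustrative only, with made-up 2-D points;
# mirrors the __main__ block at the bottom of this file):
# clusterer = KMeans(2)
# clusterer.train([[0, 0], [0, 1], [10, 10], [10, 11]])
# print clusterer.means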
def squared_clustering_errors(inputs, k):
"""finds the total squared error from k-means clustering the inputs"""
clusterer = KMeans(k)
clusterer.train(inputs)
means = clusterer.means
assignments = map(clusterer.classify, inputs)
return sum(squared_distance(input,means[cluster])
for input, cluster in zip(inputs, assignments))
def plot_squared_clustering_errors(plt):
ks = range(1, len(inputs) + 1)
errors = [squared_clustering_errors(inputs, k) for k in ks]
plt.plot(ks, errors)
plt.xticks(ks)
plt.xlabel("k")
plt.ylabel("total squared error")
plt.show()
#
# using clustering to recolor an image
#
def recolor_image(input_file, k=5):
img = mpimg.imread(input_file)
pixels = [pixel for row in img for pixel in row]
clusterer = KMeans(k)
clusterer.train(pixels) # this might take a while
def recolor(pixel):
cluster = clusterer.classify(pixel) # index of the closest cluster
return clusterer.means[cluster] # mean of the closest cluster
new_img = [[recolor(pixel) for pixel in row]
for row in img]
plt.imshow(new_img)
plt.axis('off')
plt.show()
#
# hierarchical clustering
#
def is_leaf(cluster):
"""a cluster is a leaf if it has length 1"""
return len(cluster) == 1
def get_children(cluster):
"""returns the two children of this cluster if it's a merged cluster;
raises an exception if this is a leaf cluster"""
if is_leaf(cluster):
raise TypeError("a leaf cluster has no children")
else:
return cluster[1]
def get_values(cluster):
"""returns the value in this cluster (if it's a leaf cluster)
or all the values in the leaf clusters below it (if it's not)"""
if is_leaf(cluster):
return cluster # is already a 1-tuple containing value
else:
return [value
for child in get_children(cluster)
for value in get_values(child)]
def cluster_distance(cluster1, cluster2, distance_agg=min):
"""finds the aggregate distance between elements of cluster1
and elements of cluster2"""
return distance_agg([distance(input1, input2)
for input1 in get_values(cluster1)
for input2 in get_values(cluster2)])
def get_merge_order(cluster):
if is_leaf(cluster):
return float('inf')
else:
return cluster[0] # merge_order is first element of 2-tuple
def bottom_up_cluster(inputs, distance_agg=min):
# start with every input a leaf cluster / 1-tuple
clusters = [(input,) for input in inputs]
# as long as we have more than one cluster left...
while len(clusters) > 1:
# find the two closest clusters
c1, c2 = min([(cluster1, cluster2)
for i, cluster1 in enumerate(clusters)
for cluster2 in clusters[:i]],
key=lambda (x, y): cluster_distance(x, y, distance_agg))
# remove them from the list of clusters
clusters = [c for c in clusters if c != c1 and c != c2]
# merge them, using merge_order = # of clusters left
merged_cluster = (len(clusters), [c1, c2])
# and add their merge
clusters.append(merged_cluster)
# when there's only one cluster left, return it
return clusters[0]
def generate_clusters(base_cluster, num_clusters):
# start with a list with just the base cluster
clusters = [base_cluster]
# as long as we don't have enough clusters yet...
while len(clusters) < num_clusters:
# choose the last-merged of our clusters
next_cluster = min(clusters, key=get_merge_order)
# remove it from the list
clusters = [c for c in clusters if c != next_cluster]
# and add its children to the list (i.e., unmerge it)
clusters.extend(get_children(next_cluster))
# once we have enough clusters...
return clusters
if __name__ == "__main__":
inputs = [[-14,-5],[13,13],[20,23],[-19,-11],[-9,-16],[21,27],[-49,15],[26,13],[-46,5],[-34,-1],[11,15],[-49,0],[-22,-16],[19,28],[-12,-8],[-13,-19],[-41,8],[-11,-6],[-25,-9],[-18,-3]]
random.seed(0) # so you get the same results as me
clusterer = KMeans(3)
clusterer.train(inputs)
print "3-means:"
print clusterer.means
print
random.seed(0)
clusterer = KMeans(2)
clusterer.train(inputs)
print "2-means:"
print clusterer.means
print
print "errors as a function of k"
for k in range(1, len(inputs) + 1):
print k, squared_clustering_errors(inputs, k)
print
print "bottom up hierarchical clustering"
base_cluster = bottom_up_cluster(inputs)
print base_cluster
print
print "three clusters, min:"
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
print
print "three clusters, max:"
base_cluster = bottom_up_cluster(inputs, max)
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
| unlicense |
jian-li/rpg_svo | svo_analysis/src/svo_analysis/analyse_dataset.py | 17 | 1178 | # -*- coding: utf-8 -*-
import associate
import numpy as np
import matplotlib.pyplot as plt
import yaml
def loadDataset(filename):
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
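# parse whitespace/comma-delimited numeric rows, skipping blank lines and
# lines starting with '#'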
D = np.array([[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"], dtype=np.float64)
return D
dataset_dir = '/home/cforster/Datasets/SlamBenchmark/px4_r2'
trajectory_data = dataset_dir+'/groundtruth.txt'
stepsize = 10
# load dataset
data = loadDataset(trajectory_data)
n = data.shape[0]
steps = np.arange(0,n,stepsize)
# compute trajectory length
last_pos = data[0,1:4]
trajectory_length = 0
for i in steps[1:]:
new_pos = data[i,1:4]
trajectory_length += np.linalg.norm(new_pos-last_pos)
last_pos = new_pos
print 'trajectory length = ' + str(trajectory_length) + 'm'
print 'height mean = ' + str(np.mean(data[:,3])) + 'm'
print 'height median = ' + str(np.median(data[:,3])) + 'm'
print 'height std = ' + str(np.std(data[:,3])) + 'm'
print 'duration = ' + str(data[-1,0]-data[0,0]) + 's'
print 'speed = ' + str(trajectory_length/(data[-1,0]-data[0,0])) + 'm/s'
| gpl-3.0 |
IraKorshunova/kaggle-seizure-prediction | cnn/conv_net.py | 1 | 8166 | import numpy as np
import theano
import theano.tensor as T
from theano import Param
from sklearn.metrics import roc_curve, auc
from utils.train_iterator import RandomTrainIterator
from layers.softmax_layer import SoftmaxLayer
from layers.feature_extractor import FeatureExtractor
class ConvNet(object):
def __init__(self, param_dict):
self.param_dict = param_dict
self.training_batch_size = param_dict['training_batch_size']
nkerns = param_dict['nkerns']
recept_width = param_dict['recept_width']
pool_width = param_dict['pool_width']
stride = param_dict['stride']
dropout_prob = param_dict['dropout_prob']
weight_decay = param_dict['l2_reg']
activation = param_dict['activation']
weights_variance = param_dict['weights_variance']
n_channels = param_dict['n_channels']
n_timesteps = param_dict['n_timesteps']
n_fbins = param_dict['n_fbins']
global_pooling = param_dict['global_pooling']
rng = np.random.RandomState(23455)
self.training_mode = T.iscalar('training_mode')
self.x = T.tensor4('x')
self.y = T.bvector('y')
self.batch_size = theano.shared(self.training_batch_size)
self.input = self.x.reshape((self.batch_size, 1, n_channels * n_fbins, n_timesteps))
self.feature_extractor = FeatureExtractor(rng, self.input, nkerns, recept_width, pool_width, stride,
self.training_mode,
dropout_prob[0],
activation, weights_variance, n_channels, n_timesteps, n_fbins,
global_pooling)
self.classifier = SoftmaxLayer(rng=rng, input=self.feature_extractor.output, n_in=nkerns[-1],
training_mode=self.training_mode, dropout_prob=dropout_prob[-1])
self.weights = self.feature_extractor.weights + self.classifier.weights
# ---------------------- BACKPROP
self.cost = self.classifier.cross_entropy_cost(self.y)
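# L2 penalty over every other entry of self.weights (presumably the weight
# matrices, with the interleaved bias vectors left un-decayed)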
L2_sqr = sum((weight ** 2).sum() for weight in self.weights[::2])
self.grads = T.grad(self.cost + weight_decay * L2_sqr, self.weights)
self.updates = self.adadelta_updates(self.grads, self.weights)
# self.updates = self.nesterov_momentum(self.grads, self.weights)
# --------------------- FUNCTIONS
self.train_model = theano.function([self.x, self.y, Param(self.training_mode, default=1)],
outputs=self.cost,
updates=self.updates)
self.validate_model = theano.function([self.x, self.y, Param(self.training_mode, default=0)],
self.cost)
self.test_model = theano.function([self.x, Param(self.training_mode, default=0)],
self.classifier.p_y_given_x[:, 1])
def train(self, train_set, max_iter):
print 'training for', max_iter, 'iterations'
self.batch_size.set_value(self.training_batch_size)
train_set_iterator = RandomTrainIterator(train_set, self.training_batch_size)
done_looping = False
iter = 0
while not done_looping:
for train_x, train_y in train_set_iterator:
self.train_model(train_x, train_y)
# if iter % 10 == 0:
# self.batch_size.set_value(train_set[0].shape[0])
# print self.validate_model(train_set[0], train_set[1])
# self.batch_size.set_value(self.training_batch_size)
if iter > max_iter:
done_looping = True
break
iter += 1
def validate(self, train_set, valid_set, valid_freq, max_iter, fname_out):
train_set_iterator = RandomTrainIterator(train_set, self.training_batch_size)
valid_set_size = len(valid_set[1])
f_out = open(fname_out, 'w')
# ------------------------------ TRAINING
epoch = 0
iter = 0
best_ce = np.inf
best_iter_ce = 0
best_auc = 0
best_iter_auc = 0
done_looping = False
patience = 100000
patience_increase = 2
improvement_threshold = 0.995
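# early stopping: whenever validation cost beats the best so far by more
# than 0.5%, patience is extended to patience_increase * current iteration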
while iter < max_iter and not done_looping:
epoch += 1
for train_x, train_y in train_set_iterator:
self.train_model(train_x, train_y)
iter += 1
# ------------------------ VALIDATION
if iter % valid_freq == 0:
self.batch_size.set_value(valid_set_size)
cost_valid = self.validate_model(valid_set[0], valid_set[1])
auc_valid = self.get_auc(valid_set)
# print "%4s %7s %15s %15s %10s " % (
# epoch, iter, auc_valid, cost_valid,
# patience)
f_out.write("%s \t %s \t %s \n" % (
iter, auc_valid, cost_valid))
self.batch_size.set_value(self.training_batch_size)
if cost_valid <= best_ce:
if cost_valid < best_ce * improvement_threshold:
patience = max(patience, iter * patience_increase)
best_iter_ce = iter
best_ce = cost_valid
if auc_valid >= best_auc:
best_iter_auc = iter
best_auc = auc_valid
if patience <= iter:
done_looping = True
print 'best_iter_cost:', best_iter_ce, 'best_cost:', best_ce
print 'best_iter_auc:', best_iter_auc, 'best_auc:', best_auc
f_out.close()
return max(best_iter_ce, best_iter_auc)
def get_auc(self, data_xy):
x, y = data_xy[0], data_xy[1]
p_y_given_x = self.get_test_proba(x)
fpr, tpr, thresholds = roc_curve(y, p_y_given_x, pos_label=1)
roc_auc = auc(fpr, tpr)
return roc_auc
def get_test_proba(self, x_test):
self.batch_size.set_value(len(x_test))
p_y_given_x = self.test_model(x_test)
return p_y_given_x
def nesterov_momentum(self, grads, weights, learning_rate=0.001, momentum=0.9):
updates = []
for param_i, grad_i in zip(weights, grads):
mparam_i = theano.shared(np.zeros(param_i.get_value().shape, dtype=theano.config.floatX))
v = momentum * mparam_i - learning_rate * grad_i
w = param_i + momentum * v - learning_rate * grad_i
updates.append((mparam_i, v))
updates.append((param_i, w))
return updates
def adadelta_updates(self, grads, weights, learning_rate=0.01, rho=0.95, epsilon=1e-6):
accumulators = [theano.shared(np.zeros_like(param_i.get_value())) for param_i in weights]
delta_accumulators = [theano.shared(np.zeros_like(param_i.get_value())) for param_i in weights]
updates = []
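# AdaDelta: keep decaying averages of squared gradients (acc) and of
# squared updates (acc_delta); each step scales the gradient by
# sqrt(acc_delta + eps) / sqrt(acc + eps)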
for param_i, grad_i, acc_i, acc_delta_i in zip(weights, grads, accumulators, delta_accumulators):
acc_i_new = rho * acc_i + (1 - rho) * grad_i ** 2
updates.append((acc_i, acc_i_new))
update_i = grad_i * T.sqrt(acc_delta_i + epsilon) / T.sqrt(acc_i_new + epsilon)
updates.append((param_i, param_i - learning_rate * update_i))
acc_delta_i_new = rho * acc_delta_i + (1 - rho) * update_i ** 2
updates.append((acc_delta_i, acc_delta_i_new))
return updates
def get_state(self):
state = {}
state['params'] = self.param_dict
weights_vals = []
for p in self.weights:
weights_vals.append(p.get_value())
state['weights'] = weights_vals
return state
def set_weights(self, weights_vals):
for i, w in enumerate(weights_vals):
self.weights[i].set_value(w)
| mit |
hitszxp/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 17 | 6093 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
"""
MLE estimation of a one-dimensional Gaussian Process model.
Check random start optimization.
Test the interpolating property.
"""
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
"""
MLE estimation of a two-dimensional Gaussian Process model accounting for
anisotropy. Check random start optimization.
Test the interpolating property.
"""
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
assert_true(np.all(gp.theta_ >= thetaL)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
"""
MLE estimation of a two-dimensional Gaussian Process model accounting for
anisotropy. Check random start optimization.
Test the GP interpolation for 2D output
"""
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
"""
Repeat test_1d and test_2d for several built-in correlation
models specified as strings.
"""
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
"""
Repeat test_1d and test_2d with given regression weights (beta0) for
different regression models (Ordinary Kriging).
"""
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
"""
Test that an increasing number of random-starts of GP fitting only
increases the reduced likelihood function of the optimal theta.
"""
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
| bsd-3-clause |
Mctigger/KagglePlanetPytorch | datasets.py | 1 | 3591 | import os
import torch
from torch import np
from torch.utils.data.dataset import Dataset
from sklearn.preprocessing import MultiLabelBinarizer
import skimage.io
tags = [
'blooming',
'selective_logging',
'blow_down',
'conventional_mine',
'bare_ground',
'artisinal_mine',
'primary',
'agriculture',
'water',
'habitation',
'road',
'cultivation',
'slash_burn'
]
tags_weather = [
'cloudy',
'partly_cloudy',
'haze',
'clear'
]
mlb = MultiLabelBinarizer()
mlb = mlb.fit([tags, tags_weather])
class KaggleAmazonDataset(Dataset):
def __init__(self, dataframe, img_path, transform):
self.img_path = img_path
self.transform = transform
self.X_train = dataframe['image_name'].as_matrix()
self.y_train = mlb.transform(dataframe['tags'].str.split()).astype(np.float32)
def __len__(self):
return len(self.X_train)
class KaggleAmazonJPGDataset(KaggleAmazonDataset):
def __init__(self, dataframe, img_path, transform, divide=True):
super(KaggleAmazonJPGDataset, self).__init__(dataframe, img_path, transform)
self.divide = divide
def __getitem__(self, index):
img = skimage.io.imread(self.img_path + self.X_train[index] + '.jpg')
if self.divide:
img = img / 255
if self.transform:
img = self.transform(img)
label = torch.from_numpy(self.y_train[index])
return img, label
class KaggleAmazonTestDataset(Dataset):
def __init__(self, test_images, img_path, img_ext, transform, divide=True):
self.img_path = img_path
self.img_ext = img_ext
self.transform = transform
self.test_images = test_images
self.divide = divide
def __getitem__(self, index):
img = skimage.io.imread(self.img_path + self.test_images[index] + self.img_ext)
if self.divide:
img = img / 255
img = self.transform(img)
return img, self.test_images[index]
def __len__(self):
return len(self.test_images)
class KaggleAmazonUnsupervisedDataset(Dataset):
def __init__(self, paths, img_path, img_ext, transform_train, transform_val, y_train):
self.img_path = img_path
self.img_ext = img_ext
self.transform_train = transform_train
self.transform = transform_train
self.transform_val = transform_val
self.X_train = paths
self.y_train = y_train
def __getitem__(self, index):
img = skimage.io.imread(self.img_path + self.X_train[index] + self.img_ext)
if self.transform:
img = self.transform(img)
label = torch.from_numpy(self.y_train[index])
return img, label
def __len__(self):
return len(self.X_train)
class KaggleAmazonSemiSupervisedDataset(Dataset):
def __init__(self, supervised, unsupervised, transform, indices=True):
self.supervised = supervised
self.unsupervised = unsupervised
self.transform = transform
self.indices = indices
def __getitem__(self, index):
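# i flags the sample's origin (0 = supervised set, 1 = unsupervised set)
# and is returned alongside (x, y) when self.indices is True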
if index < len(self.supervised):
x, y = self.supervised[index]
i = 0
else:
x, y = self.unsupervised[index-len(self.supervised)]
i = 1
if self.transform:
x = self.transform(x)
if self.indices:
return x, y, i
else:
return x, y
def __len__(self):
return len(self.supervised) + len(self.unsupervised)
if __name__ == "__main__":
print(mlb.classes_)
| mit |
anirudhjayaraman/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
| bsd-3-clause |
jreback/pandas | pandas/tests/frame/methods/test_equals.py | 1 | 2714 | import numpy as np
from pandas import DataFrame, date_range
import pandas._testing as tm
class TestEquals:
def test_dataframe_not_equal(self):
# see GH#28839
df1 = DataFrame({"a": [1, 2], "b": ["s", "d"]})
df2 = DataFrame({"a": ["s", "d"], "b": [1, 2]})
assert df1.equals(df2) is False
def test_equals_different_blocks(self):
# GH#9330
df0 = DataFrame({"A": ["x", "y"], "B": [1, 2], "C": ["w", "z"]})
df1 = df0.reset_index()[["A", "B", "C"]]
# this assert verifies that the above operations have
# induced a block rearrangement
assert df0._mgr.blocks[0].dtype != df1._mgr.blocks[0].dtype
# do the real tests
tm.assert_frame_equal(df0, df1)
assert df0.equals(df1)
assert df1.equals(df0)
def test_equals(self):
# Add object dtype column with nans
index = np.random.random(10)
df1 = DataFrame(np.random.random(10), index=index, columns=["floats"])
df1["text"] = "the sky is so blue. we could use more chocolate.".split()
df1["start"] = date_range("2000-1-1", periods=10, freq="T")
df1["end"] = date_range("2000-1-1", periods=10, freq="D")
df1["diff"] = df1["end"] - df1["start"]
df1["bool"] = np.arange(10) % 3 == 0
df1.loc[::2] = np.nan
df2 = df1.copy()
assert df1["text"].equals(df2["text"])
assert df1["start"].equals(df2["start"])
assert df1["end"].equals(df2["end"])
assert df1["diff"].equals(df2["diff"])
assert df1["bool"].equals(df2["bool"])
assert df1.equals(df2)
assert not df1.equals(object)
# different dtype
different = df1.copy()
different["floats"] = different["floats"].astype("float32")
assert not df1.equals(different)
# different index
different_index = -index
different = df2.set_index(different_index)
assert not df1.equals(different)
# different columns
different = df2.copy()
different.columns = df2.columns[::-1]
assert not df1.equals(different)
# DatetimeIndex
index = date_range("2000-1-1", periods=10, freq="T")
df1 = df1.set_index(index)
df2 = df1.copy()
assert df1.equals(df2)
# MultiIndex
df3 = df1.set_index(["text"], append=True)
df2 = df1.set_index(["text"], append=True)
assert df3.equals(df2)
df2 = df1.set_index(["floats"], append=True)
assert not df3.equals(df2)
# NaN in index
df3 = df1.set_index(["floats"], append=True)
df2 = df1.set_index(["floats"], append=True)
assert df3.equals(df2)
| bsd-3-clause |
myusuf3/vincent | examples/stacked_bar_examples.py | 11 | 2691 | # -*- coding: utf-8 -*-
"""
Vincent Stacked Bar Examples
"""
#Build a Stacked Bar Chart from scratch
import pandas as pd
from vincent import *
farm_1 = {'apples': 10, 'berries': 32, 'squash': 21, 'melons': 13, 'corn': 18}
farm_2 = {'apples': 15, 'berries': 40, 'squash': 17, 'melons': 10, 'corn': 22}
farm_3 = {'apples': 6, 'berries': 24, 'squash': 22, 'melons': 16, 'corn': 30}
farm_4 = {'apples': 12, 'berries': 30, 'squash': 15, 'melons': 9, 'corn': 15}
farm_5 = {'apples': 20, 'berries': 35, 'squash': 19, 'melons': 17, 'corn': 19}
farm_6 = {'apples': 3, 'berries': 28, 'squash': 21, 'melons': 11, 'corn': 23}
data = [farm_1, farm_2, farm_3, farm_4, farm_5, farm_6]
index = ['Farm 1', 'Farm 2', 'Farm 3', 'Farm 4', 'Farm 5', 'Farm 6']
df = pd.DataFrame(data, index=index)
vis = Visualization(width=500, height=300)
vis.padding = {'top': 10, 'left': 50, 'bottom': 50, 'right': 100}
data = Data.from_pandas(df)
vis.data['table'] = data
facets = Transform(type='facet', keys=['data.idx'])
stats = Transform(type='stats', value='data.val')
stat_dat = Data(name='stats', source='table', transform=[facets, stats])
vis.data['stats'] = stat_dat
vis.scales['x'] = Scale(name='x', type='ordinal', range='width',
domain=DataRef(data='table', field="data.idx"))
vis.scales['y'] = Scale(name='y', range='height', type='linear', nice=True,
domain=DataRef(data='stats', field="sum"))
vis.scales['color'] = Scale(name='color', type='ordinal',
domain=DataRef(data='table', field='data.col'),
range='category20')
vis.axes.extend([Axis(type='x', scale='x'),
Axis(type='y', scale='y')])
facet = Transform(type='facet', keys=['data.col'])
stack = Transform(type='stack', point='data.idx', height='data.val')
transform = MarkRef(data='table',transform=[facet, stack])
enter_props = PropertySet(x=ValueRef(scale='x', field="data.idx"),
y=ValueRef(scale='y', field="y"),
width=ValueRef(scale='x', band=True, offset=-1),
y2=ValueRef(field='y2', scale='y'),
fill=ValueRef(scale='color', field='data.col'))
mark = Mark(type='group', from_=transform,
marks=[Mark(type='rect',
properties=MarkProperties(enter=enter_props))])
vis.marks.append(mark)
vis.axis_titles(x='Farms', y='Total Produce')
vis.legend(title='Produce Type')
vis.to_json('vega.json')
#Convenience method
vis = StackedBar(df)
vis.axis_titles(x='Farms', y='Total Produce')
vis.legend(title='Produce Type')
vis.scales['x'].padding = 0.2
vis.colors(brew='Set2')
vis.to_json('vega.json')
| mit |
anntzer/scikit-learn | sklearn/model_selection/_search_successive_halving.py | 3 | 40300 | from math import ceil, floor, log
from abc import abstractmethod
from numbers import Integral
import numpy as np
from ._search import _check_param_grid
from ._search import BaseSearchCV
from . import ParameterGrid, ParameterSampler
from ..utils.validation import _num_samples
from ..base import is_classifier
from ._split import check_cv, _yields_constant_splits
from ..utils import resample
__all__ = ['HalvingGridSearchCV', 'HalvingRandomSearchCV']
class _SubsampleMetaSplitter:
"""Splitter that subsamples a given fraction of the dataset"""
def __init__(self, *, base_cv, fraction, subsample_test, random_state):
self.base_cv = base_cv
self.fraction = fraction
self.subsample_test = subsample_test
self.random_state = random_state
def split(self, X, y, groups=None):
for train_idx, test_idx in self.base_cv.split(X, y, groups):
train_idx = resample(
train_idx, replace=False, random_state=self.random_state,
n_samples=int(self.fraction * train_idx.shape[0])
)
if self.subsample_test:
test_idx = resample(
test_idx, replace=False, random_state=self.random_state,
n_samples=int(self.fraction * test_idx.shape[0])
)
yield train_idx, test_idx
def _refit_callable(results):
# Custom refit callable to return the index of the best candidate. We want
# the best candidate out of the last iteration. By default BaseSearchCV
# would return the best candidate out of all iterations.
last_iter = np.max(results['iter'])
last_iter_indices = np.flatnonzero(results['iter'] == last_iter)
best_idx = np.argmax(results['mean_test_score'][last_iter_indices])
return last_iter_indices[best_idx]
def _top_k(results, k, itr):
# Return the best candidates of a given iteration
iteration, mean_test_score, params = (
np.asarray(a) for a in (results['iter'],
results['mean_test_score'],
results['params'])
)
iter_indices = np.flatnonzero(iteration == itr)
sorted_indices = np.argsort(mean_test_score[iter_indices])
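# scores are sorted ascending, so the last k entries are the best candidates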
return np.array(params[iter_indices][sorted_indices[-k:]])
class BaseSuccessiveHalving(BaseSearchCV):
"""Implements successive halving.
Ref:
Almost optimal exploration in multi-armed bandits, ICML 13
Zohar Karnin, Tomer Koren, Oren Somekh
"""
def __init__(self, estimator, *, scoring=None,
n_jobs=None, refit=True, cv=5, verbose=0, random_state=None,
error_score=np.nan, return_train_score=True,
max_resources='auto', min_resources='exhaust',
resource='n_samples', factor=3, aggressive_elimination=False):
refit = _refit_callable if refit else False
super().__init__(estimator, scoring=scoring,
n_jobs=n_jobs, refit=refit, cv=cv,
verbose=verbose,
error_score=error_score,
return_train_score=return_train_score)
self.random_state = random_state
self.max_resources = max_resources
self.resource = resource
self.factor = factor
self.min_resources = min_resources
self.aggressive_elimination = aggressive_elimination
def _check_input_parameters(self, X, y, groups):
if self.scoring is not None and not (isinstance(self.scoring, str)
or callable(self.scoring)):
raise ValueError('scoring parameter must be a string, '
'a callable or None. Multimetric scoring is not '
'supported.')
# We need to enforce that successive calls to cv.split() yield the same
# splits: see https://github.com/scikit-learn/scikit-learn/issues/15149
if not _yields_constant_splits(self._checked_cv_orig):
raise ValueError(
"The cv parameter must yield consistent folds across "
"calls to split(). Set its random_state to an int, or set "
"shuffle=False."
)
if (self.resource != 'n_samples'
and self.resource not in self.estimator.get_params()):
raise ValueError(
f'Cannot use resource={self.resource} which is not supported '
f'by estimator {self.estimator.__class__.__name__}'
)
if (isinstance(self.max_resources, str) and
self.max_resources != 'auto'):
raise ValueError(
"max_resources must be either 'auto' or a positive integer"
)
if self.max_resources != 'auto' and (
not isinstance(self.max_resources, Integral) or
self.max_resources <= 0):
raise ValueError(
"max_resources must be either 'auto' or a positive integer"
)
if self.min_resources not in ('smallest', 'exhaust') and (
not isinstance(self.min_resources, Integral) or
self.min_resources <= 0):
raise ValueError(
"min_resources must be either 'smallest', 'exhaust', "
"or a positive integer "
"no greater than max_resources."
)
if isinstance(self, HalvingRandomSearchCV):
if self.min_resources == self.n_candidates == 'exhaust':
# for n_candidates=exhaust to work, we need to know what
# min_resources is. Similarly min_resources=exhaust needs to
# know the actual number of candidates.
raise ValueError(
"n_candidates and min_resources cannot be both set to "
"'exhaust'."
)
if self.n_candidates != 'exhaust' and (
not isinstance(self.n_candidates, Integral) or
self.n_candidates <= 0):
raise ValueError(
"n_candidates must be either 'exhaust' "
"or a positive integer"
)
self.min_resources_ = self.min_resources
if self.min_resources_ in ('smallest', 'exhaust'):
if self.resource == 'n_samples':
n_splits = self._checked_cv_orig.get_n_splits(X, y, groups)
# please see https://gph.is/1KjihQe for a justification
magic_factor = 2
self.min_resources_ = n_splits * magic_factor
if is_classifier(self.estimator):
n_classes = np.unique(y).shape[0]
self.min_resources_ *= n_classes
else:
self.min_resources_ = 1
# if 'exhaust', min_resources_ might be set to a higher value later
# in _run_search
self.max_resources_ = self.max_resources
if self.max_resources_ == 'auto':
if not self.resource == 'n_samples':
raise ValueError(
"max_resources can only be 'auto' if resource='n_samples'")
self.max_resources_ = _num_samples(X)
if self.min_resources_ > self.max_resources_:
raise ValueError(
f'min_resources_={self.min_resources_} is greater '
f'than max_resources_={self.max_resources_}.'
)
def fit(self, X, y=None, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_output), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
self._checked_cv_orig = check_cv(
self.cv, y, classifier=is_classifier(self.estimator))
self._check_input_parameters(
X=X,
y=y,
groups=groups,
)
self._n_samples_orig = _num_samples(X)
super().fit(X, y=y, groups=None, **fit_params)
# Set best_score_: BaseSearchCV does not set it, as refit is a callable
self.best_score_ = (
self.cv_results_['mean_test_score'][self.best_index_])
return self
def _run_search(self, evaluate_candidates):
candidate_params = self._generate_candidate_params()
if self.resource != 'n_samples' and any(
self.resource in candidate for candidate in candidate_params):
# Can only check this now since we need the candidates list
raise ValueError(
f"Cannot use parameter {self.resource} as the resource since "
"it is part of the searched parameters."
)
# n_required_iterations is the number of iterations needed so that the
# last iterations evaluates less than `factor` candidates.
n_required_iterations = 1 + floor(log(len(candidate_params),
self.factor))
if self.min_resources == 'exhaust':
# To exhaust the resources, we want to start with the biggest
# min_resources possible so that the last (required) iteration
# uses as many resources as possible
last_iteration = n_required_iterations - 1
self.min_resources_ = max(
self.min_resources_,
self.max_resources_ // self.factor**last_iteration
)
# n_possible_iterations is the number of iterations that we can
# actually do starting from min_resources and without exceeding
# max_resources. Depending on max_resources and the number of
# candidates, this may be higher or smaller than
# n_required_iterations.
n_possible_iterations = 1 + floor(log(
self.max_resources_ // self.min_resources_, self.factor))
if self.aggressive_elimination:
n_iterations = n_required_iterations
else:
n_iterations = min(n_possible_iterations, n_required_iterations)
if self.verbose:
print(f'n_iterations: {n_iterations}')
print(f'n_required_iterations: {n_required_iterations}')
print(f'n_possible_iterations: {n_possible_iterations}')
print(f'min_resources_: {self.min_resources_}')
print(f'max_resources_: {self.max_resources_}')
print(f'aggressive_elimination: {self.aggressive_elimination}')
print(f'factor: {self.factor}')
self.n_resources_ = []
self.n_candidates_ = []
for itr in range(n_iterations):
power = itr # default
if self.aggressive_elimination:
# this will set n_resources to the initial value (i.e. the
# value of n_resources at the first iteration) for as many
# iterations as needed (while candidates are being
# eliminated), and then go on as usual.
power = max(
0,
itr - n_required_iterations + n_possible_iterations
)
n_resources = int(self.factor**power * self.min_resources_)
# guard, probably not needed
n_resources = min(n_resources, self.max_resources_)
self.n_resources_.append(n_resources)
n_candidates = len(candidate_params)
self.n_candidates_.append(n_candidates)
if self.verbose:
print('-' * 10)
print(f'iter: {itr}')
print(f'n_candidates: {n_candidates}')
print(f'n_resources: {n_resources}')
if self.resource == 'n_samples':
# subsampling will be done in cv.split()
cv = _SubsampleMetaSplitter(
base_cv=self._checked_cv_orig,
fraction=n_resources / self._n_samples_orig,
subsample_test=True,
random_state=self.random_state
)
else:
# Need copy so that the n_resources of next iteration does
# not overwrite
candidate_params = [c.copy() for c in candidate_params]
for candidate in candidate_params:
candidate[self.resource] = n_resources
cv = self._checked_cv_orig
more_results = {'iter': [itr] * n_candidates,
'n_resources': [n_resources] * n_candidates}
results = evaluate_candidates(candidate_params, cv,
more_results=more_results)
n_candidates_to_keep = ceil(n_candidates / self.factor)
candidate_params = _top_k(results, n_candidates_to_keep, itr)
self.n_remaining_candidates_ = len(candidate_params)
self.n_required_iterations_ = n_required_iterations
self.n_possible_iterations_ = n_possible_iterations
self.n_iterations_ = n_iterations
@abstractmethod
def _generate_candidate_params(self):
pass
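# Illustrative schedule (not part of the implementation): with
# min_resources_=20, max_resources_=1000, factor=3 and at least 27
# candidates, _run_search above performs
# n_possible_iterations = 1 + floor(log(1000 // 20, 3)) = 4 iterations,
# allocating 20, 60, 180 and 540 samples respectively.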
class HalvingGridSearchCV(BaseSuccessiveHalving):
"""Search over specified parameter values with successive halving.
The search strategy starts evaluating all the candidates with a small
amount of resources and iteratively selects the best candidates, using
more and more resources.
Read more in the :ref:`User guide <successive_halving_user_guide>`.
.. note::
This estimator is still **experimental** for now: the predictions
and the API might change without any deprecation cycle. To use it,
you need to explicitly import ``enable_halving_search_cv``::
>>> # explicitly require this experimental feature
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> # now you can import normally from model_selection
>>> from sklearn.model_selection import HalvingGridSearchCV
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
factor : int or float, default=3
The 'halving' parameter, which determines the proportion of candidates
that are selected for each subsequent iteration. For example,
``factor=3`` means that only one third of the candidates are selected.
resource : ``'n_samples'`` or str, default='n_samples'
Defines the resource that increases with each iteration. By default,
the resource is the number of samples. It can also be set to any
parameter of the base estimator that accepts positive integer
values, e.g. 'n_iterations' or 'n_estimators' for a gradient
boosting estimator. In this case ``max_resources`` cannot be 'auto'
and must be set explicitly.
max_resources : int, default='auto'
The maximum amount of resource that any candidate is allowed to use
for a given iteration. By default, this is set to ``n_samples`` when
``resource='n_samples'`` (default), else an error is raised.
min_resources : {'exhaust', 'smallest'} or int, default='exhaust'
The minimum amount of resource that any candidate is allowed to use
for a given iteration. Equivalently, this defines the amount of
resources `r0` that are allocated for each candidate at the first
iteration.
- 'smallest' is a heuristic that sets `r0` to a small value:
- ``n_splits * 2`` when ``resource='n_samples'`` for a regression
problem
- ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a
classification problem
- ``1`` when ``resource != 'n_samples'``
- 'exhaust' will set `r0` such that the **last** iteration uses as
much resources as possible. Namely, the last iteration will use the
highest value smaller than ``max_resources`` that is a multiple of
both ``min_resources`` and ``factor``. In general, using 'exhaust'
leads to a more accurate estimator, but is slightly more time
consuming.
Note that the amount of resources used at each iteration is always a
multiple of ``min_resources``.
aggressive_elimination : bool, default=False
This is only relevant in cases where there isn't enough resources to
reduce the remaining candidates to at most `factor` after the last
iteration. If ``True``, then the search process will 'replay' the
first iteration for as long as needed until the number of candidates
is small enough. This is ``False`` by default, which means that the
last iteration may evaluate more than ``factor`` candidates. See
:ref:`aggressive_elimination` for more details.
cv : int, cross-validation generator or iterable, default=5
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. note::
Due to implementation details, the folds produced by `cv` must be
the same across multiple calls to `cv.split()`. For
built-in `scikit-learn` iterators, this can be achieved by
deactivating shuffling (`shuffle=False`), or by setting the
`cv`'s `random_state` parameter to an integer.
scoring : string, callable, or None, default=None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
If None, the estimator's score method is used.
refit : bool, default=True
If True, refit an estimator using the best found parameters on the
whole dataset.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``HalvingGridSearchCV`` instance.
error_score : 'raise' or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error. Default is ``np.nan``
return_train_score : bool, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
random_state : int, RandomState instance or None, default=None
Pseudo random number generator state used for subsampling the dataset
when `resources != 'n_samples'`. Ignored otherwise.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int or None, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int
Controls the verbosity: the higher, the more messages.
Attributes
----------
n_resources_ : list of int
The amount of resources used at each iteration.
n_candidates_ : list of int
The number of candidate parameters that were evaluated at each
iteration.
n_remaining_candidates_ : int
The number of candidate parameters that are left after the last
        iteration. It corresponds to `ceil(n_candidates[-1] / factor)`.
max_resources_ : int
The maximum number of resources that any candidate is allowed to use
for a given iteration. Note that since the number of resources used
at each iteration must be a multiple of ``min_resources_``, the
actual number of resources used at the last iteration may be smaller
than ``max_resources_``.
min_resources_ : int
The amount of resources that are allocated for each candidate at the
first iteration.
n_iterations_ : int
The actual number of iterations that were run. This is equal to
``n_required_iterations_`` if ``aggressive_elimination`` is ``True``.
Else, this is equal to ``min(n_possible_iterations_,
n_required_iterations_)``.
n_possible_iterations_ : int
The number of iterations that are possible starting with
``min_resources_`` resources and without exceeding
``max_resources_``.
n_required_iterations_ : int
        The number of iterations that are required to end up with fewer than
        ``factor`` candidates at the last iteration, starting with
        ``min_resources_`` resources. This will be smaller than
        ``n_possible_iterations_`` when there aren't enough resources.
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
        imported into a pandas ``DataFrame``. It contains a lot of information
        for analysing the results of a search.
Please refer to the :ref:`User guide<successive_halving_cv_results>`
for details.
best_estimator_ : estimator or dict
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
best_score_ : float
        Mean cross-validated score of the ``best_estimator_``.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
refit_time_ : float
Seconds used for refitting the best model on the whole dataset.
This is present only if ``refit`` is not False.
See Also
--------
:class:`HalvingRandomSearchCV`:
Random search over a set of parameters using successive halving.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> from sklearn.model_selection import HalvingGridSearchCV
...
>>> X, y = load_iris(return_X_y=True)
>>> clf = RandomForestClassifier(random_state=0)
...
>>> param_grid = {"max_depth": [3, None],
... "min_samples_split": [5, 10]}
>>> search = HalvingGridSearchCV(clf, param_grid, resource='n_estimators',
... max_resources=10,
... random_state=0).fit(X, y)
>>> search.best_params_ # doctest: +SKIP
{'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9}
"""
_required_parameters = ["estimator", "param_grid"]
def __init__(self, estimator, param_grid, *,
factor=3, resource='n_samples', max_resources='auto',
min_resources='exhaust', aggressive_elimination=False,
cv=5, scoring=None, refit=True, error_score=np.nan,
return_train_score=True, random_state=None, n_jobs=None,
verbose=0):
super().__init__(estimator, scoring=scoring,
n_jobs=n_jobs, refit=refit, verbose=verbose, cv=cv,
random_state=random_state, error_score=error_score,
return_train_score=return_train_score,
max_resources=max_resources, resource=resource,
factor=factor, min_resources=min_resources,
aggressive_elimination=aggressive_elimination)
self.param_grid = param_grid
_check_param_grid(self.param_grid)
def _generate_candidate_params(self):
return ParameterGrid(self.param_grid)
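# Illustrative sketch (not part of scikit-learn): how the successive-halving
# schedule described above plays out for hypothetical values min_resources=20,
# max_resources=1000 and factor=3. Each iteration multiplies the resources by
# ``factor`` and keeps roughly 1/``factor`` of the candidates, so every
# resource budget is a multiple of ``min_resources``.
def _halving_schedule_sketch(n_candidates=20, min_resources=20,
                             max_resources=1000, factor=3):
    """Return [(n_candidates, n_resources), ...], one tuple per iteration."""
    from math import ceil
    schedule = []
    resources = min_resources
    while n_candidates >= 1 and resources <= max_resources:
        schedule.append((n_candidates, resources))
        if n_candidates <= factor:
            break
        n_candidates = ceil(n_candidates / factor)
        resources *= factor
    return schedule
# _halving_schedule_sketch() == [(20, 20), (7, 60), (3, 180)]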
class HalvingRandomSearchCV(BaseSuccessiveHalving):
"""Randomized search on hyper parameters.
The search strategy starts evaluating all the candidates with a small
amount of resources and iteratively selects the best candidates, using more
and more resources.
The candidates are sampled at random from the parameter space and the
number of sampled candidates is determined by ``n_candidates``.
Read more in the :ref:`User guide<successive_halving_user_guide>`.
.. note::
This estimator is still **experimental** for now: the predictions
and the API might change without any deprecation cycle. To use it,
you need to explicitly import ``enable_halving_search_cv``::
>>> # explicitly require this experimental feature
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> # now you can import normally from model_selection
>>> from sklearn.model_selection import HalvingRandomSearchCV
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
        Dictionary with parameter names (``str``) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
    n_candidates : int or 'exhaust', default='exhaust'
The number of candidate parameters to sample, at the first
iteration. Using 'exhaust' will sample enough candidates so that the
last iteration uses as many resources as possible, based on
`min_resources`, `max_resources` and `factor`. In this case,
`min_resources` cannot be 'exhaust'.
factor : int or float, default=3
The 'halving' parameter, which determines the proportion of candidates
that are selected for each subsequent iteration. For example,
``factor=3`` means that only one third of the candidates are selected.
resource : ``'n_samples'`` or str, default='n_samples'
Defines the resource that increases with each iteration. By default,
the resource is the number of samples. It can also be set to any
parameter of the base estimator that accepts positive integer
values, e.g. 'n_iterations' or 'n_estimators' for a gradient
boosting estimator. In this case ``max_resources`` cannot be 'auto'
and must be set explicitly.
max_resources : int, default='auto'
The maximum number of resources that any candidate is allowed to use
        for a given iteration. By default, this is set to ``n_samples`` when
``resource='n_samples'`` (default), else an error is raised.
min_resources : {'exhaust', 'smallest'} or int, default='smallest'
The minimum amount of resource that any candidate is allowed to use
for a given iteration. Equivalently, this defines the amount of
resources `r0` that are allocated for each candidate at the first
iteration.
- 'smallest' is a heuristic that sets `r0` to a small value:
- ``n_splits * 2`` when ``resource='n_samples'`` for a regression
problem
- ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a
classification problem
- ``1`` when ``resource != 'n_samples'``
        - 'exhaust' will set `r0` such that the **last** iteration uses as
          many resources as possible. Namely, the last iteration will use the
          highest value smaller than ``max_resources`` that is a multiple of
          both ``min_resources`` and ``factor``. In general, using 'exhaust'
          leads to a more accurate estimator, but is slightly more
          time-consuming. 'exhaust' isn't available when `n_candidates='exhaust'`.
Note that the amount of resources used at each iteration is always a
multiple of ``min_resources``.
aggressive_elimination : bool, default=False
        This is only relevant in cases where there aren't enough resources to
reduce the remaining candidates to at most `factor` after the last
iteration. If ``True``, then the search process will 'replay' the
first iteration for as long as needed until the number of candidates
is small enough. This is ``False`` by default, which means that the
last iteration may evaluate more than ``factor`` candidates. See
:ref:`aggressive_elimination` for more details.
cv : int, cross-validation generator or an iterable, default=5
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. note::
Due to implementation details, the folds produced by `cv` must be
the same across multiple calls to `cv.split()`. For
built-in `scikit-learn` iterators, this can be achieved by
deactivating shuffling (`shuffle=False`), or by setting the
`cv`'s `random_state` parameter to an integer.
scoring : string, callable, or None, default=None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
If None, the estimator's score method is used.
refit : bool, default=True
If True, refit an estimator using the best found parameters on the
whole dataset.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``HalvingRandomSearchCV`` instance.
error_score : 'raise' or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error. Default is ``np.nan``.
    return_train_score : bool, default=True
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
        However, computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
random_state : int, RandomState instance or None, default=None
Pseudo random number generator state used for subsampling the dataset
        when `resource != 'n_samples'`. Also used for random uniform
sampling from lists of possible values instead of scipy.stats
distributions.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int or None, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int
Controls the verbosity: the higher, the more messages.
Attributes
----------
n_resources_ : list of int
The amount of resources used at each iteration.
n_candidates_ : list of int
The number of candidate parameters that were evaluated at each
iteration.
n_remaining_candidates_ : int
The number of candidate parameters that are left after the last
        iteration. It corresponds to `ceil(n_candidates[-1] / factor)`.
max_resources_ : int
The maximum number of resources that any candidate is allowed to use
for a given iteration. Note that since the number of resources used at
each iteration must be a multiple of ``min_resources_``, the actual
number of resources used at the last iteration may be smaller than
``max_resources_``.
min_resources_ : int
The amount of resources that are allocated for each candidate at the
first iteration.
n_iterations_ : int
The actual number of iterations that were run. This is equal to
``n_required_iterations_`` if ``aggressive_elimination`` is ``True``.
Else, this is equal to ``min(n_possible_iterations_,
n_required_iterations_)``.
n_possible_iterations_ : int
The number of iterations that are possible starting with
``min_resources_`` resources and without exceeding
``max_resources_``.
n_required_iterations_ : int
        The number of iterations that are required to end up with fewer than
        ``factor`` candidates at the last iteration, starting with
        ``min_resources_`` resources. This will be smaller than
        ``n_possible_iterations_`` when there aren't enough resources.
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
        imported into a pandas ``DataFrame``. It contains a lot of information
        for analysing the results of a search.
Please refer to the :ref:`User guide<successive_halving_cv_results>`
for details.
best_estimator_ : estimator or dict
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
best_score_ : float
        Mean cross-validated score of the ``best_estimator_``.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
refit_time_ : float
Seconds used for refitting the best model on the whole dataset.
This is present only if ``refit`` is not False.
See Also
--------
:class:`HalvingGridSearchCV`:
Search over a grid of parameters using successive halving.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> from sklearn.model_selection import HalvingRandomSearchCV
>>> from scipy.stats import randint
...
>>> X, y = load_iris(return_X_y=True)
>>> clf = RandomForestClassifier(random_state=0)
>>> np.random.seed(0)
...
>>> param_distributions = {"max_depth": [3, None],
... "min_samples_split": randint(2, 11)}
>>> search = HalvingRandomSearchCV(clf, param_distributions,
... resource='n_estimators',
... max_resources=10,
... random_state=0).fit(X, y)
>>> search.best_params_ # doctest: +SKIP
{'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9}
"""
_required_parameters = ["estimator", "param_distributions"]
def __init__(self, estimator, param_distributions, *,
n_candidates='exhaust', factor=3, resource='n_samples',
max_resources='auto', min_resources='smallest',
aggressive_elimination=False, cv=5, scoring=None,
refit=True, error_score=np.nan, return_train_score=True,
random_state=None, n_jobs=None, verbose=0):
super().__init__(estimator, scoring=scoring,
n_jobs=n_jobs, refit=refit, verbose=verbose, cv=cv,
random_state=random_state, error_score=error_score,
return_train_score=return_train_score,
max_resources=max_resources, resource=resource,
factor=factor, min_resources=min_resources,
aggressive_elimination=aggressive_elimination)
self.param_distributions = param_distributions
self.n_candidates = n_candidates
def _generate_candidate_params(self):
n_candidates_first_iter = self.n_candidates
if n_candidates_first_iter == 'exhaust':
            # This will generate enough candidates so that the last iteration
            # uses as many resources as possible
n_candidates_first_iter = (
self.max_resources_ // self.min_resources_)
return ParameterSampler(self.param_distributions,
n_candidates_first_iter,
random_state=self.random_state)
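# Illustrative sketch (not part of scikit-learn): what ``n_candidates='exhaust'``
# amounts to, with hypothetical numbers. Using the 'smallest' heuristic for a
# 3-class classification problem with 5 CV splits, min_resources_ would be
# n_classes * n_splits * 2 = 30; with max_resources_ = 1000 samples, 'exhaust'
# then samples max_resources_ // min_resources_ = 33 candidates for the first
# iteration, so that the last iteration can use as many resources as possible.
def _exhaust_n_candidates_sketch(max_resources=1000, n_classes=3, n_splits=5):
    min_resources = n_classes * n_splits * 2  # the 'smallest' heuristic
    return max_resources // min_resources
# _exhaust_n_candidates_sketch() == 33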
| bsd-3-clause |
dsandeephegde/servo | tests/heartbeats/process_logs.py | 139 | 16143 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
"""Attach some text labels.
"""
for rect in rects:
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * rect.get_height(), '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
"""Plot the raw totals for a configuration.
Keyword arguments:
config -- configuration name
plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
max_time, max_time_std, max_energy, max_energy_std -- single values
normalize -- True/False
"""
plot_data = sorted(plot_data)
keys = [p for (p, tt, tts, te, tes) in plot_data]
total_times = [tt for (p, tt, tts, te, tes) in plot_data]
total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
total_energies = [te for (p, tt, tts, te, tes) in plot_data]
total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
fig, ax1 = plt.subplots()
ind = np.arange(len(keys)) # the x locations for the groups
width = 0.35 # the width of the bars
# add some text for labels, title and axes ticks
ax1.set_title('Time/Energy Data for Configuration ' + config)
ax1.set_xticks(ind + width)
ax1.set_xticklabels(keys, rotation=45)
fig.set_tight_layout(True)
fig.set_size_inches(len(plot_data) / 1.5, 8)
ax2 = ax1.twinx()
# Normalize
if normalize:
total_times_std /= np.sum(total_times)
total_times /= np.sum(total_times)
total_energies_std /= np.sum(total_energies)
total_energies /= np.sum(total_energies)
ax1.set_ylabel('Time (Normalized)')
ax2.set_ylabel('Energy (Normalized)')
else:
        # convert raw times (ns) to ms and raw energies (uJ) to Joules
total_times_std /= np.array(1000000.0)
total_times /= np.array(1000000.0)
total_energies_std /= np.array(1000000.0)
total_energies /= np.array(1000000.0)
ax1.set_ylabel('Time (ms)')
ax2.set_ylabel('Energy (Joules)')
rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
# set axis
x1, x2, y1, y2 = plt.axis()
if normalize:
ax1.set_ylim(ymin=0, ymax=1)
ax2.set_ylim(ymin=0, ymax=1)
else:
ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
autolabel(rects1, ax1)
autolabel(rects2, ax2)
# plt.show()
plt.savefig(path.join(output_dir, config + ".png"))
plt.close(fig)
def create_raw_total_data(config_data):
"""Get the raw data to plot for a configuration
Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]
Keyword arguments:
config_data -- (trial, trial_data)
"""
# We can't assume that the same number of heartbeats are always issued across trials
# key: profiler name; value: list of timing sums for each trial
profiler_total_times = {}
# key: profiler name; value: list of energy sums for each trial
profiler_total_energies = {}
for (t, td) in config_data:
for (profiler, ts, te, es, ee) in td:
# sum the total times and energies for each profiler in this trial
total_time = np.sum(te - ts)
total_energy = np.sum(ee - es)
# add to list to be averaged later
time_list = profiler_total_times.get(profiler, [])
time_list.append(total_time)
profiler_total_times[profiler] = time_list
energy_list = profiler_total_energies.get(profiler, [])
energy_list.append(total_energy)
profiler_total_energies[profiler] = energy_list
# Get mean and stddev for time and energy totals
return [(profiler,
np.mean(profiler_total_times[profiler]),
np.std(profiler_total_times[profiler]),
np.mean(profiler_total_energies[profiler]),
np.std(profiler_total_energies[profiler]))
for profiler in profiler_total_times.keys()]
def plot_all_raw_totals(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
os.makedirs(raw_total_norm_out_dir)
raw_total_out_dir = path.join(output_dir, 'raw_totals')
os.makedirs(raw_total_out_dir)
# (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
mean_times = []
mean_times_std = []
mean_energies = []
mean_energies_std = []
for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
for (p, tt, tts, te, tes) in profiler_tup:
mean_times.append(tt)
mean_times_std.append(tts)
mean_energies.append(te)
mean_energies_std.append(tes)
# get consistent max time/energy values across plots
max_t = np.max(mean_times)
max_t_std = np.max(mean_times_std)
max_e = np.max(mean_energies)
max_e_std = np.max(mean_energies_std)
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
for data in raw_totals_data]
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
"""Plot time series for a single trial.
Keyword arguments:
config -- the config name
trial -- the trial name
trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
max_end_time -- single value to use as max X axis value (for consistency across trials)
output_dir -- the output directory
"""
# TODO: Some profilers may have parallel tasks - need to identify this on plots
max_end_time = max_end_time / 1000000.0
trial_data = sorted(trial_data)
fig, ax1 = plt.subplots()
keys = [p for (p, ts, te, es, ee) in trial_data]
# add some text for labels, title and axes ticks
ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
ax1.set_xlabel('Time (ms)')
ax1.grid(True)
width = 8 # the width of the bars
ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
ax1.set_yticklabels(keys)
ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
ax1.set_xlim(xmin=0, xmax=max_end_time)
fig.set_tight_layout(True)
fig.set_size_inches(16, len(trial_data) / 3)
i = 10
for (p, ts, te, es, ee) in trial_data:
xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0) for j in xrange(len(ts))]
ax1.broken_barh(xranges, (i - 0.5 * width, width))
i += 10
# place a vbar at the final time for this trial
last_profiler_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in trial_data]))
plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
power_times = []
power_values = []
for (p, ts, te, es, ee) in trial_data:
if p == ENERGY_PROFILER_NAME:
power_times = te / 1000000.0
power_values = (ee - es) / ((te - ts) / 1000.0)
ax2 = ax1.twinx()
ax2.set_xlim(xmin=0, xmax=max_end_time)
ax2.set_ylim(ymin=0, ymax=max_power)
ax2.set_ylabel('Power (Watts)')
ax2.plot(power_times, power_values, color='r')
# plt.show()
plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
"""Compute power from start and end energy and times.
Return: power values
"""
return (ee - es) / ((te - ts) / 1000.0)
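# Illustrative sketch (not part of the original script): using
# hb_energy_times_to_power with small synthetic readings. Assuming times are in
# nanoseconds and energies in microjoules (as the unit conversions in
# plot_raw_totals suggest), an interval of 1 ms that consumes 5000 uJ
# corresponds to 5 W.
def _example_power():
    es = np.array([0.0])
    ee = np.array([5000.0])  # microjoules consumed
    ts = np.array([0.0])
    te = np.array([1000000.0])  # 1 ms expressed in nanoseconds
    return hb_energy_times_to_power(es, ee, ts, te)  # -> array([ 5.])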
def plot_all_time_series(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
time_series_out_dir = path.join(output_dir, 'time_series')
os.makedirs(time_series_out_dir)
max_end_times = []
max_power_values = []
for (c, cd) in config_list:
for (t, td) in cd:
trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
max_end_times.append(np.nanmax(trial_max_end_times))
for (p, ts, te, es, ee) in td:
# We only care about the energy profiler (others aren't reliable for instant power anyway)
if p == ENERGY_PROFILER_NAME and len(te) > 0:
max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
max_time = np.nanmax(max_end_times)
max_power = np.nanmax(np.array(max_power_values)) * 1.2 # leave a little space at the top
for (config, config_data) in config_list:
[plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
"""Read a heartbeat log file.
Return: (profiler name, [start times], [end times], [start energies], [end energies], [instant powers])
Keyword arguments:
profiler_hb_log -- the file to read
"""
with warnings.catch_warnings():
try:
warnings.simplefilter("ignore")
time_start, time_end, energy_start, energy_end = \
np.loadtxt(profiler_hb_log,
dtype=np.dtype('uint64'),
skiprows=1,
usecols=(HB_LOG_IDX_START_TIME,
HB_LOG_IDX_END_TIME,
HB_LOG_IDX_START_ENERGY,
HB_LOG_IDX_END_ENERGY),
unpack=True,
ndmin=1)
except ValueError:
time_start, time_end, energy_start, energy_end = [], [], [], []
name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
return (name,
np.atleast_1d(time_start),
np.atleast_1d(time_end),
np.atleast_1d(energy_start),
np.atleast_1d(energy_end))
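# Illustrative sketch (not part of the original script): the log layout that
# read_heartbeat_log expects, inferred from the column indices above -- one
# header row, then whitespace-separated numeric columns with start/end times in
# columns 7/8 and start/end energies in columns 14/15, and the profiler name
# embedded in the file name as "<prefix>-<name>.log".
def _write_example_heartbeat_log(directory):
    header = " ".join("c%d" % i for i in range(16))
    row = " ".join(str(i) for i in range(16))
    log_path = path.join(directory, "heartbeat-ExampleProfiler.log")
    with open(log_path, "w") as f:
        f.write(header + "\n")
        f.write(row + "\n")
    return log_path
# Calling read_heartbeat_log on the produced file then returns the profiler
# name 'ExampleProfiler' together with the four selected columns.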
def process_trial_dir(trial_dir):
"""Process trial directory.
Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
Time and energy are normalized to 0 start values.
Keyword arguments:
trial_dir -- the directory for this trial
"""
log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
# Find the earliest timestamps and energy readings
min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
# Normalize timing/energy data to start values of 0
return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
"""Process a configuration directory.
Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]
Keyword arguments:
config_dir -- the directory for this configuration - contains subdirectories for each trial
"""
return [(trial_dir, process_trial_dir(path.join(config_dir, trial_dir))) for trial_dir in os.listdir(config_dir)]
def process_logs(log_dir):
"""Process log directory.
Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]
Keyword arguments:
log_dir -- the log directory to process - contains subdirectories for each configuration
"""
return [((config_dir.split('_')[1], process_config_dir(path.join(log_dir, config_dir))))
for config_dir in os.listdir(log_dir)]
def find_best_executions(log_dir):
"""Get the best time, energy, and power from the characterization summaries.
Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))
Keyword arguments:
    log_dir -- the log directory to process - contains subdirectories for each configuration
"""
DEFAULT = ('', '', 1000000000.0)
min_time = DEFAULT
min_energy = DEFAULT
min_power = DEFAULT
for config_dir in os.listdir(log_dir):
for trial_dir in os.listdir(path.join(log_dir, config_dir)):
with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
lines = s.readlines()
time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
if time < min_time[2]:
min_time = (config_dir, trial_dir, time)
if energy < min_energy[2]:
min_energy = (config_dir, trial_dir, energy)
                if power < min_power[2]:
min_power = (config_dir, trial_dir, power)
return (min_time, min_energy, min_power)
def main():
"""This script processes the log files from the "characterize.py" script and produces visualizations.
"""
# Default log directory
directory = 'heartbeat_logs'
# Default output directory
output_dir = 'plots'
# Default android
android = False
# Parsing the input of the script
parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
parser.add_argument("-d", "--directory",
default=directory,
help="Heartbeat log directory \"-d heartbeat_logs\"")
parser.add_argument("-o", "--output",
default=output_dir,
help="Specify the log output directory, for example \"-o plots\"")
parser.add_argument("--android",
action="store_true",
dest="android",
default=False,
help="Specify if processing results from Android")
args = parser.parse_args()
if args.directory:
directory = args.directory
if args.output:
output_dir = args.output
if args.android:
android = args.android
if not os.path.exists(directory):
print "Input directory does not exist: " + directory
sys.exit(1)
if os.path.exists(output_dir):
print "Output directory already exists: " + output_dir
sys.exit(1)
res = process_logs(directory)
if not android:
best = find_best_executions(directory)
print 'Best time:', best[0]
print 'Best energy:', best[1]
print 'Best power:', best[2]
os.makedirs(output_dir)
plot_all_raw_totals(res, output_dir)
plot_all_time_series(res, output_dir)
if __name__ == "__main__":
main()
| mpl-2.0 |
mit-crpg/openmc | tests/regression_tests/surface_tally/test.py | 9 | 7315 | import numpy as np
import openmc
import pandas as pd
from tests.testing_harness import PyAPITestHarness
class SurfaceTallyTestHarness(PyAPITestHarness):
def _build_inputs(self):
# Instantiate some Materials and register the appropriate Nuclides
uo2 = openmc.Material(name='UO2 fuel at 2.4% wt enrichment')
uo2.set_density('g/cc', 10.0)
uo2.add_nuclide('U238', 1.0)
uo2.add_nuclide('U235', 0.02)
uo2.add_nuclide('O16', 2.0)
borated_water = openmc.Material(name='Borated water')
borated_water.set_density('g/cm3', 1)
borated_water.add_nuclide('B10', 10e-5)
borated_water.add_nuclide('H1', 2.0)
borated_water.add_nuclide('O16', 1.0)
# Instantiate a Materials collection and export to XML
materials_file = openmc.Materials([uo2, borated_water])
materials_file.export_to_xml()
# Instantiate ZCylinder surfaces
fuel_or = openmc.ZCylinder(surface_id=1, x0=0, y0=0, r=1,
name='Fuel OR')
left = openmc.XPlane(surface_id=2, x0=-2, name='left')
right = openmc.XPlane(surface_id=3, x0=2, name='right')
bottom = openmc.YPlane(y0=-2, name='bottom')
top = openmc.YPlane(y0=2, name='top')
left.boundary_type = 'vacuum'
right.boundary_type = 'reflective'
top.boundary_type = 'reflective'
bottom.boundary_type = 'reflective'
# Instantiate Cells
fuel = openmc.Cell(name='fuel')
water = openmc.Cell(name='water')
# Use surface half-spaces to define regions
fuel.region = -fuel_or
water.region = +fuel_or & -right & +bottom & -top
# Register Materials with Cells
fuel.fill = uo2
water.fill = borated_water
# Instantiate pin cell Universe
pin_cell = openmc.Universe(name='pin cell')
pin_cell.add_cells([fuel, water])
# Instantiate root Cell and Universe
root_cell = openmc.Cell(name='root cell')
root_cell.region = +left & -right & +bottom & -top
root_cell.fill = pin_cell
root_univ = openmc.Universe(universe_id=0, name='root universe')
root_univ.add_cell(root_cell)
# Instantiate a Geometry, register the root Universe
geometry = openmc.Geometry(root_univ)
geometry.export_to_xml()
# Instantiate a Settings object, set all runtime parameters
settings_file = openmc.Settings()
settings_file.batches = 10
settings_file.inactive = 0
settings_file.particles = 1000
#settings_file.output = {'tallies': True}
# Create an initial uniform spatial source distribution
bounds = [-0.62992, -0.62992, -1, 0.62992, 0.62992, 1]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:],\
only_fissionable=True)
settings_file.source = openmc.source.Source(space=uniform_dist)
settings_file.export_to_xml()
# Tallies file
tallies_file = openmc.Tallies()
# Create partial current tallies from fuel to water
# Filters
two_groups = [0., 4e6, 20e6]
energy_filter = openmc.EnergyFilter(two_groups)
polar_filter = openmc.PolarFilter([0, np.pi / 4, np.pi])
azimuthal_filter = openmc.AzimuthalFilter([0, np.pi / 4, np.pi])
surface_filter = openmc.SurfaceFilter([1])
cell_from_filter = openmc.CellFromFilter(fuel)
cell_filter = openmc.CellFilter(water)
# Use Cell to cell filters for partial current
cell_to_cell_tally = openmc.Tally(name=str('fuel_to_water_1'))
cell_to_cell_tally.filters = [cell_from_filter, cell_filter, \
energy_filter, polar_filter, azimuthal_filter]
cell_to_cell_tally.scores = ['current']
tallies_file.append(cell_to_cell_tally)
# Use a Cell from + surface filters for partial current
cell_to_cell_tally = openmc.Tally(name=str('fuel_to_water_2'))
cell_to_cell_tally.filters = [cell_from_filter, surface_filter, \
energy_filter, polar_filter, azimuthal_filter]
cell_to_cell_tally.scores = ['current']
tallies_file.append(cell_to_cell_tally)
# Create partial current tallies from water to fuel
# Filters
cell_from_filter = openmc.CellFromFilter(water)
cell_filter = openmc.CellFilter(fuel)
# Cell to cell filters for partial current
cell_to_cell_tally = openmc.Tally(name=str('water_to_fuel_1'))
cell_to_cell_tally.filters = [cell_from_filter, cell_filter, \
energy_filter, polar_filter, azimuthal_filter]
cell_to_cell_tally.scores = ['current']
tallies_file.append(cell_to_cell_tally)
# Cell from + surface filters for partial current
cell_to_cell_tally = openmc.Tally(name=str('water_to_fuel_2'))
cell_to_cell_tally.filters = [cell_from_filter, surface_filter, \
energy_filter, polar_filter, azimuthal_filter]
cell_to_cell_tally.scores = ['current']
tallies_file.append(cell_to_cell_tally)
# Create a net current tally on inner surface using a surface filter
surface_filter = openmc.SurfaceFilter([1])
surf_tally1 = openmc.Tally(name='net_cylinder')
surf_tally1.filters = [surface_filter, energy_filter, polar_filter, \
azimuthal_filter]
surf_tally1.scores = ['current']
tallies_file.append(surf_tally1)
# Create a net current tally on left surface using a surface filter
# This surface has a vacuum boundary condition, so leakage is tallied
surface_filter = openmc.SurfaceFilter([2])
surf_tally2 = openmc.Tally(name='leakage_left')
surf_tally2.filters = [surface_filter, energy_filter, polar_filter, \
azimuthal_filter]
surf_tally2.scores = ['current']
tallies_file.append(surf_tally2)
# Create a net current tally on right surface using a surface filter
# This surface has a reflective boundary condition, so the net current
# should be zero.
surface_filter = openmc.SurfaceFilter([3])
surf_tally3 = openmc.Tally(name='net_right')
surf_tally3.filters = [surface_filter, energy_filter]
surf_tally3.scores = ['current']
tallies_file.append(surf_tally3)
tallies_file.export_to_xml()
def _get_results(self):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
sp = openmc.StatePoint(self._sp_name)
# Extract the tally data as a Pandas DataFrame.
df = pd.DataFrame()
for t in sp.tallies.values():
df = df.append(t.get_pandas_dataframe(), ignore_index=True)
# Extract the relevant data as a CSV string.
cols = ('mean', 'std. dev.')
return df.to_csv(None, columns=cols, index=False, float_format='%.7e')
def test_surface_tally():
harness = SurfaceTallyTestHarness('statepoint.10.h5')
harness.main()
| mit |
datapythonista/pandas | pandas/tests/indexes/multi/test_reindex.py | 1 | 4216 | import numpy as np
import pytest
import pandas as pd
from pandas import (
Index,
MultiIndex,
)
import pandas._testing as tm
def test_reindex(idx):
result, indexer = idx.reindex(list(idx[:4]))
assert isinstance(result, MultiIndex)
assert result.names == ["first", "second"]
assert [level.name for level in result.levels] == ["first", "second"]
result, indexer = idx.reindex(list(idx))
assert isinstance(result, MultiIndex)
assert indexer is None
assert result.names == ["first", "second"]
assert [level.name for level in result.levels] == ["first", "second"]
def test_reindex_level(idx):
index = Index(["one"])
target, indexer = idx.reindex(index, level="second")
target2, indexer2 = index.reindex(idx, level="second")
exp_index = idx.join(index, level="second", how="right")
exp_index2 = idx.join(index, level="second", how="left")
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
with pytest.raises(TypeError, match="Fill method not supported"):
idx.reindex(idx, method="pad", level="second")
with pytest.raises(TypeError, match="Fill method not supported"):
index.reindex(index, method="bfill", level="first")
def test_reindex_preserves_names_when_target_is_list_or_ndarray(idx):
# GH6552
idx = idx.copy()
target = idx.copy()
idx.names = target.names = [None, None]
other_dtype = MultiIndex.from_product([[1, 2], [3, 4]])
# list & ndarray cases
assert idx.reindex([])[0].names == [None, None]
assert idx.reindex(np.array([]))[0].names == [None, None]
assert idx.reindex(target.tolist())[0].names == [None, None]
assert idx.reindex(target.values)[0].names == [None, None]
assert idx.reindex(other_dtype.tolist())[0].names == [None, None]
assert idx.reindex(other_dtype.values)[0].names == [None, None]
idx.names = ["foo", "bar"]
assert idx.reindex([])[0].names == ["foo", "bar"]
assert idx.reindex(np.array([]))[0].names == ["foo", "bar"]
assert idx.reindex(target.tolist())[0].names == ["foo", "bar"]
assert idx.reindex(target.values)[0].names == ["foo", "bar"]
assert idx.reindex(other_dtype.tolist())[0].names == ["foo", "bar"]
assert idx.reindex(other_dtype.values)[0].names == ["foo", "bar"]
def test_reindex_lvl_preserves_names_when_target_is_list_or_array():
# GH7774
idx = MultiIndex.from_product([[0, 1], ["a", "b"]], names=["foo", "bar"])
assert idx.reindex([], level=0)[0].names == ["foo", "bar"]
assert idx.reindex([], level=1)[0].names == ["foo", "bar"]
def test_reindex_lvl_preserves_type_if_target_is_empty_list_or_array():
# GH7774
idx = MultiIndex.from_product([[0, 1], ["a", "b"]])
assert idx.reindex([], level=0)[0].levels[0].dtype.type == np.int64
assert idx.reindex([], level=1)[0].levels[1].dtype.type == np.object_
def test_reindex_base(idx):
idx = idx
expected = np.arange(idx.size, dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with pytest.raises(ValueError, match="Invalid fill method"):
idx.get_indexer(idx, method="invalid")
def test_reindex_non_unique():
idx = MultiIndex.from_tuples([(0, 0), (1, 1), (1, 1), (2, 2)])
a = pd.Series(np.arange(4), index=idx)
new_idx = MultiIndex.from_tuples([(0, 0), (1, 1), (2, 2)])
msg = "cannot handle a non-unique multi-index!"
with pytest.raises(ValueError, match=msg):
a.reindex(new_idx)
@pytest.mark.parametrize("values", [[["a"], ["x"]], [[], []]])
def test_reindex_empty_with_level(values):
# GH41170
idx = MultiIndex.from_arrays(values)
result, result_indexer = idx.reindex(np.array(["b"]), level=0)
expected = MultiIndex(levels=[["b"], values[1]], codes=[[], []])
expected_indexer = np.array([], dtype=result_indexer.dtype)
tm.assert_index_equal(result, expected)
tm.assert_numpy_array_equal(result_indexer, expected_indexer)
| bsd-3-clause |
fbagirov/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Because there are only a few points in each dimension, and because linear
regression fits a straight line that follows these points as closely as it
can, noise in the observations causes large variance, as shown in the first
plot. The slope of every fitted line can vary considerably from one
prediction to the next because of the noise in the observations.
Ridge regression essentially minimizes a penalised version of the
least-squares objective. The penalty `shrinks` the values of the regression
coefficients.
Despite the few data points in each dimension, the slope of the prediction
is much more stable and the variance of the line itself is greatly reduced
compared to that of standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
Juanlu001/xlwings | xlwings/main.py | 1 | 67142 | """
xlwings - Make Excel fly with Python!
Homepage and documentation: http://xlwings.org
See also: http://zoomeranalytics.com
Copyright (C) 2014-2016, Zoomer Analytics LLC.
All rights reserved.
License: BSD 3-clause (see LICENSE.txt for details)
"""
import os
import sys
import re
import numbers
import itertools
import inspect
import collections
import tempfile
import shutil
from . import xlplatform, string_types, xrange, map, ShapeAlreadyExists
from .constants import ChartType
# Optional imports
try:
import numpy as np
except ImportError:
np = None
try:
import pandas as pd
except ImportError:
pd = None
try:
from matplotlib.backends.backend_agg import FigureCanvas
except ImportError:
FigureCanvas = None
try:
from PIL import Image
except ImportError:
Image = None
class Application(object):
"""
Application is dependent on the Workbook since there might be different application instances on Windows.
"""
def __init__(self, wkb):
self.wkb = wkb
self.xl_app = wkb.xl_app
@property
def version(self):
"""
Returns Excel's version string.
.. versionadded:: 0.5.0
"""
return xlplatform.get_app_version_string(self.wkb.xl_workbook)
def quit(self):
"""
Quits the application without saving any workbooks.
.. versionadded:: 0.3.3
"""
xlplatform.quit_app(self.xl_app)
@property
def screen_updating(self):
"""
True if screen updating is turned on. Read/write Boolean.
.. versionadded:: 0.3.3
"""
return xlplatform.get_screen_updating(self.xl_app)
@screen_updating.setter
def screen_updating(self, value):
xlplatform.set_screen_updating(self.xl_app, value)
@property
def visible(self):
"""
Gets or sets the visibility of Excel to ``True`` or ``False``. This property can also be
conveniently set during instantiation of a new Workbook: ``Workbook(app_visible=False)``
.. versionadded:: 0.3.3
"""
return xlplatform.get_visible(self.xl_app)
@visible.setter
def visible(self, value):
xlplatform.set_visible(self.xl_app, value)
@property
def calculation(self):
"""
Returns or sets a Calculation value that represents the calculation mode.
Example
-------
>>> from xlwings import Workbook, Application
>>> from xlwings.constants import Calculation
>>> wb = Workbook()
>>> Application(wkb=wb).calculation = Calculation.xlCalculationManual
.. versionadded:: 0.3.3
"""
return xlplatform.get_calculation(self.xl_app)
@calculation.setter
def calculation(self, value):
xlplatform.set_calculation(self.xl_app, value)
def calculate(self):
"""
Calculates all open Workbooks
.. versionadded:: 0.3.6
"""
xlplatform.calculate(self.xl_app)
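# Illustrative sketch (not part of xlwings): a typical pattern with the
# Application wrapper above -- temporarily switching off screen updating while
# writing, then restoring it. 'Book1' is a hypothetical, unsaved workbook name.
def _example_screen_updating_off():
    wb = Workbook('Book1')
    app = Application(wkb=wb)
    app.screen_updating = False
    try:
        Range('A1').value = 'written without screen refreshes'
    finally:
        app.screen_updating = True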
class Workbook(object):
"""
``Workbook`` connects an Excel Workbook with Python. You can create a new connection from Python with
* a new workbook: ``wb = Workbook()``
* the active workbook: ``wb = Workbook.active()``
* an unsaved workbook: ``wb = Workbook('Book1')``
* a saved (open) workbook by name (incl. xlsx etc): ``wb = Workbook('MyWorkbook.xlsx')``
* a saved (open or closed) workbook by path: ``wb = Workbook(r'C:\\path\\to\\file.xlsx')``
Keyword Arguments
-----------------
fullname : str, default None
Full path or name (incl. xlsx, xlsm etc.) of existing workbook or name of an unsaved workbook.
xl_workbook : pywin32 or appscript Workbook object, default None
This enables to turn existing Workbook objects of the underlying libraries into xlwings objects
app_visible : boolean, default True
The resulting Workbook will be visible by default. To open it without showing a window,
set ``app_visible=False``. Or, to not alter the visibility (e.g., if Excel is already running),
set ``app_visible=None``. Note that this property acts on the whole Excel instance, not just the
specific Workbook.
app_target : str, default None
Mac-only, use the full path to the Excel application,
e.g. ``/Applications/Microsoft Office 2011/Microsoft Excel`` or ``/Applications/Microsoft Excel``
On Windows, if you want to change the version of Excel that xlwings talks to, go to ``Control Panel >
Programs and Features`` and ``Repair`` the Office version that you want as default.
To create a connection when the Python function is called from Excel, use:
``wb = Workbook.caller()``
"""
def __init__(self, fullname=None, xl_workbook=None, app_visible=True, app_target=None):
if xl_workbook:
self.xl_workbook = xl_workbook
self.xl_app = xlplatform.get_app(self.xl_workbook, app_target)
elif fullname:
self.fullname = fullname
if not os.path.isfile(fullname) or xlplatform.is_file_open(self.fullname):
# Connect to unsaved Workbook (e.g. 'Workbook1') or to an opened Workbook
self.xl_app, self.xl_workbook = xlplatform.get_open_workbook(self.fullname, app_target)
else:
# Open Excel and the Workbook
self.xl_app, self.xl_workbook = xlplatform.open_workbook(self.fullname, app_target)
else:
# Open Excel if necessary and create a new workbook
self.xl_app, self.xl_workbook = xlplatform.new_workbook(app_target)
self.name = xlplatform.get_workbook_name(self.xl_workbook)
if fullname is None:
self.fullname = xlplatform.get_fullname(self.xl_workbook)
# Make the most recently created Workbook the default when creating Range objects directly
xlplatform.set_xl_workbook_current(self.xl_workbook)
if app_visible is not None:
xlplatform.set_visible(self.xl_app, app_visible)
@property
def active_sheet(self):
return Sheet.active(wkb=self)
@classmethod
def active(cls, app_target=None):
"""
Returns the Workbook that is currently active or has been active last. On Windows,
this works across all instances.
.. versionadded:: 0.4.1
"""
xl_workbook = xlplatform.get_active_workbook(app_target=app_target)
return cls(xl_workbook=xl_workbook, app_target=app_target)
@classmethod
def caller(cls):
"""
Creates a connection when the Python function is called from Excel:
``wb = Workbook.caller()``
Always pack the ``Workbook`` call into the function being called from Excel, e.g.:
.. code-block:: python
def my_macro():
wb = Workbook.caller()
Range('A1').value = 1
To be able to easily invoke such code from Python for debugging, use ``Workbook.set_mock_caller()``.
.. versionadded:: 0.3.0
"""
if hasattr(Workbook, '_mock_file'):
# Use mocking Workbook, see Workbook.set_mock_caller()
_, xl_workbook = xlplatform.get_open_workbook(Workbook._mock_file)
return cls(xl_workbook=xl_workbook)
elif len(sys.argv) > 2 and sys.argv[2] == 'from_xl':
# Connect to the workbook from which this code has been invoked
fullname = sys.argv[1].lower()
if sys.platform.startswith('win'):
xl_app, xl_workbook = xlplatform.get_open_workbook(fullname, hwnd=sys.argv[4])
return cls(xl_workbook=xl_workbook)
else:
xl_app, xl_workbook = xlplatform.get_open_workbook(fullname, app_target=sys.argv[3])
return cls(xl_workbook=xl_workbook, app_target=sys.argv[3])
elif xlplatform.get_xl_workbook_current():
# Called through ExcelPython connection
return cls(xl_workbook=xlplatform.get_xl_workbook_current())
else:
raise Exception('Workbook.caller() must not be called directly. Call through Excel or set a mock caller '
'first with Workbook.set_mock_caller().')
@staticmethod
def set_mock_caller(fullpath):
"""
Sets the Excel file which is used to mock ``Workbook.caller()`` when the code is called from within Python.
Examples
--------
::
# This code runs unchanged from Excel and Python directly
import os
from xlwings import Workbook, Range
def my_macro():
wb = Workbook.caller()
Range('A1').value = 'Hello xlwings!'
if __name__ == '__main__':
# Mock the calling Excel file
Workbook.set_mock_caller(r'C:\\path\\to\\file.xlsx')
my_macro()
.. versionadded:: 0.3.1
"""
Workbook._mock_file = fullpath
@classmethod
def current(cls):
"""
Returns the current Workbook object, i.e. the default Workbook used by ``Sheet``, ``Range`` and ``Chart`` if not
specified otherwise. On Windows, in case there are various instances of Excel running, opening an existing or
creating a new Workbook through ``Workbook()`` is acting on the same instance of Excel as this Workbook. Use
like this: ``Workbook.current()``.
.. versionadded:: 0.2.2
"""
return cls(xl_workbook=xlplatform.get_xl_workbook_current(), app_visible=None)
def set_current(self):
"""
This makes the Workbook the default that ``Sheet``, ``Range`` and ``Chart`` use if not specified
otherwise. On Windows, in case there are various instances of Excel running, opening an existing or creating a
new Workbook through ``Workbook()`` is acting on the same instance of Excel as this Workbook.
.. versionadded:: 0.2.2
"""
xlplatform.set_xl_workbook_current(self.xl_workbook)
def get_selection(self):
"""
Returns the currently selected cells from Excel as ``Range`` object.
Example
-------
>>> import xlwings as xw
>>> wb = xw.Workbook.active()
>>> wb.get_selection()
<Range on Sheet 'Sheet1' of Workbook 'Workbook1'>
>>> wb.get_selection.value
[[1.0, 2.0], [3.0, 4.0]]
>>> wb.get_selection().options(transpose=True).value
[[1.0, 3.0], [2.0, 4.0]]
Returns
-------
Range object
"""
return Range(xlplatform.get_selection_address(self.xl_app), wkb=self)
def close(self):
"""
Closes the Workbook without saving it.
.. versionadded:: 0.1.1
"""
xlplatform.close_workbook(self.xl_workbook)
def save(self, path=None):
"""
        Saves the Workbook. If a path is provided, this works like SaveAs() in Excel. If no path is specified and
        if the file hasn't been saved previously, it is saved in the current working directory with the current
filename. Existing files are overwritten without prompting.
Arguments
---------
path : str, default None
Full path to the workbook
Example
-------
>>> from xlwings import Workbook
>>> wb = Workbook()
>>> wb.save()
>>> wb.save(r'C:\\path\\to\\new_file_name.xlsx')
.. versionadded:: 0.3.1
"""
xlplatform.save_workbook(self.xl_workbook, path)
@staticmethod
def get_xl_workbook(wkb):
"""
Returns the ``xl_workbook_current`` if ``wkb`` is ``None``, otherwise the ``xl_workbook`` of ``wkb``. On Windows,
``xl_workbook`` is a pywin32 COM object, on Mac it's an appscript object.
Arguments
---------
wkb : Workbook or None
Workbook object
"""
if wkb is None and xlplatform.get_xl_workbook_current() is None:
raise NameError('You must first instantiate a Workbook object.')
elif wkb is None:
xl_workbook = xlplatform.get_xl_workbook_current()
else:
xl_workbook = wkb.xl_workbook
return xl_workbook
@staticmethod
def open_template():
"""
Creates a new Excel file with the xlwings VBA module already included. This method must be called from an
interactive Python shell::
>>> Workbook.open_template()
.. versionadded:: 0.3.3
"""
this_dir = os.path.abspath(os.path.dirname(inspect.getfile(inspect.currentframe())))
template_file = 'xlwings_template.xltm'
try:
os.remove(os.path.join(this_dir, '~$' + template_file))
except OSError:
pass
xlplatform.open_template(os.path.realpath(os.path.join(this_dir, template_file)))
@property
def names(self):
"""
A collection of all the (platform-specific) name objects in the application or workbook.
Each name object represents a defined name for a range of cells (built-in or custom ones).
.. versionadded:: 0.4.0
"""
names = NamesDict(self.xl_workbook)
xlplatform.set_names(self.xl_workbook, names)
return names
def vba_macro(self, name):
"""
Runs a Sub or Function in Excel VBA.
Arguments:
----------
name : Name of Sub or Function with or without module name, e.g. ``'Module1.MyMacro'`` or ``'MyMacro'``
Examples:
---------
This VBA function:
.. code-block:: vb
Function MySum(x, y)
MySum = x + y
End Function
can be accessed like this:
>>> wb = xw.Workbook.active()
>>> my_sum = wb.vba_macro('MySum')
>>> my_sum(1, 2)
3
.. versionadded:: 0.7.2
"""
return VBAMacro(name, self)
def macro(self, name):
"""
Runs a Sub or Function in Excel.
Arguments:
----------
name : Name of Sub or Function with or without module name, e.g. ``'Module1.MyMacro'`` or ``'MyMacro'``
Examples:
---------
An addin function or macro named ``'addin_function_or_macro'`` can be accessed like this:
>>> wb = xw.Workbook.active()
>>> addin_function_or_macro = wb.macro('addin_function_or_macro')
>>> addin_function_or_macro()
'Hello world!'
.. versionadded:: 0.7.1
"""
return Macro(name, self)
def __repr__(self):
return "<Workbook '{0}'>".format(self.name)
class Sheet(object):
"""
Represents a Sheet of the current Workbook. Either call it with the Sheet name or index::
Sheet('Sheet1')
Sheet(1)
Arguments
---------
sheet : str or int
Sheet name or index
Keyword Arguments
-----------------
wkb : Workbook object, default Workbook.current()
Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
.. versionadded:: 0.2.3
"""
def __init__(self, sheet, wkb=None):
self.xl_workbook = Workbook.get_xl_workbook(wkb)
self.sheet = sheet
self.xl_sheet = xlplatform.get_xl_sheet(self.xl_workbook, self.sheet)
def activate(self):
"""Activates the sheet."""
xlplatform.activate_sheet(self.xl_workbook, self.sheet)
def autofit(self, axis=None):
"""
Autofits the width of either columns, rows or both on a whole Sheet.
Arguments
---------
axis : string, default None
- To autofit rows, use one of the following: ``rows`` or ``r``
- To autofit columns, use one of the following: ``columns`` or ``c``
- To autofit rows and columns, provide no arguments
Examples
--------
::
# Autofit columns
Sheet('Sheet1').autofit('c')
# Autofit rows
Sheet('Sheet1').autofit('r')
# Autofit columns and rows
            Sheet('Sheet1').autofit()
.. versionadded:: 0.2.3
"""
xlplatform.autofit_sheet(self, axis)
def clear_contents(self):
"""Clears the content of the whole sheet but leaves the formatting."""
xlplatform.clear_contents_worksheet(self.xl_workbook, self.sheet)
def clear(self):
"""Clears the content and formatting of the whole sheet."""
xlplatform.clear_worksheet(self.xl_workbook, self.sheet)
@property
def name(self):
"""Get or set the name of the Sheet."""
return xlplatform.get_worksheet_name(self.xl_sheet)
@name.setter
def name(self, value):
xlplatform.set_worksheet_name(self.xl_sheet, value)
@property
def index(self):
"""Returns the index of the Sheet."""
return xlplatform.get_worksheet_index(self.xl_sheet)
@classmethod
def active(cls, wkb=None):
"""Returns the active Sheet. Use like so: ``Sheet.active()``"""
xl_workbook = Workbook.get_xl_workbook(wkb)
return cls(xlplatform.get_worksheet_name(xlplatform.get_active_sheet(xl_workbook)), wkb)
@classmethod
def add(cls, name=None, before=None, after=None, wkb=None):
"""
Creates a new worksheet: the new worksheet becomes the active sheet. If neither ``before`` nor
``after`` is specified, the new Sheet will be placed at the end.
Arguments
---------
name : str, default None
Sheet name, defaults to Excel standard name
before : str or int, default None
Sheet name or index
after : str or int, default None
Sheet name or index
Returns
-------
Sheet object
Examples
--------
>>> Sheet.add() # Place at end with default name
>>> Sheet.add('NewSheet', before='Sheet1') # Include name and position
>>> new_sheet = Sheet.add(after=3)
>>> new_sheet.index
4
.. versionadded:: 0.2.3
"""
xl_workbook = Workbook.get_xl_workbook(wkb)
if before is None and after is None:
after = Sheet(Sheet.count(wkb=wkb), wkb=wkb)
elif before:
before = Sheet(before, wkb=wkb)
elif after:
after = Sheet(after, wkb=wkb)
if name:
if name.lower() in [i.name.lower() for i in Sheet.all(wkb=wkb)]:
raise Exception('That sheet name is already taken.')
else:
xl_sheet = xlplatform.add_sheet(xl_workbook, before, after)
xlplatform.set_worksheet_name(xl_sheet, name)
return cls(name, wkb)
else:
xl_sheet = xlplatform.add_sheet(xl_workbook, before, after)
return cls(xlplatform.get_worksheet_name(xl_sheet), wkb)
@staticmethod
def count(wkb=None):
"""
Counts the number of Sheets.
Keyword Arguments
-----------------
wkb : Workbook object, default Workbook.current()
Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
Examples
--------
>>> Sheet.count()
3
.. versionadded:: 0.2.3
"""
xl_workbook = Workbook.get_xl_workbook(wkb)
return xlplatform.count_worksheets(xl_workbook)
@staticmethod
def all(wkb=None):
"""
Returns a list with all Sheet objects.
Keyword Arguments
-----------------
wkb : Workbook object, default Workbook.current()
Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
Examples
--------
>>> Sheet.all()
[<Sheet 'Sheet1' of Workbook 'Book1'>, <Sheet 'Sheet2' of Workbook 'Book1'>]
>>> [i.name.lower() for i in Sheet.all()]
['sheet1', 'sheet2']
>>> [i.autofit() for i in Sheet.all()]
.. versionadded:: 0.2.3
"""
xl_workbook = Workbook.get_xl_workbook(wkb)
sheet_list = []
for i in range(1, xlplatform.count_worksheets(xl_workbook) + 1):
sheet_list.append(Sheet(i, wkb=wkb))
return sheet_list
def delete(self):
"""
Deletes the Sheet.
        .. versionadded:: 0.6.0
"""
xlplatform.delete_sheet(self)
def __repr__(self):
return "<Sheet '{0}' of Workbook '{1}'>".format(self.name, xlplatform.get_workbook_name(self.xl_workbook))
class Range(object):
"""
Range(*args, wkb=None)
A Range object can be instantiated with the following arguments::
Range('A1') Range('Sheet1', 'A1') Range(1, 'A1')
Range('A1:C3') Range('Sheet1', 'A1:C3') Range(1, 'A1:C3')
        Range((1,2))         Range('Sheet1', (1,2))         Range(1, (1,2))
Range((1,1), (3,3)) Range('Sheet1', (1,1), (3,3)) Range(1, (1,1), (3,3))
Range('NamedRange') Range('Sheet1', 'NamedRange') Range(1, 'NamedRange')
The Sheet can also be provided as Sheet object::
sh = Sheet(1)
Range(sh, 'A1')
If no worksheet name is provided as first argument, it will take the Range from the active sheet.
You usually want to go for ``Range(...).value`` to get the values (as list of lists). You can
influence the reading/writing behavior by making use of :ref:`converters` and their options:
``Range(...).options(...).value``
Arguments
---------
*args :
Definition of sheet (optional) and Range in the above described combinations.
Keyword Arguments
-----------------
wkb : Workbook object, default Workbook.current()
        Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
"""
def __init__(self, *args, **options):
self.workbook = options.pop('wkb', None)
# Arguments
if xlplatform.is_range_instance(args[0]):
self.xl_range = args[0]
self.xl_sheet = xlplatform.get_range_sheet(self.xl_range)
self.xl_workbook = xlplatform.get_sheet_workbook(self.xl_sheet)
self.workbook = Workbook(xl_workbook=self.xl_workbook)
self.row1, self.col1, self.row2, self.col2 = xlplatform.get_range_coordinates(self.xl_range)
range_address = None
elif len(args) == 1 and isinstance(args[0], string_types):
sheet_name_or_index = None
range_address = args[0]
elif len(args) == 1 and isinstance(args[0], tuple):
sheet_name_or_index = None
range_address = None
self.row1 = args[0][0]
self.col1 = args[0][1]
self.row2 = self.row1
self.col2 = self.col1
elif (len(args) == 2
and isinstance(args[0], (numbers.Number, string_types, Sheet))
and isinstance(args[1], string_types)):
if isinstance(args[0], Sheet):
sheet_name_or_index = args[0].index
else:
sheet_name_or_index = args[0]
range_address = args[1]
elif (len(args) == 2
and isinstance(args[0], (numbers.Number, string_types, Sheet))
and isinstance(args[1], tuple)):
if isinstance(args[0], Sheet):
sheet_name_or_index = args[0].index
else:
sheet_name_or_index = args[0]
range_address = None
self.row1 = args[1][0]
self.col1 = args[1][1]
self.row2 = self.row1
self.col2 = self.col1
elif len(args) == 2 and isinstance(args[0], tuple):
sheet_name_or_index = None
range_address = None
self.row1 = args[0][0]
self.col1 = args[0][1]
self.row2 = args[1][0]
self.col2 = args[1][1]
elif len(args) == 3:
if isinstance(args[0], Sheet):
sheet_name_or_index = args[0].index
else:
sheet_name_or_index = args[0]
range_address = None
self.row1 = args[1][0]
self.col1 = args[1][1]
self.row2 = args[2][0]
self.col2 = args[2][1]
# Keyword Arguments
self._options = options
if self.workbook is None and xlplatform.get_xl_workbook_current() is None:
raise NameError('You must first instantiate a Workbook object.')
elif self.workbook is None:
self.xl_workbook = xlplatform.get_xl_workbook_current()
else:
self.xl_workbook = self.workbook.xl_workbook
# Get sheet
if not hasattr(self, 'xl_sheet'):
if sheet_name_or_index:
self.xl_sheet = xlplatform.get_worksheet(self.xl_workbook, sheet_name_or_index)
else:
self.xl_sheet = xlplatform.get_active_sheet(self.xl_workbook)
# Get xl_range object
if range_address:
self.row1 = xlplatform.get_first_row(self.xl_sheet, range_address)
self.col1 = xlplatform.get_first_column(self.xl_sheet, range_address)
self.row2 = self.row1 + xlplatform.count_rows(self.xl_sheet, range_address) - 1
self.col2 = self.col1 + xlplatform.count_columns(self.xl_sheet, range_address) - 1
if 0 in (self.row1, self.col1, self.row2, self.col2):
raise IndexError("Attempted to access 0-based Range. xlwings/Excel Ranges are 1-based.")
if not hasattr(self, 'xl_range'):
self.xl_range = xlplatform.get_range_from_indices(self.xl_sheet, self.row1, self.col1, self.row2, self.col2)
def __iter__(self):
# Iterator object that returns cell Ranges: (1, 1), (1, 2) etc.
return map(lambda cell: Range(xlplatform.get_worksheet_name(self.xl_sheet), cell, wkb=self.workbook, **self._options),
itertools.product(xrange(self.row1, self.row2 + 1), xrange(self.col1, self.col2 + 1)))
def options(self, convert=None, **options):
"""
        Allows you to set a converter and its options. Converters define how Excel Ranges and their values are
        converted during both reading and writing operations. If no explicit converter is specified, the
        base converter is applied, see :ref:`converters`.
Arguments
---------
``convert`` : object, default None
A converter, e.g. ``dict``, ``np.array``, ``pd.DataFrame``, ``pd.Series``, defaults to default converter
Keyword Arguments
-----------------
ndim : int, default None
number of dimensions
numbers : type, default None
type of numbers, e.g. ``int``
dates : type, default None
e.g. ``datetime.date`` defaults to ``datetime.datetime``
empty : object, default None
transformation of empty cells
transpose : Boolean, default False
transpose values
expand : str, default None
One of ``'table'``, ``'vertical'``, ``'horizontal'``, see also ``Range.table`` etc
=> For converter-specific options, see :ref:`converters`.
Returns
-------
Range object
.. versionadded:: 0.7.0
"""
options['convert'] = convert
return Range(
xlplatform.get_worksheet_name(self.xl_sheet),
(self.row1, self.col1),
(self.row2, self.col2),
**options
)
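    # A hedged example of chaining ``options`` before reading or writing
    # (assumes numpy is installed and the ranges below hold data; the
    # converter classes themselves are documented under :ref:`converters`):
    #
    #   import numpy as np
    #   arr = Range('A1:C3').options(np.array).value            # ndarray
    #   Range('A1').options(numbers=int).value                  # ints, not floats
    #   Range('D1').options(transpose=True).value = [1, 2, 3]   # write a column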
def is_cell(self):
"""
Returns ``True`` if the Range consists of a single Cell otherwise ``False``.
.. versionadded:: 0.1.1
"""
if self.row1 == self.row2 and self.col1 == self.col2:
return True
else:
return False
def is_row(self):
"""
Returns ``True`` if the Range consists of a single Row otherwise ``False``.
.. versionadded:: 0.1.1
"""
if self.row1 == self.row2 and self.col1 != self.col2:
return True
else:
return False
def is_column(self):
"""
Returns ``True`` if the Range consists of a single Column otherwise ``False``.
.. versionadded:: 0.1.1
"""
if self.row1 != self.row2 and self.col1 == self.col2:
return True
else:
return False
def is_table(self):
"""
Returns ``True`` if the Range consists of a 2d array otherwise ``False``.
.. versionadded:: 0.1.1
"""
if self.row1 != self.row2 and self.col1 != self.col2:
return True
else:
return False
@property
def shape(self):
"""
Tuple of Range dimensions.
.. versionadded:: 0.3.0
"""
return self.row2 - self.row1 + 1, self.col2 - self.col1 + 1
@property
def size(self):
"""
Number of elements in the Range.
.. versionadded:: 0.3.0
"""
return self.shape[0] * self.shape[1]
def __len__(self):
return self.row2 - self.row1 + 1
@property
def value(self):
"""
Gets and sets the values for the given Range.
Returns
-------
object
Empty cells are set to ``None``.
"""
return conversion.read(self, None, self._options)
@value.setter
def value(self, data):
conversion.write(data, self, self._options)
@property
def formula(self):
"""
Gets or sets the formula for the given Range.
"""
return xlplatform.get_formula(self.xl_range)
@formula.setter
def formula(self, value):
xlplatform.set_formula(self.xl_range, value)
@property
def formula_array(self):
"""
Gets or sets an array formula for the given Range.
.. versionadded:: 0.7.1
"""
return xlplatform.get_formula_array(self.xl_range)
@formula_array.setter
def formula_array(self, value):
xlplatform.set_formula_array(self.xl_range, value)
@property
def table(self):
"""
Returns a contiguous Range starting with the indicated cell as top-left corner and going down and right as
long as no empty cell is hit.
Keyword Arguments
-----------------
strict : boolean, default False
``True`` stops the table at empty cells even if they contain a formula. Less efficient than if set to
``False``.
Returns
-------
Range object
Examples
--------
To get the values of a contiguous range or clear its contents use::
Range('A1').table.value
Range('A1').table.clear_contents()
"""
row2 = Range(xlplatform.get_worksheet_name(self.xl_sheet),
(self.row1, self.col1), wkb=self.workbook, **self._options).vertical.row2
col2 = Range(xlplatform.get_worksheet_name(self.xl_sheet),
(self.row1, self.col1), wkb=self.workbook, **self._options).horizontal.col2
return Range(xlplatform.get_worksheet_name(self.xl_sheet),
(self.row1, self.col1), (row2, col2), wkb=self.workbook, **self._options)
@property
def vertical(self):
"""
Returns a contiguous Range starting with the indicated cell and going down as long as no empty cell is hit.
This corresponds to ``Ctrl-Shift-DownArrow`` in Excel.
Arguments
---------
strict : bool, default False
``True`` stops the table at empty cells even if they contain a formula. Less efficient than if set to
``False``.
Returns
-------
Range object
Examples
--------
To get the values of a contiguous range or clear its contents use::
Range('A1').vertical.value
Range('A1').vertical.clear_contents()
"""
# A single cell is a special case as End(xlDown) jumps over adjacent empty cells
if xlplatform.get_value_from_index(self.xl_sheet, self.row1 + 1, self.col1) in [None, ""]:
row2 = self.row1
else:
row2 = xlplatform.get_row_index_end_down(self.xl_sheet, self.row1, self.col1)
# Strict stops at cells that contain a formula but show an empty value
if self.strict:
row2 = self.row1
while xlplatform.get_value_from_index(self.xl_sheet, row2 + 1, self.col1) not in [None, ""]:
row2 += 1
col2 = self.col2
return Range(xlplatform.get_worksheet_name(self.xl_sheet),
(self.row1, self.col1), (row2, col2), wkb=self.workbook, **self._options)
@property
def strict(self):
return self._options.get('strict', False)
@property
def horizontal(self):
"""
Returns a contiguous Range starting with the indicated cell and going right as long as no empty cell is hit.
Keyword Arguments
-----------------
strict : bool, default False
``True`` stops the table at empty cells even if they contain a formula. Less efficient than if set to
``False``.
Returns
-------
Range object
Examples
--------
To get the values of a contiguous Range or clear its contents use::
Range('A1').horizontal.value
Range('A1').horizontal.clear_contents()
"""
# A single cell is a special case as End(xlToRight) jumps over adjacent empty cells
if xlplatform.get_value_from_index(self.xl_sheet, self.row1, self.col1 + 1) in [None, ""]:
col2 = self.col1
else:
col2 = xlplatform.get_column_index_end_right(self.xl_sheet, self.row1, self.col1)
# Strict: stops at cells that contain a formula but show an empty value
if self.strict:
col2 = self.col1
while xlplatform.get_value_from_index(self.xl_sheet, self.row1, col2 + 1) not in [None, ""]:
col2 += 1
row2 = self.row2
return Range(xlplatform.get_worksheet_name(self.xl_sheet),
                     (self.row1, self.col1), (row2, col2), wkb=self.workbook, **self._options)
@property
def current_region(self):
"""
This property returns a Range object representing a range bounded by (but not including) any
combination of blank rows and blank columns or the edges of the worksheet. It corresponds to ``Ctrl-*`` on
Windows and ``Shift-Ctrl-Space`` on Mac.
Returns
-------
Range object
"""
address = xlplatform.get_current_region_address(self.xl_sheet, self.row1, self.col1)
        return Range(xlplatform.get_worksheet_name(self.xl_sheet), address, wkb=self.workbook, **self._options)
@property
def number_format(self):
"""
Gets and sets the number_format of a Range.
Examples
--------
>>> Range('A1').number_format
'General'
>>> Range('A1:C3').number_format = '0.00%'
>>> Range('A1:C3').number_format
'0.00%'
.. versionadded:: 0.2.3
"""
return xlplatform.get_number_format(self)
@number_format.setter
def number_format(self, value):
xlplatform.set_number_format(self, value)
def clear(self):
"""
Clears the content and the formatting of a Range.
"""
xlplatform.clear_range(self.xl_range)
def clear_contents(self):
"""
Clears the content of a Range but leaves the formatting.
"""
xlplatform.clear_contents_range(self.xl_range)
@property
def column_width(self):
"""
Gets or sets the width, in characters, of a Range.
One unit of column width is equal to the width of one character in the Normal style.
For proportional fonts, the width of the character 0 (zero) is used.
If all columns in the Range have the same width, returns the width.
If columns in the Range have different widths, returns None.
column_width must be in the range:
0 <= column_width <= 255
Note: If the Range is outside the used range of the Worksheet, and columns in the Range have different widths,
returns the width of the first column.
Returns
-------
float
.. versionadded:: 0.4.0
"""
return xlplatform.get_column_width(self.xl_range)
@column_width.setter
def column_width(self, value):
xlplatform.set_column_width(self.xl_range, value)
@property
def row_height(self):
"""
Gets or sets the height, in points, of a Range.
If all rows in the Range have the same height, returns the height.
If rows in the Range have different heights, returns None.
row_height must be in the range:
0 <= row_height <= 409.5
Note: If the Range is outside the used range of the Worksheet, and rows in the Range have different heights,
returns the height of the first row.
Returns
-------
float
.. versionadded:: 0.4.0
"""
return xlplatform.get_row_height(self.xl_range)
@row_height.setter
def row_height(self, value):
xlplatform.set_row_height(self.xl_range, value)
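    # A small sketch of the column_width / row_height setters above (assumes
    # the target workbook and sheet are active):
    #
    #   Range('A:A').column_width = 15   # width of column A, in characters
    #   Range('1:1').row_height = 20     # height of row 1, in points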
@property
def width(self):
"""
Returns the width, in points, of a Range. Read-only.
Returns
-------
float
.. versionadded:: 0.4.0
"""
return xlplatform.get_width(self.xl_range)
@property
def height(self):
"""
Returns the height, in points, of a Range. Read-only.
Returns
-------
float
.. versionadded:: 0.4.0
"""
return xlplatform.get_height(self.xl_range)
@property
def left(self):
"""
Returns the distance, in points, from the left edge of column A to the left edge of the range. Read-only.
Returns
-------
float
.. versionadded:: 0.6.0
"""
return xlplatform.get_left(self.xl_range)
@property
def top(self):
"""
Returns the distance, in points, from the top edge of row 1 to the top edge of the range. Read-only.
Returns
-------
float
.. versionadded:: 0.6.0
"""
return xlplatform.get_top(self.xl_range)
def autofit(self, axis=None):
"""
Autofits the width of either columns, rows or both.
Arguments
---------
axis : string or integer, default None
- To autofit rows, use one of the following: ``rows`` or ``r``
- To autofit columns, use one of the following: ``columns`` or ``c``
- To autofit rows and columns, provide no arguments
Examples
--------
::
# Autofit column A
Range('A:A').autofit('c')
# Autofit row 1
Range('1:1').autofit('r')
# Autofit columns and rows, taking into account Range('A1:E4')
Range('A1:E4').autofit()
# AutoFit rows, taking into account Range('A1:E4')
Range('A1:E4').autofit('rows')
.. versionadded:: 0.2.2
"""
xlplatform.autofit(self, axis)
def get_address(self, row_absolute=True, column_absolute=True, include_sheetname=False, external=False):
"""
Returns the address of the range in the specified format.
Arguments
---------
row_absolute : bool, default True
Set to True to return the row part of the reference as an absolute reference.
column_absolute : bool, default True
Set to True to return the column part of the reference as an absolute reference.
include_sheetname : bool, default False
Set to True to include the Sheet name in the address. Ignored if external=True.
external : bool, default False
Set to True to return an external reference with workbook and worksheet name.
Returns
-------
str
Examples
--------
::
>>> Range((1,1)).get_address()
'$A$1'
>>> Range((1,1)).get_address(False, False)
'A1'
>>> Range('Sheet1', (1,1), (3,3)).get_address(True, False, True)
'Sheet1!A$1:C$3'
>>> Range('Sheet1', (1,1), (3,3)).get_address(True, False, external=True)
'[Workbook1]Sheet1!A$1:C$3'
.. versionadded:: 0.2.3
"""
if include_sheetname and not external:
# TODO: when the Workbook name contains spaces but not the Worksheet name, it will still be surrounded
            # by '' when include_sheetname=True. Also, should probably be changed to regex
temp_str = xlplatform.get_address(self.xl_range, row_absolute, column_absolute, True)
if temp_str.find("[") > -1:
results_address = temp_str[temp_str.rfind("]") + 1:]
if results_address.find("'") > -1:
results_address = "'" + results_address
return results_address
else:
return temp_str
else:
return xlplatform.get_address(self.xl_range, row_absolute, column_absolute, external)
def __repr__(self):
return "<Range on Sheet '{0}' of Workbook '{1}'>".format(xlplatform.get_worksheet_name(self.xl_sheet),
xlplatform.get_workbook_name(self.xl_workbook))
@property
def hyperlink(self):
"""
Returns the hyperlink address of the specified Range (single Cell only)
Examples
--------
>>> Range('A1').value
'www.xlwings.org'
>>> Range('A1').hyperlink
'http://www.xlwings.org'
.. versionadded:: 0.3.0
"""
if self.formula.lower().startswith('='):
# If it's a formula, extract the URL from the formula string
formula = self.formula
try:
return re.compile(r'\"(.+?)\"').search(formula).group(1)
except AttributeError:
raise Exception("The cell doesn't seem to contain a hyperlink!")
else:
            # If it has been set programmatically
return xlplatform.get_hyperlink_address(self.xl_range)
def add_hyperlink(self, address, text_to_display=None, screen_tip=None):
"""
Adds a hyperlink to the specified Range (single Cell)
Arguments
---------
address : str
The address of the hyperlink.
text_to_display : str, default None
The text to be displayed for the hyperlink. Defaults to the hyperlink address.
screen_tip: str, default None
The screen tip to be displayed when the mouse pointer is paused over the hyperlink.
Default is set to '<address> - Click once to follow. Click and hold to select this cell.'
.. versionadded:: 0.3.0
"""
if text_to_display is None:
text_to_display = address
if address[:4] == 'www.':
address = 'http://' + address
if screen_tip is None:
screen_tip = address + ' - Click once to follow. Click and hold to select this cell.'
xlplatform.set_hyperlink(self.xl_range, address, text_to_display, screen_tip)
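    # A hedged example of the hyperlink helpers above (assumes 'A1' is a free
    # cell on the active sheet):
    #
    #   Range('A1').add_hyperlink('www.xlwings.org', text_to_display='xlwings')
    #   Range('A1').hyperlink    # -> 'http://www.xlwings.org'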
@property
def color(self):
"""
Gets and sets the background color of the specified Range.
To set the color, either use an RGB tuple ``(0, 0, 0)`` or a color constant.
To remove the background, set the color to ``None``, see Examples.
Returns
-------
RGB : tuple
Examples
--------
>>> Range('A1').color = (255,255,255)
>>> from xlwings import RgbColor
>>> Range('A2').color = RgbColor.rgbAqua
>>> Range('A2').color
(0, 255, 255)
>>> Range('A2').color = None
>>> Range('A2').color is None
True
.. versionadded:: 0.3.0
"""
return xlplatform.get_color(self.xl_range)
@color.setter
def color(self, color_or_rgb):
xlplatform.set_color(self.xl_range, color_or_rgb)
def resize(self, row_size=None, column_size=None):
"""
Resizes the specified Range
Arguments
---------
row_size: int > 0
The number of rows in the new range (if None, the number of rows in the range is unchanged).
column_size: int > 0
The number of columns in the new range (if None, the number of columns in the range is unchanged).
Returns
-------
Range : Range object
.. versionadded:: 0.3.0
"""
if row_size is not None:
assert row_size > 0
row2 = self.row1 + row_size - 1
else:
row2 = self.row2
if column_size is not None:
assert column_size > 0
col2 = self.col1 + column_size - 1
else:
col2 = self.col2
return Range(xlplatform.get_worksheet_name(self.xl_sheet), (self.row1, self.col1), (row2, col2), wkb=self.workbook, **self._options)
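    # Example (assumes 'A1' is a valid anchor cell): resize keeps the top-left
    # corner and only changes the extent of the returned Range.
    #
    #   Range('A1').resize(3, 2).shape           # -> (3, 2)
    #   Range('A1:C3').resize(row_size=1).shape  # -> (1, 3)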
def offset(self, row_offset=None, column_offset=None):
"""
Returns a Range object that represents a Range that's offset from the specified range.
Returns
-------
Range : Range object
.. versionadded:: 0.3.0
"""
if row_offset:
row1 = self.row1 + row_offset
row2 = self.row2 + row_offset
else:
row1, row2 = self.row1, self.row2
if column_offset:
col1 = self.col1 + column_offset
col2 = self.col2 + column_offset
else:
col1, col2 = self.col1, self.col2
return Range(xlplatform.get_worksheet_name(self.xl_sheet), (row1, col1), (row2, col2), wkb=self.workbook, **self._options)
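    # Example (assumes the sheet is large enough): offset shifts the Range
    # without changing its shape.
    #
    #   Range('A1:B2').offset(2, 0).get_address(False, False)          # -> 'A3:B4'
    #   Range('A1').offset(column_offset=3).get_address(False, False)  # -> 'D1'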
@property
def column(self):
"""
        Returns the number of the first column in the specified range. Read-only.
Returns
-------
Integer
.. versionadded:: 0.3.5
"""
return self.col1
@property
def row(self):
"""
        Returns the number of the first row in the specified range. Read-only.
Returns
-------
Integer
.. versionadded:: 0.3.5
"""
return self.row1
@property
def last_cell(self):
"""
Returns the bottom right cell of the specified range. Read-only.
Returns
-------
Range object
Example
-------
>>> rng = Range('A1').table
>>> rng.last_cell.row, rng.last_cell.column
(4, 5)
.. versionadded:: 0.3.5
"""
return Range(xlplatform.get_worksheet_name(self.xl_sheet),
(self.row2, self.col2), **self._options)
@property
def name(self):
"""
Sets or gets the name of a Range.
To delete a named Range, use ``del wb.names['NamedRange']`` if ``wb`` is
your Workbook object.
.. versionadded:: 0.4.0
"""
return xlplatform.get_named_range(self)
@name.setter
def name(self, value):
xlplatform.set_named_range(self, value)
# This has to be after definition of Range to resolve circular reference
from . import conversion
class Shape(object):
"""
A Shape object represents an existing Excel shape and can be instantiated with the following arguments::
Shape(1) Shape('Sheet1', 1) Shape(1, 1)
Shape('Shape 1') Shape('Sheet1', 'Shape 1') Shape(1, 'Shape 1')
The Sheet can also be provided as Sheet object::
sh = Sheet(1)
Shape(sh, 'Shape 1')
If no Worksheet is provided as first argument, it will take the Shape from the active Sheet.
Arguments
---------
*args
Definition of Sheet (optional) and shape in the above described combinations.
Keyword Arguments
-----------------
wkb : Workbook object, default Workbook.current()
Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
.. versionadded:: 0.5.0
"""
def __init__(self, *args, **kwargs):
# Use current Workbook if none provided
self.wkb = kwargs.get('wkb', None)
self.xl_workbook = Workbook.get_xl_workbook(self.wkb)
# Arguments
if len(args) == 1:
self.sheet_name_or_index = xlplatform.get_worksheet_name(xlplatform.get_active_sheet(self.xl_workbook))
self.name_or_index = args[0]
elif len(args) == 2:
if isinstance(args[0], Sheet):
self.sheet_name_or_index = args[0].index
else:
self.sheet_name_or_index = args[0]
self.name_or_index = args[1]
self.xl_shape = xlplatform.get_shape(self)
self.name = xlplatform.get_shape_name(self)
@property
def name(self):
"""
Returns or sets a String value representing the name of the object.
.. versionadded:: 0.5.0
"""
return xlplatform.get_shape_name(self)
@name.setter
def name(self, value):
self.xl_shape = xlplatform.set_shape_name(self.xl_workbook, self.sheet_name_or_index, self.xl_shape, value)
@property
def left(self):
"""
Returns or sets a value that represents the distance, in points, from the left edge of the object to the
left edge of column A.
.. versionadded:: 0.5.0
"""
return xlplatform.get_shape_left(self)
@left.setter
def left(self, value):
xlplatform.set_shape_left(self, value)
@property
def top(self):
"""
Returns or sets a value that represents the distance, in points, from the top edge of the topmost shape
in the shape range to the top edge of the worksheet.
.. versionadded:: 0.5.0
"""
return xlplatform.get_shape_top(self)
@top.setter
def top(self, value):
xlplatform.set_shape_top(self, value)
@property
def width(self):
"""
Returns or sets a value that represents the width, in points, of the object.
.. versionadded:: 0.5.0
"""
return xlplatform.get_shape_width(self)
@width.setter
def width(self, value):
xlplatform.set_shape_width(self, value)
@property
def height(self):
"""
Returns or sets a value that represents the height, in points, of the object.
.. versionadded:: 0.5.0
"""
return xlplatform.get_shape_height(self)
@height.setter
def height(self, value):
xlplatform.set_shape_height(self, value)
def delete(self):
"""
Deletes the object.
.. versionadded:: 0.5.0
"""
xlplatform.delete_shape(self)
def activate(self):
"""
Activates the object.
.. versionadded:: 0.5.0
"""
xlplatform.activate_shape(self.xl_shape)
class Chart(Shape):
"""
A Chart object represents an existing Excel chart and can be instantiated with the following arguments::
Chart(1) Chart('Sheet1', 1) Chart(1, 1)
Chart('Chart 1') Chart('Sheet1', 'Chart 1') Chart(1, 'Chart 1')
The Sheet can also be provided as Sheet object::
sh = Sheet(1)
Chart(sh, 'Chart 1')
If no Worksheet is provided as first argument, it will take the Chart from the active Sheet.
To insert a new Chart into Excel, create it as follows::
Chart.add()
Arguments
---------
*args
Definition of Sheet (optional) and chart in the above described combinations.
Keyword Arguments
-----------------
wkb : Workbook object, default Workbook.current()
Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
Example
-------
>>> from xlwings import Workbook, Range, Chart, ChartType
>>> wb = Workbook()
>>> Range('A1').value = [['Foo1', 'Foo2'], [1, 2]]
>>> chart = Chart.add(source_data=Range('A1').table, chart_type=ChartType.xlLine)
>>> chart.name
'Chart1'
>>> chart.chart_type = ChartType.xl3DArea
"""
def __init__(self, *args, **kwargs):
super(Chart, self).__init__(*args, **kwargs)
# Get xl_chart object
self.xl_chart = xlplatform.get_chart_object(self.xl_workbook, self.sheet_name_or_index, self.name_or_index)
self.index = xlplatform.get_chart_index(self.xl_chart)
# Chart Type
chart_type = kwargs.get('chart_type')
if chart_type:
self.chart_type = chart_type
# Source Data
source_data = kwargs.get('source_data')
if source_data:
self.set_source_data(source_data)
@classmethod
def add(cls, sheet=None, left=0, top=0, width=355, height=211, **kwargs):
"""
Inserts a new Chart into Excel.
Arguments
---------
sheet : str or int or xlwings.Sheet, default None
Name or index of the Sheet or Sheet object, defaults to the active Sheet
left : float, default 0
left position in points
top : float, default 0
top position in points
        width : float, default 355
            width in points
        height : float, default 211
            height in points
Keyword Arguments
-----------------
chart_type : xlwings.ChartType member, default xlColumnClustered
Excel chart type. E.g. xlwings.ChartType.xlLine
name : str, default None
Excel chart name. Defaults to Excel standard name if not provided, e.g. 'Chart 1'
source_data : Range
e.g. Range('A1').table
wkb : Workbook object, default Workbook.current()
Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
Returns
-------
xlwings Chart object
"""
wkb = kwargs.get('wkb', None)
xl_workbook = Workbook.get_xl_workbook(wkb)
chart_type = kwargs.get('chart_type', ChartType.xlColumnClustered)
name = kwargs.get('name')
source_data = kwargs.get('source_data')
if isinstance(sheet, Sheet):
sheet = sheet.index
if sheet is None:
sheet = xlplatform.get_worksheet_index(xlplatform.get_active_sheet(xl_workbook))
xl_chart = xlplatform.add_chart(xl_workbook, sheet, left, top, width, height)
if name:
xlplatform.set_chart_name(xl_chart, name)
else:
name = xlplatform.get_chart_name(xl_chart)
return cls(sheet, name, wkb=wkb, chart_type=chart_type, source_data=source_data)
@property
def chart_type(self):
"""
Gets and sets the chart type of a chart.
.. versionadded:: 0.1.1
"""
return xlplatform.get_chart_type(self.xl_chart)
@chart_type.setter
def chart_type(self, value):
xlplatform.set_chart_type(self.xl_chart, value)
def set_source_data(self, source):
"""
Sets the source for the chart.
Arguments
---------
source : Range
Range object, e.g. ``Range('A1')``
"""
xlplatform.set_source_data_chart(self.xl_chart, source.xl_range)
def __repr__(self):
return "<Chart '{0}' on Sheet '{1}' of Workbook '{2}'>".format(self.name,
Sheet(self.sheet_name_or_index).name,
xlplatform.get_workbook_name(self.xl_workbook))
class Picture(Shape):
"""
A Picture object represents an existing Excel Picture and can be instantiated with the following arguments::
Picture(1) Picture('Sheet1', 1) Picture(1, 1)
Picture('Picture 1') Picture('Sheet1', 'Picture 1') Picture(1, 'Picture 1')
The Sheet can also be provided as Sheet object::
sh = Sheet(1)
        Picture(sh, 'Picture 1')
If no Worksheet is provided as first argument, it will take the Picture from the active Sheet.
Arguments
---------
*args
Definition of Sheet (optional) and picture in the above described combinations.
Keyword Arguments
-----------------
wkb : Workbook object, default Workbook.current()
Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
.. versionadded:: 0.5.0
"""
def __init__(self, *args, **kwargs):
super(Picture, self).__init__(*args, **kwargs)
self.xl_picture = xlplatform.get_picture(self)
self.index = xlplatform.get_picture_index(self)
@classmethod
def add(cls, filename, sheet=None, name=None, link_to_file=False, save_with_document=True,
left=0, top=0, width=None, height=None, wkb=None):
"""
Inserts a picture into Excel.
Arguments
---------
filename : str
The full path to the file.
Keyword Arguments
-----------------
sheet : str or int or xlwings.Sheet, default None
Name or index of the Sheet or ``xlwings.Sheet`` object, defaults to the active Sheet
name : str, default None
Excel picture name. Defaults to Excel standard name if not provided, e.g. 'Picture 1'
left : float, default 0
Left position in points.
top : float, default 0
Top position in points.
width : float, default None
Width in points. If PIL/Pillow is installed, it defaults to the width of the picture.
Otherwise it defaults to 100 points.
height : float, default None
Height in points. If PIL/Pillow is installed, it defaults to the height of the picture.
Otherwise it defaults to 100 points.
wkb : Workbook object, default Workbook.current()
Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
Returns
-------
xlwings Picture object
.. versionadded:: 0.5.0
"""
xl_workbook = Workbook.get_xl_workbook(wkb)
if isinstance(sheet, Sheet):
sheet = sheet.index
if sheet is None:
sheet = xlplatform.get_worksheet_index(xlplatform.get_active_sheet(xl_workbook))
if name:
if name in xlplatform.get_shapes_names(xl_workbook, sheet):
raise ShapeAlreadyExists('A shape with this name already exists.')
if sys.platform.startswith('darwin') and xlplatform.get_major_app_version_number(xl_workbook) >= 15:
# Office 2016 for Mac is sandboxed. This path seems to work without the need of granting access explicitly
xlwings_picture = os.path.expanduser("~") + '/Library/Containers/com.microsoft.Excel/Data/xlwings_picture.png'
shutil.copy2(filename, xlwings_picture)
filename = xlwings_picture
# Image dimensions
im_width, im_height = None, None
if width is None or height is None:
if Image:
im = Image.open(filename)
im_width, im_height = im.size
if width is None:
if im_width is not None:
width = im_width
else:
width = 100
if height is None:
if im_height is not None:
height = im_height
else:
height = 100
xl_picture = xlplatform.add_picture(xl_workbook, sheet, filename, link_to_file, save_with_document,
left, top, width, height)
if sys.platform.startswith('darwin') and xlplatform.get_major_app_version_number(xl_workbook) >= 15:
os.remove(xlwings_picture)
if name is None:
name = xlplatform.get_picture_name(xl_picture)
else:
xlplatform.set_shape_name(xl_workbook, sheet, xl_picture, name)
return cls(sheet, name, wkb=wkb)
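    # A minimal sketch of inserting a picture (the file paths are placeholder
    # assumptions, not taken from the original code):
    #
    #   pic = Picture.add(r'C:\temp\logo.png', name='Logo', left=10, top=10)
    #   pic.width, pic.height                  # size in points
    #   pic.update(r'C:\temp\logo_new.png')    # replace, keeping name/position/size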
def update(self, filename):
"""
Replaces an existing picture with a new one, taking over the attributes of the existing picture.
Arguments
---------
filename : str
Path to the picture.
.. versionadded:: 0.5.0
"""
wkb = self.wkb
name = self.name
left, top, width, height = self.left, self.top, self.width, self.height
sheet_name_or_index = self.sheet_name_or_index
xlplatform.delete_shape(self)
# TODO: link_to_file, save_with_document
Picture.add(filename, sheet=sheet_name_or_index, left=left, top=top, width=width, height=height,
name=name, wkb=wkb)
class Plot(object):
"""
    Plot allows you to easily display Matplotlib figures as pictures in Excel.
Arguments
---------
figure : matplotlib.figure.Figure
Matplotlib figure
Example
-------
Get a matplotlib ``figure`` object:
* via PyPlot interface::
import matplotlib.pyplot as plt
fig = plt.figure()
plt.plot([1, 2, 3, 4, 5])
* via object oriented interface::
from matplotlib.figure import Figure
fig = Figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.plot([1, 2, 3, 4, 5])
* via Pandas::
import pandas as pd
import numpy as np
df = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
ax = df.plot(kind='bar')
fig = ax.get_figure()
Then show it in Excel as picture::
plot = Plot(fig)
plot.show('Plot1')
.. versionadded:: 0.5.0
"""
def __init__(self, figure):
self.figure = figure
def show(self, name, sheet=None, left=0, top=0, width=None, height=None, wkb=None):
"""
        Inserts the matplotlib figure as a picture into Excel if a picture with that name doesn't exist yet.
Otherwise it replaces the picture, taking over its position and size.
Arguments
---------
name : str
Name of the picture in Excel
Keyword Arguments
-----------------
sheet : str or int or xlwings.Sheet, default None
Name or index of the Sheet or ``xlwings.Sheet`` object, defaults to the active Sheet
left : float, default 0
Left position in points. Only has an effect if the picture doesn't exist yet in Excel.
top : float, default 0
Top position in points. Only has an effect if the picture doesn't exist yet in Excel.
width : float, default None
Width in points, defaults to the width of the matplotlib figure.
Only has an effect if the picture doesn't exist yet in Excel.
height : float, default None
Height in points, defaults to the height of the matplotlib figure.
Only has an effect if the picture doesn't exist yet in Excel.
wkb : Workbook object, default Workbook.current()
Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
Returns
-------
xlwings Picture object
.. versionadded:: 0.5.0
"""
xl_workbook = Workbook.get_xl_workbook(wkb)
if isinstance(sheet, Sheet):
sheet = sheet.index
if sheet is None:
sheet = xlplatform.get_worksheet_index(xlplatform.get_active_sheet(xl_workbook))
if sys.platform.startswith('darwin') and xlplatform.get_major_app_version_number(xl_workbook) >= 15:
# Office 2016 for Mac is sandboxed. This path seems to work without the need of granting access explicitly
filename = os.path.expanduser("~") + '/Library/Containers/com.microsoft.Excel/Data/xlwings_plot.png'
else:
temp_dir = os.path.realpath(tempfile.gettempdir())
filename = os.path.join(temp_dir, 'xlwings_plot.png')
canvas = FigureCanvas(self.figure)
canvas.draw()
self.figure.savefig(filename, format='png', bbox_inches='tight')
if width is None:
width = self.figure.bbox.bounds[2:][0]
if height is None:
height = self.figure.bbox.bounds[2:][1]
try:
return Picture.add(sheet=sheet, filename=filename, left=left, top=top, width=width,
height=height, name=name, wkb=wkb)
except ShapeAlreadyExists:
pic = Picture(sheet, name, wkb=wkb)
pic.update(filename)
return pic
finally:
os.remove(filename)
class NamesDict(collections.MutableMapping):
"""
Implements the Workbook.Names collection.
Currently only used to be able to do ``del wb.names['NamedRange']``
"""
def __init__(self, xl_workbook, *args, **kwargs):
self.xl_workbook = xl_workbook
self.store = dict()
self.update(dict(*args, **kwargs))
def __getitem__(self, key):
return self.store[self.__keytransform__(key)]
def __setitem__(self, key, value):
self.store[self.__keytransform__(key)] = value
def __delitem__(self, key):
xlplatform.delete_name(self.xl_workbook, key)
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __keytransform__(self, key):
return key
def view(obj):
"""
Opens a new workbook and displays an object on its first sheet.
Parameters
----------
obj : any type with built-in converter
the object to display
>>> import xlwings as xw
>>> import pandas as pd
>>> import numpy as np
>>> df = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
>>> xw.view(df)
.. versionadded:: 0.7.1
"""
sht = Workbook().active_sheet
Range(sht, 'A1').value = obj
sht.autofit()
class Macro(object):
def __init__(self, name, wb=None, app=None):
self.name = name
self.wb = wb
self.app = app
def run(self, *args):
return xlplatform.run(self.name, self.app or Application(self.wb), args)
__call__ = run
class VBAMacro(Macro):
def __init__(self, name, wb=None, app=None):
super(VBAMacro, self).__init__(name, wb=wb, app=app)
def run(self, *args):
return xlplatform.run("'{0}'!{1}".format(self.wb.name, self.name), self.app or Application(self.wb), args)
__call__ = run
| apache-2.0 |
jjx02230808/project0223 | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max] x [y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point of the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
timy/dm_spec | ana/seidner/tmp/plot_ppar_2d.py | 1 | 1414 | import numpy as np
import matplotlib.pyplot as plt
import sys
ny, nt = 400, 2000
t_lower, t_upper = -20.0, 500.0
y_lower, y_upper = 0.0, 200.0
# grid
grid = np.loadtxt("../res/grid.dat")
y, t = grid[::nt,0], grid[:nt,1]
# data
rawData = np.loadtxt("../res/ppar_2ds_ 2_ 2.dat")
idx_y, = np.nonzero( (y >= y_lower) & (y <= y_upper) )
idx_t, = np.nonzero( (t >= t_lower) & (t <= t_upper) )
idx = np.nonzero( (grid[:,0] >= y_lower) & (grid[:,0] <= y_upper) & \
(grid[:,1] >= t_lower) & (grid[:,1] <= t_upper) )
dat = np.sqrt( rawData[idx, 0]**2 + rawData[idx, 1]**2 )
#dat = rawData[idx, 0]
dat_max, dat_min = np.amax(dat), np.amin(dat)
#lvl_percent = np.array([ 0.2, 0.4, 0.45, 0.48, 0.52, 0.55, 0.6, 0.8 ])
lvl_percent = np.array([ 0.04, 0.1, 0.2, 0.6])
lvl_contour = dat_min + (dat_max - dat_min) * lvl_percent
print( "contour levels: ", lvl_contour )
n_y, n_t = np.size( idx_y ), np.size( idx_t )
matData = np.reshape( dat, (n_y, n_t) )
extent = ( t[idx_t[0]], t[idx_t[-1]], y[idx_y[0]], y[idx_y[-1]] )
print("idx_t[0]: ", idx_t[0])
origin = 'lower'
norm = plt.cm.colors.Normalize( vmax=matData.max(), vmin=matData.min() )
im = plt.imshow( matData, cmap=plt.cm.summer, norm=norm, origin=origin,
extent=extent )
plt.contour( matData, lvl_contour, hold='on', colors = 'k', origin=origin,
extent=extent )
plt.grid( True )
plt.savefig("fig/test.svg")
plt.show()
| mit |
tturowski/gwide | gwide/Classes/tRNAFromConcatv2.py | 1 | 65364 | #!/usr/bin/env python
import numpy as np
import sys, collections, re
from pypeaks import Data
from pyCRAC.Parsers import GTF2
import matplotlib.pyplot as plt
import pandas as pd
# import seaborn
class tRNAFromConcatv2():
def __init__(self, gtf_file, five_prime_flank, three_prime_flank, hits_threshold, lookahead, prefix, print_valleys, print_peaks, readthrough_start, normalized):
self.gtf_file = str(gtf_file)
self.gtf = GTF2.Parse_GTF()
self.gtf.read_GTF(self.gtf_file)
self.genes = dict()
self.data = dict()
self.id_to_names = dict()
self.rt = dict() # designated to work with one experiment only
self.genes_name_list = list()
self.genes_id_list = list()
self.five_prime_flank = five_prime_flank
self.three_prime_flank = three_prime_flank
self.five_prime_to_print= 50
self.longest_gene = 0
self.hits_threshold = hits_threshold
self.lookahead = lookahead
self.prefix = str(prefix)
self.print_peaks = print_peaks
self.print_valleys = print_valleys
self.readthrough_start = readthrough_start
self.experiments = list()
        self.normalized = normalized # -n option allows for using normalized dataset (reads per Million)
if self.print_peaks == True or self.print_valleys == True:
self.list_of_peaks = dict()
self.dna_dna = { 'AA' : [7.9, 0.0222],
'TT' : [7.9, 0.0222],
'AT' : [7.2, 0.0204],
'TA' : [7.2, 0.0213],
'CA' : [8.5, 0.0227],
'TG' : [8.5, 0.0227],
'GT' : [8.4, 0.0224],
'AC' : [8.4, 0.0224],
'CT' : [7.8, 0.021],
'AG' : [7.8, 0.021],
'GA' : [8.2, 0.0222],
'TC' : [8.2, 0.0222],
'CG' : [10.6, 0.0272],
'GC' : [9.8, 0.0244],
'GG' : [8.0, 0.0199],
'CC' : [8.0, 0.0199]
}
self.rna_dna = { 'AA' : [7.8, 0.0219],
'TT' : [11.5, 0.0364],
'AT' : [8.3, 0.0239],
'TA' : [7.8, 0.0232],
'CA' : [9.0, 0.0261],
'TG' : [10.4, 0.0284],
'GT' : [7.8, 0.0216],
'AC' : [5.9, 0.0123],
'CT' : [7.0, 0.0197],
'AG' : [9.1, 0.0235],
'GA' : [5.5, 0.0135],
'TC' : [8.6, 0.0229],
'CG' : [16.3, 0.0471],
'GC' : [8.0, 0.0171],
'GG' : [12.8, 0.0319],
'CC' : [9.3, 0.0232]
}
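        # The two dictionaries above hold nearest-neighbour thermodynamic
        # parameters per dinucleotide step as [delta_H, delta_S] pairs
        # (DNA/DNA and RNA/DNA duplexes); calculate_dG() sums them over a
        # 20 nt window downstream of the gene 3' end.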
def read_csv(self, concat_file, null_substitution=False):
print "# Reading CSV file..."
header = ['gene','position','nucleotide','hits','substitutions','deletions','exp_name','n_hits','n_substitutions','n_deletions']
concat_csv = pd.read_csv(concat_file, sep='\t', names=header, comment='#')
self.experiments = sorted(set(concat_csv['exp_name'].tolist()))
self.genes_name_list = sorted(set(concat_csv['gene'].tolist()))
#adding new entry in genes dict
print "# Getting details from GTF file and filling dataframe..."
for gene_name in self.genes_name_list:
gene_id = self.gtf.genes[gene_name]['gene_id']
gene_length = self.gtf.geneLength(gene_name)
if self.longest_gene < gene_length:
self.longest_gene = gene_length
self.genes[gene_name] = {
'gene_name' : gene_name,
'gene_id' : gene_id,
'gene_length' : gene_length,
'introns' : self.get_introns(gene_name), # [[len,len...][(start,stop),etc]] <-[[list_with_lengths],[list_with_start and stop]]
'RT' : dict(),
}
self.genes[gene_name]['exons'] = self.get_exons(gene_name)
self.id_to_names[gene_id] = gene_name
self.genes_id_list.append(gene_id)
#filling dataframe
gene_csv = concat_csv[concat_csv.gene == gene_name].set_index(['position'])
frame_index = range(1,(self.five_prime_flank + gene_length + self.three_prime_flank+1))
positions = range(-self.five_prime_flank,0)+range(1,(gene_length+self.three_prime_flank+1))
# columns = ['position','nucleotide']+[self.experiments]
columns = ['position','nucleotide']
self.data[gene_name] = pd.DataFrame(index=frame_index, columns=columns)
self.data[gene_name]['position'] = positions
self.data[gene_name]['nucleotide'] = gene_csv['nucleotide'][:(self.five_prime_flank + gene_length + self.three_prime_flank):]
for e in self.experiments:
self.data[gene_name][e] = gene_csv[gene_csv.exp_name == e]['hits']
if self.normalized == True:
self.data[gene_name][(e+"_nrpm")] = gene_csv[gene_csv.exp_name == e]['n_hits'] #to work with data normalized reads per M
self.data[gene_name] = self.data[gene_name].fillna(0)
if null_substitution == True:
self.data[gene_name] = self.data[gene_name].replace(0,1)
# print self.data[gene_name][230:330]
self.genes_id_list.sort()
return True
def get_introns(self, gene_name):
gene_coord = self.gtf.chromosomeCoordinates(gene_name)
introns_coord_raw = self.gtf.intronCoordinates(gene_name)
if not introns_coord_raw:
return [[],[]]
else:
introns = [[],[]]
for i in range(0,len(introns_coord_raw)):
intron_coord = list(introns_coord_raw[i])
intron_len = max(intron_coord) - min(intron_coord) + 2 #corrected
introns[0].append(intron_len)
if self.gtf.strand(gene_name) == "+":
intron_start = min(intron_coord) - min(gene_coord)
elif self.gtf.strand(gene_name) == "-":
intron_start = max(gene_coord) - max(intron_coord)
intron_stop = intron_start + intron_len - 1 #corrected
introns[1].append((intron_start,intron_stop))
return introns
def get_exons(self, gene_name):
exons = list()
introns = self.genes[gene_name]['introns']
if len(introns[0]) == 0:
exons = [[1, self.genes[gene_name]['gene_length']]]
# print 'no introns for this gene'
elif len(introns[0]) == 1:
exons = [[1,introns[1][0][0]-1],[introns[1][0][1]+1, self.genes[gene_name]['gene_length']]]
else:
print "ERROR: Genes with more than one intron are not supported in this version."
return exons
def find_peaks(self):
print '# Finding peaks...'
for i in self.data:
self.list_of_peaks[i] = dict()
for e in self.experiments:
self.list_of_peaks[i][e] = dict()
hist = Data(list(self.data[i].index), list(self.data[i][e]), smoothness=1, default_smooth=False)
hist.normalize()
try:
hist.get_peaks(method="slope", peak_amp_thresh=0.00005, valley_thresh=0.00003, intervals=None,
lookahead=self.lookahead, avg_interval=100)
self.list_of_peaks[i][e]['peaks'] = sorted(np.array(hist.peaks['peaks'][0]).tolist())
self.list_of_peaks[i][e]['valleys'] = sorted(np.array(hist.peaks['valleys'][0]).tolist())
except ValueError:
pass
self.list_of_peaks[i][e]['peaks'] = []
self.list_of_peaks[i][e]['valleys'] = []
# print self.list_of_peaks
return True
def calculate(self, details=False, ntotal=False, nmax=False, pscounts=False):
        if details == True and len(self.experiments) > 1:
            print '# WARNING: -d parameter works only with one experiment.'
            exit()
        elif details == True or nmax == True or ntotal == True:
            print '# Calculating readthrough and others...'
        else:
            print '# Calculating readthrough...'
for gene_name in self.genes:
transcription_start = self.five_prime_flank-21
gene_length = self.genes[gene_name]['gene_length']
gene_end = self.five_prime_flank + gene_length
RT_begin = self.five_prime_flank + gene_length + self.readthrough_start
gene_middle = self.five_prime_flank + ( gene_length / 2 )
three_middle = self.five_prime_flank + gene_length + (self.three_prime_flank / 2)
three_one_third = self.five_prime_flank + gene_length + (self.three_prime_flank / 3)
three_two_third = self.five_prime_flank + gene_length + (2*(self.three_prime_flank / 3))
#getting intron length
introns = list()
if not self.genes[gene_name]['introns'][0]:
intron_length = 0
else:
for intron in range(0,len(self.genes[gene_name]['introns'][0])):
introns.append(str(self.genes[gene_name]['introns'][0][intron]))
intron_length = int(''.join(map(str,introns)))
intron_start_stop = self.genes[gene_name]['introns'][1][0] #start and stop of first intron only!
for exp in self.experiments:
# !! changes in exp name !!
exp_old = exp
try:
if max(list(self.data[gene_name][exp])) >= self.hits_threshold:
#normalization options
if nmax == True:
exp = exp_old+'_nmax'
self.data[gene_name][exp] = self.data[gene_name][exp_old]/self.data[gene_name][exp_old].max()
if pscounts == True:
self.data[gene_name][exp] = self.data[gene_name][exp].add(0.000001) #adding pseudocounts
if ntotal == True:
exp = exp_old+'_ntotal'
self.data[gene_name][exp] = self.data[gene_name][exp_old]/self.data[gene_name][exp_old].sum()
if pscounts == True:
self.data[gene_name][exp] = self.data[gene_name][exp].add(0.000001) #adding pseudocounts
if pscounts == True:
self.data[gene_name][exp_old] = self.data[gene_name][exp_old].add(10) #adding pseudocounts
#slicing dataframes
total = self.data[gene_name][transcription_start:].sum()[exp]
total_av = self.data[gene_name][transcription_start:].mean()[exp]
total_med = self.data[gene_name][transcription_start:].median()[exp]
total_SD = self.data[gene_name][transcription_start:].std()[exp]
c = float(self.data[gene_name][RT_begin:].sum()[exp])
if details==True:
g = self.data[gene_name][transcription_start:gene_end].sum()[exp]
g_av = self.data[gene_name][transcription_start:gene_end].mean()[exp]
g_med = self.data[gene_name][transcription_start:gene_end].median()[exp]
g_SD = self.data[gene_name][transcription_start:gene_end].std()[exp]
a1 = float(self.data[gene_name][transcription_start:gene_middle].sum()[exp])
a2 = self.data[gene_name][gene_middle+1:gene_end].sum()[exp]
d = self.data[gene_name][gene_end+1:].sum()[exp]
d_av = self.data[gene_name][gene_end+1:].mean()[exp]
d_med = self.data[gene_name][gene_end+1:].median()[exp]
d_SD = self.data[gene_name][gene_end+1:].std()[exp]
e1 = float(self.data[gene_name][gene_end+1:three_middle].sum()[exp])
e2 = self.data[gene_name][three_middle+1:].sum()[exp]
f1 = float(self.data[gene_name][gene_end+1:three_one_third].sum()[exp])
f2 = self.data[gene_name][three_one_third+1:three_two_third].sum()[exp]
f3 = self.data[gene_name][three_two_third+1:].sum()[exp]
if intron_length > 0:
b1 = float(self.data[gene_name][transcription_start:intron_start_stop[0]+self.five_prime_flank].sum()[exp])
b2 = float(self.data[gene_name][intron_start_stop[0]+1+self.five_prime_flank:intron_start_stop[1]+self.five_prime_flank].sum()[exp])
b3 = self.data[gene_name][intron_start_stop[1]+1+self.five_prime_flank:gene_end].sum()[exp]
#calculating
RT = np.float64(c) / total #allows for dividing by 0
if details==True:
a = np.float64(a1) / a2
e = np.float64(e1) / e2
f = np.float64(f1) / f3
if intron_length > 0:
b = np.float64(b1) / b3
i = np.float64(b2) / (b1 + b3)
else:
b = 0
i = 0
else:
RT, total, total_av, total_med, total_SD, c = ['too_low_reads'] * 6
if details == True:
g, g_av, g_med, g_SD, d, d_av, d_med, d_SD, a, e, f, b, i = ['too_low_reads'] * 13
except KeyError as e:
# print "Error raised by key: "+str(e)
RT, total, total_av, total_med, total_SD, c = ['no_reads'] * 6
if details == True:
g, g_av, g_med, g_SD, d, d_av, d_med, d_SD, a, e, f, b, i = ['no_reads'] * 13
                #storing in dictionary
self.genes[gene_name]['RT'][exp_old] = RT
if details==True:
self.genes[gene_name]['total'] = total
self.genes[gene_name]['total_av'] = total_av
self.genes[gene_name]['total_med'] = total_med
self.genes[gene_name]['total_std'] = total_SD
self.genes[gene_name]['a'] = a
self.genes[gene_name]['b'] = b
self.genes[gene_name]['i'] = i
self.genes[gene_name]['e'] = e
self.genes[gene_name]['f'] = f
self.genes[gene_name]['d'] = d
self.genes[gene_name]['d_av'] = d_av
self.genes[gene_name]['d_med'] = d_med
self.genes[gene_name]['d_std'] = d_SD
self.genes[gene_name]['g'] = g
self.genes[gene_name]['g_av'] = g_av
self.genes[gene_name]['g_med'] = g_med
self.genes[gene_name]['g_std'] = g_SD
### below part of function is needed to sort tRNA according to readthrough
tRNA_group = self.genes[gene_name]['gene_id'][0:2]
gene_id = self.genes[gene_name]['gene_id']
if tRNA_group not in self.rt:
self.rt[tRNA_group] = dict()
self.rt[tRNA_group][gene_id] = RT
return True
def calculate_dG(self):
print '# Calculating delta G energy for RNA/DNA and DNA/DNA for 20 nt after 3` end'
for gene_name in self.genes_name_list:
gene_end = self.genes[gene_name]['gene_length'] + self.five_prime_flank #as before slice_dataframe
# calculating energy
r_delta_H = 0
r_delta_S = 0
d_delta_H = 0
d_delta_S = 0
pair = str()
for i in range(gene_end+1,gene_end+21):
if len(pair) > 1:
pair = pair[1]
pair += self.data[gene_name]['nucleotide'][i]
if len(pair) == 2:
r_delta_H += self.rna_dna[pair][0]
r_delta_S += self.rna_dna[pair][1]
d_delta_H += self.dna_dna[pair][0]
d_delta_S += self.dna_dna[pair][1]
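            # delta_G at T = 310 K follows from delta_G = delta_H - T*delta_S;
            # the two lines below use the algebraically equivalent form
            # delta_H * (1 - T/(delta_H/delta_S)), applied to the nearest-neighbour
            # delta_H/delta_S sums accumulated over the 20 nt window above.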
r_delta_G = r_delta_H * (1-(310/(r_delta_H/r_delta_S)))
d_delta_G = d_delta_H * (1-(310/(d_delta_H/d_delta_S)))
# to_print = str(r_delta_G)+'_'+str(d_delta_G)
self.genes[gene_name]['r_delta_G'] = r_delta_G
self.genes[gene_name]['d_delta_G'] = d_delta_G
return True
# making output; if -p option then text file, else to standard output
def make_text_file(self, filename, details=False, print_dG=False, ntotal=False, nmax=False):
print '# Making text file...'
output_dict = dict()
for i in self.genes:
output_dict[i] = list()
introns = list()
output_dict[i].append(str(self.genes[i]['gene_name']))
output_dict[i].append(str(self.genes[i]['gene_id']))
output_dict[i].append(str(self.genes[i]['gene_length']))
if not self.genes[i]['introns'][0]:
introns_to_print = 'none'
else:
for intron in range(0,len(self.genes[i]['introns'][0])):
introns.append(str(self.genes[i]['introns'][0][intron]))
introns_to_print = ', '.join(introns)
output_dict[i].append(introns_to_print) # introns length
if print_dG == True:
output_dict[i].append(str(self.genes[i]['r_delta_G']))
output_dict[i].append(str(self.genes[i]['d_delta_G']))
for e in self.experiments:
output_dict[i].append(str(self.genes[i]['RT'][e]))
if details==True:
output_dict[i].append(str(self.genes[i]['total']))
output_dict[i].append(str(self.genes[i]['total_av']))
output_dict[i].append(str(self.genes[i]['total_med']))
output_dict[i].append(str(self.genes[i]['total_std']))
output_dict[i].append(str(self.genes[i]['a']))
output_dict[i].append(str(self.genes[i]['b']))
output_dict[i].append(str(self.genes[i]['i']))
output_dict[i].append(str(self.genes[i]['e']))
output_dict[i].append(str(self.genes[i]['f']))
output_dict[i].append(str(self.genes[i]['d']))
output_dict[i].append(str(self.genes[i]['d_av']))
output_dict[i].append(str(self.genes[i]['d_med']))
output_dict[i].append(str(self.genes[i]['d_std']))
output_dict[i].append(str(self.genes[i]['g']))
output_dict[i].append(str(self.genes[i]['g_av']))
output_dict[i].append(str(self.genes[i]['g_med']))
output_dict[i].append(str(self.genes[i]['g_std']))
# experiments = '\t'.join(self.experiments)
filename.write("# analyse_tRNA_pileups output file using gtf file:"+"\n")
filename.write("# "+self.gtf_file+"\n")
filename.write("# 5` flank: "+str(self.five_prime_flank)+"\n")
filename.write("# 3` flank: "+str(self.three_prime_flank)+"\n")
# if self.normalized == True:
# filename.write("# calculations performed on data normalized to reads per Million \n")
# else:
# filename.write("# calculations performed on non-normalized data \n")
filename.write("# readthrough calculated: last nucleotide of gene + "+str(self.readthrough_start)+" nt\n")
filename.write("# threshold of hits: "+str(self.hits_threshold)+"\n")
filename.write("# lookahead option for pypeaks: "+str(self.lookahead)+"\n")
if nmax == True and ntotal == False:
filename.write("# Calculations performed on normalized data (max = 1)"+"\n")
elif ntotal == True:
filename.write("# Calculations performed on normalized data (sum = 1)"+"\n")
else:
filename.write("# Calculations performed on non-normalized data "+"\n")
header_list = ["# name","ID","length","int_len"]
if print_dG == True:
header_list += ["dG_RNA/DNA","dG_DNA/DNA"]
header_list += self.experiments
if details == True:
header_list += ['total','total_av','total_med','total_std','a','b','i','e','f','d','d_av','d_med','d_std','g','g_av','g_med','g_std']
filename.write('\t'.join(header_list)+'\n')
for g in self.genes_id_list:
line = self.id_to_names[g]
filename.write('\t'.join(output_dict[line]) + '\n')
if filename != sys.stdout:
filename.close()
return True
def termination_efficency_valleys(self):
print '# Calculating energy for termination efficiency...'
for e in self.experiments:
fig = plt.figure(figsize=(12, 9), dpi=100, facecolor='w', edgecolor='k')
fig_no = 0
plot_no = 0
for i_gene_id in self.genes_id_list:
gene_name = self.id_to_names[i_gene_id]
valleys = self.list_of_peaks[gene_name][e]['valleys']
gene_end = self.genes[gene_name]['gene_length'] + self.five_prime_flank #as before slice_dataframe
plot_no += 1
fig.add_subplot(3, 2, plot_no)
plt.tight_layout()
plt.title(e)
plt.ylabel("no. of reads")
try:
plt.plot(self.data[gene_name][e]['position'], self.data[gene_name][e]['hits'])
if self.print_peaks == True:
plt.plot(self.data[gene_name][e]['position'], self.data[gene_name][e]['peaks_to_plot'], color='green')
if self.print_valleys == True:
plt.plot(self.data[gene_name][e]['position'], self.data[gene_name][e]['valleys_to_plot'], color='red')
for i in self.genes[gene_name]['exons']:
plt.axvspan(i[0], i[1], alpha=0.2, color='green')
# some trick figured out by Hywel to plot without problems
y_array = np.array(self.data[gene_name][e]['RT_to_plot'])
x_array = np.array(self.data[gene_name][e]['position'])
y_array[0] = 0
y_array[len(y_array)-1] = 0
plt.fill(x_array, y_array)
plt.xlabel('ID: '+i_gene_id+', Name: '+gene_name)
plt.text(210-self.five_prime_flank,max(self.data[gene_name][e]['hits'])-150,'RT='+str(round(self.genes[gene_name]['RT'][e],3)))
plt.text(gene_end-200,max(self.data[gene_name][e]['hits'])-150,'delta G for: RNA/DNA_DNA/DNA')
except KeyError:
plt.xlabel("NO READS")
aaa = max(self.data[gene_name][e]['hits'])/10 #parameter to print energy in non-overlapping way
inkr = 1
print 'valleys: '+str(valleys)
for v in valleys:
if v >= gene_end:
print 'for valley '+str(v)+' energy is:'
r_delta_H = 0
r_delta_S = 0
d_delta_H = 0
d_delta_S = 0
pair = str()
bbb = aaa * inkr
inkr += 1
for i in range(v-5,v+2):
print 'position: '+str(i)+' nucleotide: '+self.data[gene_name][e]['nucleotides'][i]
if len(pair) > 1:
pair = pair[1]
pair += self.data[gene_name][e]['nucleotides'][i]
if len(pair) == 2:
r_delta_H += self.rna_dna[pair][0]
r_delta_S += self.rna_dna[pair][1]
d_delta_H += self.dna_dna[pair][0]
d_delta_S += self.dna_dna[pair][1]
r_delta_G = r_delta_H * (1-(310/(r_delta_H/r_delta_S)))
d_delta_G = d_delta_H * (1-(310/(d_delta_H/d_delta_S)))
to_print = str(r_delta_G)+'_'+str(d_delta_G)
print 'r_delta_G: '+str(r_delta_G)
print 'd_delta_G: '+str(d_delta_G)
plt.text(v-self.five_prime_flank,bbb,to_print)
if plot_no == 6:
fig_no += 1
plt.savefig(self.prefix+e+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'_fig_'+str(fig_no)+'.png')
plt.clf()
plot_no = 0
if plot_no > 0:
plt.savefig(self.prefix+e+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'_fig_'+str(fig_no+1)+'.png')
plt.clf()
return True
def termination_efficency(self):
print '# Calculating energy for termination efficiency...'
for e in self.experiments:
fig = plt.figure(figsize=(12, 9), dpi=100, facecolor='w', edgecolor='k')
fig_no = 0
plot_no = 0
for i_gene_id in self.genes_id_list:
gene_name = self.id_to_names[i_gene_id]
gene_end = self.genes[gene_name]['gene_length'] + self.five_prime_flank #as before slice_dataframe
plot_no += 1
fig.add_subplot(3, 2, plot_no)
plt.tight_layout()
plt.title(e)
plt.ylabel("no. of reads")
try:
plt.plot(self.data[gene_name][e]['position'], self.data[gene_name][e]['hits'])
if self.print_peaks == True:
plt.plot(self.data[gene_name][e]['position'], self.data[gene_name][e]['peaks_to_plot'], color='green')
if self.print_valleys == True:
plt.plot(self.data[gene_name][e]['position'], self.data[gene_name][e]['valleys_to_plot'], color='red')
for i in self.genes[gene_name]['exons']:
plt.axvspan(i[0], i[1], alpha=0.2, color='green')
# some trick figured out by Hywel to plot without problems
y_array = np.array(self.data[gene_name][e]['RT_to_plot'])
x_array = np.array(self.data[gene_name][e]['position'])
y_array[0] = 0
y_array[len(y_array)-1] = 0
plt.fill(x_array, y_array)
plt.xlabel('ID: '+i_gene_id+', Name: '+gene_name)
plt.text(210-self.five_prime_flank,max(self.data[gene_name][e]['hits'])-150,'RT='+str(round(self.genes[gene_name]['RT'][e],3)))
plt.text(gene_end-200,max(self.data[gene_name][e]['hits'])-150,'delta G for: RNA/DNA_DNA/DNA')
except KeyError:
plt.xlabel("NO READS")
# calculating energy
r_delta_H = 0
r_delta_S = 0
d_delta_H = 0
d_delta_S = 0
pair = str()
print gene_name
for i in range(gene_end +1,gene_end+21):
print 'position: '+str(i)+' nucleotide: '+self.data[gene_name][e]['nucleotides'][i]
if len(pair) > 1:
pair = pair[1]
pair += self.data[gene_name][e]['nucleotides'][i]
if len(pair) == 2:
r_delta_H += self.rna_dna[pair][0]
r_delta_S += self.rna_dna[pair][1]
d_delta_H += self.dna_dna[pair][0]
d_delta_S += self.dna_dna[pair][1]
r_delta_G = r_delta_H * (1-(310/(r_delta_H/r_delta_S)))
d_delta_G = d_delta_H * (1-(310/(d_delta_H/d_delta_S)))
to_print = str(r_delta_G)+'_'+str(d_delta_G)
print 'r_delta_G: '+str(r_delta_G)
print 'd_delta_G: '+str(d_delta_G)
plt.text(self.genes[gene_name]['gene_length'],(max(self.data[gene_name][e]['hits'])/2),to_print)
if plot_no == 6:
fig_no += 1
plt.savefig(self.prefix+e+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'_fig_'+str(fig_no)+'.png')
plt.clf()
plot_no = 0
if plot_no > 0:
plt.savefig(self.prefix+e+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'_fig_'+str(fig_no+1)+'.png')
plt.clf()
return True
def slice_dataframe(self):
        five_prime_to_out = self.five_prime_flank - self.five_prime_to_print # maybe changed - it simply gives only the -50 flank independently of what -r value was used for pyPileups
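        # e.g. (editor's sketch, numbers are illustrative): with five_prime_flank=250 and
        # five_prime_to_print=50, five_prime_to_out=200, so the slice below drops the first
        # 200 rows of every profile and keeps only the last 50 nt of the 5` flank.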
for i in self.data:
for e in self.data[i]:
self.data[i][e] = self.data[i][e][five_prime_to_out::]
# if self.list_of_peaks:
# for i in self.list_of_peaks:
# for e in self.list_of_peaks[i]:
# self.list_of_peaks[i][e]['peaks'] = [x - five_prime_to_out for x in self.list_of_peaks[i][e]['peaks']]
# self.list_of_peaks[i][e]['valleys'] = [x - five_prime_to_out for x in self.list_of_peaks[i][e]['valleys']]
return True
### making output plots, one gene, different experiments per page
def fig_gene_pp(self):
print '# Plotting 1 gene per page (all experiments).'
if len(self.experiments) <= 6:
subplot_layout = [3, 2]
elif len(self.experiments) > 6 and len(self.experiments) <= 9:
subplot_layout = [3,3]
elif len(self.experiments) > 9 and len(self.experiments) <= 12:
subplot_layout = [4,3]
elif len(self.experiments) > 12 and len(self.experiments) <= 16:
subplot_layout = [4,4]
else:
print '# Unsupported layout - more than 16 different experiments'
exit()
for i_gene_id in self.genes_id_list:
fig = plt.figure(figsize=(12, 9), facecolor='w', edgecolor='k')
gene_name = self.id_to_names[i_gene_id]
gene_length = self.genes[gene_name]['gene_length']
RT_begin = self.five_prime_flank + gene_length + self.readthrough_start
plot_no = 0
for e in self.experiments:
plot_no += 1
fig.add_subplot(subplot_layout[0],subplot_layout[1],plot_no)
fig.tight_layout()
plt.title(e)
plt.ylabel("no. of reads")
try:
plt.plot(self.data[gene_name]['position'], self.data[gene_name][e])
if self.print_peaks == True:
for i in self.list_of_peaks[gene_name][e]['peaks']:
plt.axvline(i-self.five_prime_flank, color='blue')
if self.print_valleys == True:
for i in self.list_of_peaks[gene_name][e]['valleys']:
plt.axvline(i-self.five_prime_flank, color='red')
for i in self.genes[gene_name]['exons']:
plt.axvspan(i[0], i[1], alpha=0.2, color='green')
# some trick figured out by Hywel to plot without problems
y_array = np.array(self.data[gene_name][e][RT_begin:])
x_array = np.array(self.data[gene_name]['position'][RT_begin:])
y_array[0] = 0
y_array[len(y_array)-1] = 0
plt.fill(x_array, y_array)
plt.xlabel('ID: '+i_gene_id+', Name: '+gene_name)
# plt.text(0.5,0.5,'RT='+str(round(self.genes[gene_name]['RT'][e],3)))
except KeyError:
plt.xlabel("NO READS")
if plot_no == len(self.experiments):
plot_no = 0
plt.savefig(self.prefix+i_gene_id+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'.png', dpi=150)
plt.clf()
return True
### making output plots, one gene, different experiments per page
def fig_gene_pp_tight(self):
print '# Plotting 1 gene per page (all experiments).'
if len(self.experiments) > 6:
            exit('# Unsupported layout - more than 6 different experiments')
for i_gene_id in self.genes_id_list:
#setup figure
fig, axes = plt.subplots(len(self.experiments), 1)
# fig.tight_layout()
fig.subplots_adjust(hspace=0) #leave no space between
            plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False) #from stackoverflow: remove xticklabels except for the bottom plot
gene_name = self.id_to_names[i_gene_id]
gene_length = self.genes[gene_name]['gene_length']
RT_begin = self.five_prime_flank + gene_length + self.readthrough_start
plot_no = 0
for e in self.experiments:
#plot subplots
plot_no += 1
axes[plot_no-1].set_ylabel(e)
try:
axes[plot_no-1].plot(self.data[gene_name]['position'], self.data[gene_name][e], color='black')
if self.print_peaks == True:
for i in self.list_of_peaks[gene_name][e]['peaks']:
axes[plot_no-1].axvline(i-self.five_prime_flank, color='blue')
if self.print_valleys == True:
for i in self.list_of_peaks[gene_name][e]['valleys']:
axes[plot_no-1].axvline(i-self.five_prime_flank, color='red')
for i in self.genes[gene_name]['exons']:
axes[plot_no-1].axvspan(i[0], i[1], alpha=0.2, color='green')
# some trick figured out by Hywel to plot without problems
y_array = np.array(self.data[gene_name][e][RT_begin:])
x_array = np.array(self.data[gene_name]['position'][RT_begin:])
y_array[0] = 0
y_array[len(y_array)-1] = 0
axes[plot_no-1].fill(x_array, y_array, color='grey')
plt.xlabel('ID: '+i_gene_id+', Name: '+gene_name)
# plt.text(0.5,0.5,'RT='+str(round(self.genes[gene_name]['RT'][e],3)))
except KeyError:
plt.xlabel("NO READS")
if plot_no == len(self.experiments):
plot_no = 0
plt.savefig(self.prefix+i_gene_id+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'.png', dpi=150)
plt.clf()
plt.close()
return True
    ### function to plot the ratio between samples with the -a parameter and the -b parameter
def fig_ratio(self, a, b):
print '# Plotting 1 gene per page. Log2 ratio for '+a+' divided by '+b+' (all experiments).'
new_exp_list = self.group_experiments(a,b)
for i_gene_id in self.genes_id_list:
fig = plt.figure(figsize=(12, 9), dpi=100, facecolor='w', edgecolor='k')
gene_name = self.id_to_names[i_gene_id]
gene_length = self.genes[gene_name]['gene_length']
RT_begin = self.five_prime_flank + gene_length + self.readthrough_start
plot_no = 0
fig_no = 0
for e in new_exp_list:
plot_no += 1
fig_no += 1
#plot a
fig.add_subplot(4,2,plot_no)
# plt.tight_layout()
plt.title(e[0])
plt.ylabel("no. of reads")
try:
plt.plot(self.data[gene_name]['position'], self.data[gene_name][e[0]], color='black')
if self.print_peaks == True:
for i in self.list_of_peaks[gene_name][e[0]]['peaks']:
plt.axvline(i-self.five_prime_flank, color='blue')
if self.print_valleys == True:
for i in self.list_of_peaks[gene_name][e[0]]['valleys']:
plt.axvline(i-self.five_prime_flank, color='red')
for i in self.genes[gene_name]['exons']:
plt.axvspan(i[0], i[1], alpha=0.2, color='green')
# some trick figured out by Hywel to plot without problems
y_array = np.array(self.data[gene_name][e[0]][RT_begin:])
x_array = np.array(self.data[gene_name]['position'][RT_begin:])
y_array[0] = 0
y_array[len(y_array)-1] = 0
plt.fill(x_array, y_array, color='#808080')
plt.xlabel('ID: '+i_gene_id+', Name: '+gene_name)
except KeyError:
plt.xlabel("NO READS")
#plot b
fig.add_subplot(4,2,plot_no+2)
# plt.tight_layout()
plt.title(e[1])
plt.ylabel("no. of reads")
try:
plt.plot(self.data[gene_name]['position'], self.data[gene_name][e[1]], color='black')
if self.print_peaks == True:
for i in self.list_of_peaks[gene_name][e[1]]['peaks']:
plt.axvline(i-self.five_prime_flank, color='blue')
if self.print_valleys == True:
for i in self.list_of_peaks[gene_name][e[1]]['valleys']:
plt.axvline(i-self.five_prime_flank, color='red')
for i in self.genes[gene_name]['exons']:
plt.axvspan(i[0], i[1], alpha=0.2, color='green')
# some trick figured out by Hywel to plot without problems
y_array = np.array(self.data[gene_name][e[1]][RT_begin:])
x_array = np.array(self.data[gene_name]['position'][RT_begin:])
y_array[0] = 0
y_array[len(y_array)-1] = 0
plt.fill(x_array, y_array, color='#808080')
plt.xlabel('ID: '+i_gene_id+', Name: '+gene_name)
# plt.text(0.5,0.5,'RT='+str(round(self.genes[gene_name]['RT'][e],3)))
except KeyError:
plt.xlabel("NO READS")
#plot ratio
fig.add_subplot(4,2,plot_no+4)
plt.tight_layout()
plt.title(e[0]+'/'+e[1]+' ratio')
plt.ylabel("no. of reads")
try:
self.data[gene_name][str("ratio_"+e[0]+"_"+e[1])] = self.data[gene_name][e[0]]/self.data[gene_name][e[1]] ###getting ratio
plt.plot(self.data[gene_name]['position'], self.data[gene_name][str("ratio_"+e[0]+"_"+e[1])], color='#994C00')
for i in self.genes[gene_name]['exons']:
plt.axvspan(i[0], i[1], alpha=0.2, color='green')
plt.xlabel('ID: '+i_gene_id+', Name: '+gene_name)
except KeyError:
plt.xlabel("NO READS")
#plot log2 ratio
fig.add_subplot(4,2,plot_no+6)
plt.tight_layout()
plt.title('log2 '+e[0]+'/'+e[1]+' ratio')
plt.ylabel("no. of reads")
try:
self.data[gene_name][str("ratio_"+e[0]+"_"+e[1])] = np.log2(self.data[gene_name][e[0]]/self.data[gene_name][e[1]]) ###getting ratio
plt.plot(self.data[gene_name]['position'], self.data[gene_name][str("ratio_"+e[0]+"_"+e[1])], color='#994C00')
for i in self.genes[gene_name]['exons']:
plt.axvspan(i[0], i[1], alpha=0.2, color='green')
plt.xlabel('ID: '+i_gene_id+', Name: '+gene_name)
except KeyError:
plt.xlabel("NO READS")
if plot_no == 2:
plot_no = 0
plt.savefig(self.prefix+i_gene_id+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'ratio_fig'+str(fig_no)+'.png')
plt.clf()
plot_no = 0
plt.savefig(self.prefix+i_gene_id+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'ratio_fig'+str(fig_no)+'.png')
plt.clf()
plt.savefig(self.prefix+i_gene_id+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'ratio_fig'+str(fig_no)+'.png')
plt.clf()
return True
def group_experiments(self, a, b):
print 'Looking for groups of experiments...'
        ### function identifying 'to_divide' and 'divisor' for ratio results plotting.
        ### Please use the nomenclature gene_variant
        ### IMPORTANT: only two experiments may contain the same root (gene) combined with 'a' or 'b'
        ### i.e. list_of_experiments = ['A190_MD', 'A190_total', 'A135_MD', 'A135_total']; an additional A190 or A135 experiment is forbidden
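        ### Illustrative example (editor's sketch; names taken from the comment above, a='MD' and b='total' assumed):
        ### with self.experiments = ['A190_MD', 'A190_total', 'A135_MD', 'A135_total'] the method
        ### returns [['A135_MD', 'A135_total'], ['A190_MD', 'A190_total']] - both lists are sorted
        ### alphabetically before pairing, so couples are matched by their shared root.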
a_experiments = list()
b_experiments = list()
for e in self.experiments: ##making list with experiments containing -a or -b
if re.search(a, e):
a_experiments.append(e)
elif re.search(b, e):
b_experiments.append(e)
else:
print "fig_ratio module found experiment "+e+" neither containing -a nor -b parameter. \n " \
"This experiment won't be considered"
if len(a_experiments) == len(b_experiments): ##checking no. of experiments to divide each other
pass
else:
print "Unequal no. of experiments with -a and -b parameter:"
print "-a exp: "+','.join(a_experiments)+"\n -b exp: "+','.join(b_experiments)
exit()
a_experiments.sort()
b_experiments.sort()
ratio_exp_list = list()
for e in a_experiments:
gene = re.search(r'(.*)_'+a, e).group(1)
second = [d for d in b_experiments if re.search(gene,d)]
if len(second) > 1:
                print 'Too many elements to divide for '+gene
exit()
couple = [e, second[0]]
ratio_exp_list.append(couple)
print 'Found pairs:'
print ratio_exp_list
return ratio_exp_list
# print plots, gene after gene,
def fig_gene_after_gene(self):
print '# Plotting gene after gene.'
for e in self.experiments:
fig = plt.figure(figsize=(12, 9), dpi=200, facecolor='w', edgecolor='k')
fig_no = 0
plot_no = 0
for i_gene_id in self.genes_id_list:
gene_name = self.id_to_names[i_gene_id]
gene_length = self.genes[gene_name]['gene_length']
RT_begin = self.five_prime_flank + gene_length + self.readthrough_start
plot_no += 1
fig.add_subplot(3, 2, plot_no)
plt.tight_layout()
plt.title(e)
plt.ylabel("no. of reads")
try:
plt.plot(self.data[gene_name]['position'], self.data[gene_name][e])
if self.print_peaks == True:
for i in self.list_of_peaks[gene_name][e]['peaks']:
plt.axvline(i-self.five_prime_flank, color='blue')
if self.print_valleys == True:
for i in self.list_of_peaks[gene_name][e]['valleys']:
plt.axvline(i-self.five_prime_flank, color='red')
for i in self.genes[gene_name]['exons']:
plt.axvspan(i[0], i[1], alpha=0.2, color='green')
# some trick figured out by Hywel to plot without problems
y_array = np.array(self.data[gene_name][e][RT_begin:])
x_array = np.array(self.data[gene_name]['position'][RT_begin:])
y_array[0] = 0
y_array[len(y_array)-1] = 0
plt.fill(x_array, y_array)
plt.xlabel('ID: '+i_gene_id+', Name: '+gene_name)
# plt.text(210-self.five_prime_flank,max(self.data[gene_name][e])-150,'RT='+str(round(self.genes[gene_name]['RT'][e],3)))
except KeyError:
plt.xlabel("NO READS")
if plot_no == 6:
fig_no += 1
plt.savefig(self.prefix+e+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'_fig_'+str(fig_no)+'.png')
plt.clf()
plot_no = 0
if plot_no > 0:
plt.savefig(self.prefix+e+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'_fig_'+str(fig_no+1)+'.png')
plt.clf()
return True
# print plots, gene after gene,
def mark_T(self, anti_plot=False):
print '# Plotting gene after gene marking each T.'
for e in self.experiments:
fig = plt.figure(figsize=(12, 9), dpi=200, facecolor='w', edgecolor='k')
fig_no = 0
plot_no = 0
for i_gene_id in self.genes_id_list:
gene_name = self.id_to_names[i_gene_id]
gene_length = self.genes[gene_name]['gene_length']
RT_begin = self.five_prime_flank + gene_length + self.readthrough_start
plot_no += 1
fig.add_subplot(3, 2, plot_no)
plt.tight_layout()
plt.title(e)
plt.ylabel("no. of reads")
try:
plt.plot(self.data[gene_name]['position'], self.data[gene_name][e])
if self.print_peaks == True:
for i in self.list_of_peaks[gene_name][e]['peaks']:
plt.axvline(i-self.five_prime_flank, color='blue')
if self.print_valleys == True:
for i in self.list_of_peaks[gene_name][e]['valleys']:
plt.axvline(i-self.five_prime_flank, color='red')
for i in self.genes[gene_name]['exons']:
plt.axvspan(i[0], i[1], alpha=0.2, color='green')
#marking all T nucleotides
new_data = self.data[gene_name][self.data[gene_name].nucleotide == 'T']
for n in list(new_data["position"]):
plt.axvline(n, color='grey')
if anti_plot == True:
                        new_data = self.data[gene_name][self.data[gene_name].nucleotide.isin(['G', 'C'])] # ('G' or 'C') would evaluate to 'G' only
for n in list(new_data["position"]):
plt.axvline(n, color='red', alpha=0.7)
# some trick figured out by Hywel to plot without problems
y_array = np.array(self.data[gene_name][e][RT_begin:])
x_array = np.array(self.data[gene_name]['position'][RT_begin:])
y_array[0] = 0
y_array[len(y_array)-1] = 0
plt.fill(x_array, y_array)
plt.xlabel('ID: '+i_gene_id+', Name: '+gene_name)
# plt.text(210-self.five_prime_flank,max(self.data[gene_name][e])-150,'RT='+str(round(self.genes[gene_name]['RT'][e],3)))
except KeyError:
plt.xlabel("NO READS")
if plot_no == 6:
fig_no += 1
plt.savefig(self.prefix+e+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'_fig_'+str(fig_no)+'.png')
plt.clf()
plot_no = 0
if plot_no > 0:
plt.savefig(self.prefix+e+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'_fig_'+str(fig_no+1)+'.png')
plt.clf()
return True
# print plots, gene after gene,
def fig_boxes(self,abox,bbox):
        print '# Reading A and B box files'
abox_dict = dict()
abox_len = 16
for line in abox:
try:
if line[0] == "#": continue
line_elements = line.strip().split('\t')
gene_name, start = line_elements[0],int(line_elements[1])
if gene_name not in abox_dict:
abox_dict[gene_name] = start
except IndexError:
sys.stderr.write("\nIndexError at line:\n%s\n" % line)
pass
bbox_dict = dict()
bbox_len = 9
for line in bbox:
try:
if line[0] == "#": continue
line_elements = line.strip().split('\t')
gene_name, start = line_elements[0],int(line_elements[1])
if gene_name not in bbox_dict:
bbox_dict[gene_name] = start
except IndexError:
sys.stderr.write("\nIndexError at line:\n%s\n" % line)
pass
print '# Plotting gene after gene, marking boxes.'
for e in self.experiments:
fig = plt.figure(figsize=(12, 9), dpi=200, facecolor='w', edgecolor='k')
fig_no = 0
plot_no = 0
for i_gene_id in self.genes_id_list:
gene_name = self.id_to_names[i_gene_id]
plot_no += 1
fig.add_subplot(3, 2, plot_no)
plt.tight_layout()
plt.title(e)
plt.ylabel("no. of reads")
try:
plt.plot(self.data[gene_name][e]['position'], self.data[gene_name][e]['hits'])
if self.print_peaks == True:
plt.plot(self.data[gene_name][e]['position'], self.data[gene_name][e]['peaks_to_plot'], color='green')
if self.print_valleys == True:
plt.plot(self.data[gene_name][e]['position'], self.data[gene_name][e]['valleys_to_plot'], color='red')
#plot exons background
for i in self.genes[gene_name]['exons']:
plt.axvspan(i[0], i[1], alpha=0.2, color='green')
#plot A and B box
try:
plt.axvspan(abox_dict[gene_name], abox_dict[gene_name]+abox_len, alpha=0.6, color='red')
except:
pass
try:
plt.axvspan(bbox_dict[gene_name], bbox_dict[gene_name]+bbox_len, alpha=0.6, color='green')
except:
pass
# some trick figured out by Hywel to plot without problems
y_array = np.array(self.data[gene_name][e]['RT_to_plot'])
x_array = np.array(self.data[gene_name][e]['position'])
y_array[0] = 0
y_array[len(y_array)-1] = 0
plt.fill(x_array, y_array)
plt.xlabel('ID: '+i_gene_id+', Name: '+gene_name)
except KeyError:
plt.xlabel("NO READS")
if plot_no == 6:
fig_no += 1
plt.savefig(self.prefix+e+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'_fig_'+str(fig_no)+'.png')
plt.clf()
plot_no = 0
if plot_no > 0:
plt.savefig(self.prefix+e+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'_fig_'+str(fig_no+1)+'.png')
plt.clf()
return True
# print histogram for 3` end including nucleotides
def fig_3end_nucleotide_resolution(self):
print '# Plotting 3` end at nucleotide resolution.'
fig = plt.figure(figsize=(15, 10), dpi=80, facecolor='w', edgecolor='k')
if len(self.experiments) > 1:
exit('More than one experiment. Preprocess concat file to get only one experiment:' \
"cat your.concat | awk '$7 == \"experiment\" {print $0}' > experiment.concat")
else:
e = self.experiments[0]
for tRNA_group in self.rt:
fig_no = 0
plot_no = 0
rt_sorted = collections.OrderedDict(sorted(self.rt[tRNA_group].items(), reverse=True, key=lambda t: t[1]))
for i_gene_id in rt_sorted:
gene_name = self.id_to_names[i_gene_id]
plot_no += 1
ax = fig.add_subplot(5, 1, plot_no)
fig.tight_layout()
plt.title(tRNA_group)
try:
self.data[gene_name] = self.data[gene_name][(-10-self.three_prime_flank):(120-self.three_prime_flank):] # plot from last 130 nt
# print self.data[gene_name]
bar = ax.bar(self.data[gene_name]['position'], self.data[gene_name][e], width=0.5)
for i in range(0,10):
bar[i].set_color('green')
for i in range(10,len(bar)):
bar[i].set_color('grey')
plt.xticks(list(self.data[gene_name]['position']), list(self.data[gene_name]['nucleotide']), fontsize=8)
# uno_to_plot = self.data[gene_name][(-10-self.three_prime_flank):(120-self.three_prime_flank):] # plot from last 130 nt
# # print uno_to_plot
# # exit()
# ax.set_ylabel("no. of reads")
# ax.set_xlabel('ID: '+i_gene_id+', Name: '+gene_name)
# bar = ax.bar(uno_to_plot['position'], uno_to_plot[e], width=0.5)
# axes = plt.gca()
# axes.set_ylim([0,2000])
# for i in range(0,10):
# bar[i].set_color('green')
# for i in range(10,len(bar)):
# bar[i].set_color('grey')
# plt.xticks(list(uno_to_plot['position']), list(uno_to_plot['nucleotide']), fontsize=8)
#
# # plot_no += 1
# ax = fig.add_subplot(5, 1, plot_no)
# ax.set_ylabel("no. of reads")
# ax.set_xlabel('ID: '+i_gene_id+', Name: '+gene_name)
# duo_to_plot = self.data[gene_name][(120-self.three_prime_flank)::] # plot from last 130 nt
# bar = ax.bar(duo_to_plot['position'], duo_to_plot[e], width=0.5, color='grey')
# axes = plt.gca()
# axes.set_ylim([0,2000])
# # axes.set_ylim([0,max(self.data[gene_name][e]['hits'])])
# plt.xticks(list(duo_to_plot['position']), list(duo_to_plot['nucleotide']), fontsize=8)
except KeyError:
plt.text(0.5,0.5,"NO READS")
if plot_no == 5:
fig_no += 1
plt.savefig(self.prefix+'nuc3end'+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'_'+tRNA_group+'_fig_'+str(fig_no)+'.png')
plt.clf()
plot_no = 0
if plot_no > 0:
plt.savefig(self.prefix+'nuc3end'+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'_'+tRNA_group+'_fig_'+str(fig_no+1)+'.png')
plt.clf()
return True
    # print histogram for 5` end including nucleotides
def fig_5end_nucleotide_resolution(self):
print "# Plotting 5' end at nucleotide resolution."
fig = plt.figure(figsize=(15, 10), dpi=80, facecolor='w', edgecolor='k')
if len(self.experiments) > 1:
exit('More than one experiment. Preprocess concat file to get only one experiment:' \
"cat your.concat | awk '$7 == \"experiment\" {print $0}' > experiment.concat")
else:
e = self.experiments[0]
for tRNA_group in self.rt:
fig_no = 0
plot_no = 0
rt_sorted = collections.OrderedDict(
sorted(self.rt[tRNA_group].items(), reverse=True, key=lambda t: t[1]))
for i_gene_id in rt_sorted:
gene_name = self.id_to_names[i_gene_id]
plot_no += 1
ax = fig.add_subplot(5, 1, plot_no)
fig.tight_layout()
plt.title(tRNA_group)
try:
self.data[gene_name] = self.data[gene_name][self.five_prime_flank-20:self.five_prime_flank+10:] # plot first 20 nt
# print self.data[gene_name]
bar = ax.bar(self.data[gene_name]['position'], self.data[gene_name][e], width=0.5)
for i in range(0, 20):
bar[i].set_color('green')
for i in range(20, len(bar)):
bar[i].set_color('grey')
plt.xticks(list(self.data[gene_name]['position']), list(self.data[gene_name]['nucleotide']),fontsize=8)
except KeyError:
plt.text(0.5, 0.5, "NO READS")
if plot_no == 5:
fig_no += 1
plt.savefig(self.prefix + 'nuc5end' + '_l' + str(self.lookahead) + '_t' + str(
self.hits_threshold) + '_' + tRNA_group + '_fig_' + str(fig_no) + '.png')
plt.clf()
plot_no = 0
if plot_no > 0:
plt.savefig(self.prefix + 'nuc5end' + '_l' + str(self.lookahead) + '_t' + str(
self.hits_threshold) + '_' + tRNA_group + '_fig_' + str(fig_no + 1) + '.png')
plt.clf()
return True
def fig_nucleotide_gene(self):
print '# Plotting gene in nucleotide resolution.'
fig = plt.figure(figsize=(15, 10), dpi=80, facecolor='w', edgecolor='k')
if len(self.experiments) > 1:
print 'More than one experiment. Preprocess concat file to get only one experiment:' \
"cat your.concat | awk '$7 == \"experiment\" {print $0}' > experiment.concat"
exit()
else:
e = self.experiments[0]
fig_no, plot_no = 0, 0
for i_gene_id in self.genes_id_list:
gene_name = self.id_to_names[i_gene_id]
plot_no += 1
ax = fig.add_subplot(5, 1, plot_no)
fig.tight_layout()
plt.title(e)
ax.set_ylabel("no. of reads")
ax.set_xlabel('ID: '+i_gene_id+', Name: '+gene_name)
try:
self.data[gene_name][e] = self.data[gene_name][e][self.three_prime_flank:-self.three_prime_flank:] # plot only gene
bar = ax.bar(self.data[gene_name][e]['position'], self.data[gene_name][e]['hits'], width=0.5)
for i in self.genes[gene_name]['exons']:
plt.axvspan(i[0], i[1], alpha=0.2, color='green')
plt.xticks(list(self.data[gene_name][e]['position']), list(self.data[gene_name][e]['nucleotides']), fontsize=8)
except KeyError:
plt.text(0.5,0.5,"NO READS")
if plot_no == 5:
fig_no += 1
plt.savefig(self.prefix+'nuc_gene'+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'_'+'_fig_'+str(fig_no)+'.png')
plt.clf()
plot_no = 0
if plot_no > 0:
plt.savefig(self.prefix+'nuc_gene'+'_l'+str(self.lookahead)+'_t'+str(self.hits_threshold)+'_'+'_fig_'+str(fig_no+1)+'.png')
plt.clf()
return True
def fig_energy(self, step):
print 'Plotting 3` end and calculating energy for plots...'
fig = plt.figure(figsize=(15, 10), dpi=80, facecolor='w', edgecolor='k')
if len(self.experiments) > 1:
print 'More than one experiment. Preprocess concat file to get only one experiment:' \
"cat your.concat | awk '$7 == \"experiment\" {print $0}' > experiment.concat"
exit()
else:
e = self.experiments[0]
for tRNA_group in self.rt:
fig_no = 0
plot_no = 0
rt_sorted = collections.OrderedDict(sorted(self.rt[tRNA_group].items(), reverse=True, key=lambda t: t[1]))
for i_gene_id in rt_sorted:
gene_name = self.id_to_names[i_gene_id]
plot_no += 1
fig.add_subplot(5, 1, plot_no)
fig.tight_layout()
plt.title(tRNA_group)
plt.ylabel("no. of reads")
plt.xlabel('ID: '+i_gene_id+', Name: '+gene_name)
                # NOTE: plt.text() needs x and y coordinates; the original call omitted them, so
                # axes-fraction coordinates are assumed here (placement is a guess, not from the source)
                plt.text(0.95, 0.95, str(self.genes[gene_name]['RT'][e]), transform=plt.gca().transAxes,
                         horizontalalignment='right', verticalalignment='top')
try:
self.data[gene_name][e] = self.data[gene_name][e][(-10-self.three_prime_flank):(140-self.three_prime_flank):] # plot from last 150 nt
bar = plt.bar(self.data[gene_name][e]['position'], self.data[gene_name][e]['hits'], width=0.6)
for i in range(0,10):
bar[i].set_color('green')
for i in range(10,len(bar)):
bar[i].set_color('grey')
plt.xticks(list(self.data[gene_name][e]['position']), list(self.data[gene_name][e]['nucleotides']), fontsize=8)
ax2 = plt.twinx()
ax2.set_ylabel("energy")
                    ## plot energy in windows of `step` nucleotides
r_delta_H = 0
r_delta_S = 0
d_delta_H = 0
d_delta_S = 0
window = step
pair = str()
list_for_G = list()
for i in range(min(self.data[gene_name][e].index),max(self.data[gene_name][e].index)+1):
if len(pair) > 1:
pair = pair[1]
pair += self.data[gene_name][e]['nucleotides'][i]
if len(pair) == 2:
r_delta_H += self.rna_dna[pair][0]
r_delta_S += self.rna_dna[pair][1]
d_delta_H += self.dna_dna[pair][0]
d_delta_S += self.dna_dna[pair][1]
list_for_G.append(i)
window -= 1
if window == 0:
r_delta_G = r_delta_H * (1-(310/(r_delta_H/r_delta_S)))
d_delta_G = d_delta_H * (1-(310/(d_delta_H/d_delta_S)))
for a in list_for_G:
self.data[gene_name][e].loc[a,'r_delta_G'] = r_delta_G
self.data[gene_name][e].loc[a,'d_delta_G'] = d_delta_G
list_for_G = []
r_delta_G = 0
d_delta_G = 0
r_delta_H = 0
r_delta_S = 0
d_delta_H = 0
d_delta_S = 0
window = step
plt.plot(list(self.data[gene_name][e]['position']), list(self.data[gene_name][e]['r_delta_G']))
plt.plot(list(self.data[gene_name][e]['position']), list(self.data[gene_name][e]['d_delta_G']))
except KeyError:
plt.text(10, 10, "NO READS")
if plot_no == 5:
fig_no += 1
plt.savefig(self.prefix+'nuc_energy'+'_w'+str(step)+'_'+tRNA_group+'_fig_'+str(fig_no)+'.png')
plt.clf()
plot_no = 0
if plot_no > 0:
plt.savefig(self.prefix+'nuc_energy'+'_w'+str(step)+'_'+tRNA_group+'_fig_'+str(fig_no+1)+'.png')
plt.clf()
return True
def test_print(self, what):
pd.set_option('display.max_rows', 5000)
print what
pd.reset_option('display.max_rows')
return True | apache-2.0 |
xodus7/tensorflow | tensorflow/examples/learn/text_classification_character_rnn.py | 38 | 4036 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of recurrent neural networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
MAX_LABEL = 15
CHARS_FEATURE = 'chars' # Name of the input character feature.
def char_rnn_model(features, labels, mode):
"""Character level recurrent neural network model to predict classes."""
byte_vectors = tf.one_hot(features[CHARS_FEATURE], 256, 1., 0.)
byte_list = tf.unstack(byte_vectors, axis=1)
cell = tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE)
_, encoding = tf.nn.static_rnn(cell, byte_list, dtype=tf.float32)
logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = tf.estimator.Estimator(model_fn=char_rnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_train},
y=y_train,
batch_size=128,
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Eval.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy: {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
elkingtonmcb/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colors.py | 69 | 31676 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of
colors called a colormap. Colormapping typically involves two steps:
a data array is first mapped onto the range 0-1 using an instance
of :class:`Normalize` or of a subclass; then this number in the 0-1
range is mapped to a color using an instance of a subclass of
:class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all
the built-in colormap instances, but is also useful for making
custom colormaps, and :class:`ListedColormap`, which is used for
generating a custom colormap from a list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single
color specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic builtin colors, you can use a single letter
- b : blue
- g : green
- r : red
- c : cyan
- m : magenta
- y : yellow
- k : black
- w : white
Gray shades can be given as a string encoding a float in the 0-1
range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify
the color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B*
are in the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and
'chartreuse' are supported.
"""
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
parts = np.__version__.split('.')
NP_MAJOR, NP_MINOR = map(int, parts[:2])
# true if clip supports the out kwarg
NP_CLIP_OUT = NP_MAJOR>=1 and NP_MINOR>=2
cnames = {
'aliceblue' : '#F0F8FF',
'antiquewhite' : '#FAEBD7',
'aqua' : '#00FFFF',
'aquamarine' : '#7FFFD4',
'azure' : '#F0FFFF',
'beige' : '#F5F5DC',
'bisque' : '#FFE4C4',
'black' : '#000000',
'blanchedalmond' : '#FFEBCD',
'blue' : '#0000FF',
'blueviolet' : '#8A2BE2',
'brown' : '#A52A2A',
'burlywood' : '#DEB887',
'cadetblue' : '#5F9EA0',
'chartreuse' : '#7FFF00',
'chocolate' : '#D2691E',
'coral' : '#FF7F50',
'cornflowerblue' : '#6495ED',
'cornsilk' : '#FFF8DC',
'crimson' : '#DC143C',
'cyan' : '#00FFFF',
'darkblue' : '#00008B',
'darkcyan' : '#008B8B',
'darkgoldenrod' : '#B8860B',
'darkgray' : '#A9A9A9',
'darkgreen' : '#006400',
'darkkhaki' : '#BDB76B',
'darkmagenta' : '#8B008B',
'darkolivegreen' : '#556B2F',
'darkorange' : '#FF8C00',
'darkorchid' : '#9932CC',
'darkred' : '#8B0000',
'darksalmon' : '#E9967A',
'darkseagreen' : '#8FBC8F',
'darkslateblue' : '#483D8B',
'darkslategray' : '#2F4F4F',
'darkturquoise' : '#00CED1',
'darkviolet' : '#9400D3',
'deeppink' : '#FF1493',
'deepskyblue' : '#00BFFF',
'dimgray' : '#696969',
'dodgerblue' : '#1E90FF',
'firebrick' : '#B22222',
'floralwhite' : '#FFFAF0',
'forestgreen' : '#228B22',
'fuchsia' : '#FF00FF',
'gainsboro' : '#DCDCDC',
'ghostwhite' : '#F8F8FF',
'gold' : '#FFD700',
'goldenrod' : '#DAA520',
'gray' : '#808080',
'green' : '#008000',
'greenyellow' : '#ADFF2F',
'honeydew' : '#F0FFF0',
'hotpink' : '#FF69B4',
'indianred' : '#CD5C5C',
'indigo' : '#4B0082',
'ivory' : '#FFFFF0',
'khaki' : '#F0E68C',
'lavender' : '#E6E6FA',
'lavenderblush' : '#FFF0F5',
'lawngreen' : '#7CFC00',
'lemonchiffon' : '#FFFACD',
'lightblue' : '#ADD8E6',
'lightcoral' : '#F08080',
'lightcyan' : '#E0FFFF',
'lightgoldenrodyellow' : '#FAFAD2',
'lightgreen' : '#90EE90',
'lightgrey' : '#D3D3D3',
'lightpink' : '#FFB6C1',
'lightsalmon' : '#FFA07A',
'lightseagreen' : '#20B2AA',
'lightskyblue' : '#87CEFA',
'lightslategray' : '#778899',
'lightsteelblue' : '#B0C4DE',
'lightyellow' : '#FFFFE0',
'lime' : '#00FF00',
'limegreen' : '#32CD32',
'linen' : '#FAF0E6',
'magenta' : '#FF00FF',
'maroon' : '#800000',
'mediumaquamarine' : '#66CDAA',
'mediumblue' : '#0000CD',
'mediumorchid' : '#BA55D3',
'mediumpurple' : '#9370DB',
'mediumseagreen' : '#3CB371',
'mediumslateblue' : '#7B68EE',
'mediumspringgreen' : '#00FA9A',
'mediumturquoise' : '#48D1CC',
'mediumvioletred' : '#C71585',
'midnightblue' : '#191970',
'mintcream' : '#F5FFFA',
'mistyrose' : '#FFE4E1',
'moccasin' : '#FFE4B5',
'navajowhite' : '#FFDEAD',
'navy' : '#000080',
'oldlace' : '#FDF5E6',
'olive' : '#808000',
'olivedrab' : '#6B8E23',
'orange' : '#FFA500',
'orangered' : '#FF4500',
'orchid' : '#DA70D6',
'palegoldenrod' : '#EEE8AA',
'palegreen' : '#98FB98',
    'paleturquoise'         : '#AFEEEE',
    'palevioletred'         : '#DB7093',
'papayawhip' : '#FFEFD5',
'peachpuff' : '#FFDAB9',
'peru' : '#CD853F',
'pink' : '#FFC0CB',
'plum' : '#DDA0DD',
'powderblue' : '#B0E0E6',
'purple' : '#800080',
'red' : '#FF0000',
'rosybrown' : '#BC8F8F',
'royalblue' : '#4169E1',
'saddlebrown' : '#8B4513',
'salmon' : '#FA8072',
'sandybrown' : '#FAA460',
'seagreen' : '#2E8B57',
'seashell' : '#FFF5EE',
'sienna' : '#A0522D',
'silver' : '#C0C0C0',
'skyblue' : '#87CEEB',
'slateblue' : '#6A5ACD',
'slategray' : '#708090',
'snow' : '#FFFAFA',
'springgreen' : '#00FF7F',
'steelblue' : '#4682B4',
'tan' : '#D2B48C',
'teal' : '#008080',
'thistle' : '#D8BFD8',
'tomato' : '#FF6347',
'turquoise' : '#40E0D0',
'violet' : '#EE82EE',
'wheat' : '#F5DEB3',
'white' : '#FFFFFF',
'whitesmoke' : '#F5F5F5',
'yellow' : '#FFFF00',
'yellowgreen' : '#9ACD32',
}
# add british equivs
for k, v in cnames.items():
if k.find('gray')>=0:
k = k.replace('gray', 'grey')
cnames[k] = v
def is_color_like(c):
'Return *True* if *c* can be converted to *RGB*'
try:
colorConverter.to_rgb(c)
return True
except ValueError:
return False
def rgb2hex(rgb):
'Given a len 3 rgb tuple of 0-1 floats, return the hex string'
return '#%02x%02x%02x' % tuple([round(val*255) for val in rgb])
hexColorPattern = re.compile("\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
"""
Take a hex string *s* and return the corresponding rgb 3-tuple
Example: #efefef -> (0.93725, 0.93725, 0.93725)
"""
if not isinstance(s, basestring):
raise TypeError('hex2color requires a string argument')
if hexColorPattern.match(s) is None:
raise ValueError('invalid hex color string "%s"' % s)
return tuple([int(n, 16)/255.0 for n in (s[1:3], s[3:5], s[5:7])])
class ColorConverter:
"""
Provides methods for converting color specifications to *RGB* or *RGBA*
Caching is used for more efficient conversion upon repeated calls
with the same argument.
Ordinarily only the single instance instantiated in this module,
*colorConverter*, is needed.
"""
colors = {
'b' : (0.0, 0.0, 1.0),
'g' : (0.0, 0.5, 0.0),
'r' : (1.0, 0.0, 0.0),
'c' : (0.0, 0.75, 0.75),
'm' : (0.75, 0, 0.75),
'y' : (0.75, 0.75, 0),
'k' : (0.0, 0.0, 0.0),
'w' : (1.0, 1.0, 1.0),
}
cache = {}
def to_rgb(self, arg):
"""
Returns an *RGB* tuple of three floats from 0-1.
*arg* can be an *RGB* or *RGBA* sequence or a string in any of
several forms:
1) a letter from the set 'rgbcmykw'
2) a hex color string, like '#00FFFF'
3) a standard name, like 'aqua'
4) a float, like '0.4', indicating gray on a 0-1 scale
if *arg* is *RGBA*, the *A* will simply be discarded.
"""
try: return self.cache[arg]
except KeyError: pass
except TypeError: # could be unhashable rgb seq
arg = tuple(arg)
try: return self.cache[arg]
except KeyError: pass
except TypeError:
raise ValueError(
'to_rgb: arg "%s" is unhashable even inside a tuple'
% (str(arg),))
try:
if cbook.is_string_like(arg):
color = self.colors.get(arg, None)
if color is None:
str1 = cnames.get(arg, arg)
if str1.startswith('#'):
color = hex2color(str1)
else:
fl = float(arg)
if fl < 0 or fl > 1:
raise ValueError(
'gray (string) must be in range 0-1')
color = tuple([fl]*3)
elif cbook.iterable(arg):
if len(arg) > 4 or len(arg) < 3:
raise ValueError(
'sequence length is %d; must be 3 or 4'%len(arg))
color = tuple(arg[:3])
if [x for x in color if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
raise ValueError('cannot convert argument to rgb sequence')
self.cache[arg] = color
except (KeyError, ValueError, TypeError), exc:
raise ValueError('to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
# Error messages could be improved by handling TypeError
# separately; but this should be rare and not too hard
# for the user to figure out as-is.
return color
def to_rgba(self, arg, alpha=None):
"""
Returns an *RGBA* tuple of four floats from 0-1.
For acceptable values of *arg*, see :meth:`to_rgb`.
If *arg* is an *RGBA* sequence and *alpha* is not *None*,
*alpha* will replace the original *A*.
"""
try:
if not cbook.is_string_like(arg) and cbook.iterable(arg):
if len(arg) == 4:
if [x for x in arg if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                        raise ValueError('number in rgba sequence outside 0-1 range')
if alpha is None:
return tuple(arg)
if alpha < 0.0 or alpha > 1.0:
raise ValueError("alpha must be in range 0-1")
return arg[0], arg[1], arg[2], arg[3] * alpha
r,g,b = arg[:3]
if [x for x in (r,g,b) if (float(x) < 0) or (x > 1)]:
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
r,g,b = self.to_rgb(arg)
if alpha is None:
alpha = 1.0
return r,g,b,alpha
except (TypeError, ValueError), exc:
raise ValueError('to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
def to_rgba_array(self, c, alpha=None):
"""
Returns a numpy array of *RGBA* tuples.
Accepts a single mpl color spec or a sequence of specs.
Special case to handle "no color": if *c* is "none" (case-insensitive),
then an empty array will be returned. Same for an empty list.
"""
try:
if c.lower() == 'none':
return np.zeros((0,4), dtype=np.float_)
except AttributeError:
pass
if len(c) == 0:
return np.zeros((0,4), dtype=np.float_)
try:
result = np.array([self.to_rgba(c, alpha)], dtype=np.float_)
except ValueError:
if isinstance(c, np.ndarray):
if c.ndim != 2 and c.dtype.kind not in 'SU':
raise ValueError("Color array must be two-dimensional")
result = np.zeros((len(c), 4))
for i, cc in enumerate(c):
result[i] = self.to_rgba(cc, alpha) # change in place
return np.asarray(result, np.float_)
colorConverter = ColorConverter()
def makeMappingArray(N, data):
"""Create an *N* -element 1-d lookup table
*data* represented by a list of x,y0,y1 mapping correspondences.
Each element in this list represents how a value between 0 and 1
(inclusive) represented by x is mapped to a corresponding value
between 0 and 1 (inclusive). The two values of y are to allow
for discontinuous mapping functions (say as might be found in a
sawtooth) where y0 represents the value of y for values of x
<= to that given, and y1 is the value to be used for x > than
that given). The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
The function returns an array "result" where ``result[x*(N-1)]``
gives the closest value for values of x between 0 and 1.
"""
try:
adata = np.array(data)
except:
raise TypeError("data must be convertable to an array")
shape = adata.shape
    if len(shape) != 2 or shape[1] != 3: # 'and' would silently accept malformed data and can raise IndexError on 1-d input
raise ValueError("data must be nx3 format")
x = adata[:,0]
y0 = adata[:,1]
y1 = adata[:,2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0. and end with x=1")
if np.sometrue(np.sort(x)-x):
raise ValueError(
"data mapping points must have x in increasing order")
# begin generation of lookup table
x = x * (N-1)
lut = np.zeros((N,), np.float)
xind = np.arange(float(N))
ind = np.searchsorted(x, xind)[1:-1]
lut[1:-1] = ( ((xind[1:-1] - x[ind-1]) / (x[ind] - x[ind-1]))
* (y0[ind] - y1[ind-1]) + y1[ind-1])
lut[0] = y1[0]
lut[-1] = y0[-1]
# ensure that the lut is confined to values between 0 and 1 by clipping it
    lut = np.clip(lut, 0.0, 1.0) # np.clip returns a new array; the result must be kept
#lut = where(lut > 1., 1., lut)
#lut = where(lut < 0., 0., lut)
return lut
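# Editor's note -- an illustrative call (kept as a comment; values are assumed):
#     makeMappingArray(5, [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)])
# linearly interpolates between the two mapping points and returns
#     array([ 0.  , 0.25, 0.5 , 0.75, 1.  ])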
class Colormap:
"""Base class for all scalar to rgb mappings
Important methods:
* :meth:`set_bad`
* :meth:`set_under`
* :meth:`set_over`
"""
def __init__(self, name, N=256):
"""
Public class attributes:
:attr:`N` : number of rgb quantization levels
:attr:`name` : name of colormap
"""
self.name = name
self.N = N
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
self._rgba_under = None
self._rgba_over = None
self._i_under = N
self._i_over = N+1
self._i_bad = N+2
self._isinit = False
def __call__(self, X, alpha=1.0, bytes=False):
"""
*X* is either a scalar or an array (of any dimension).
If scalar, a tuple of rgba values is returned, otherwise
an array with the new shape = oldshape+(4,). If the X-values
are integers, then they are used as indices into the array.
If they are floating point, then they must be in the
interval (0.0, 1.0).
Alpha must be a scalar.
If bytes is False, the rgba values will be floats on a
0-1 scale; if True, they will be uint8, 0-255.
"""
if not self._isinit: self._init()
alpha = min(alpha, 1.0) # alpha must be between 0 and 1
alpha = max(alpha, 0.0)
self._lut[:-3, -1] = alpha
mask_bad = None
if not cbook.iterable(X):
vtype = 'scalar'
xa = np.array([X])
else:
vtype = 'array'
xma = ma.asarray(X)
xa = xma.filled(0)
mask_bad = ma.getmask(xma)
if xa.dtype.char in np.typecodes['Float']:
np.putmask(xa, xa==1.0, 0.9999999) #Treat 1.0 as slightly less than 1.
# The following clip is fast, and prevents possible
# conversion of large positive values to negative integers.
if NP_CLIP_OUT:
np.clip(xa * self.N, -1, self.N, out=xa)
else:
xa = np.clip(xa * self.N, -1, self.N)
xa = xa.astype(int)
# Set the over-range indices before the under-range;
# otherwise the under-range values get converted to over-range.
np.putmask(xa, xa>self.N-1, self._i_over)
np.putmask(xa, xa<0, self._i_under)
if mask_bad is not None and mask_bad.shape == xa.shape:
np.putmask(xa, mask_bad, self._i_bad)
if bytes:
lut = (self._lut * 255).astype(np.uint8)
else:
lut = self._lut
rgba = np.empty(shape=xa.shape+(4,), dtype=lut.dtype)
lut.take(xa, axis=0, mode='clip', out=rgba)
# twice as fast as lut[xa];
# using the clip or wrap mode and providing an
# output array speeds it up a little more.
if vtype == 'scalar':
rgba = tuple(rgba[0,:])
return rgba
def set_bad(self, color = 'k', alpha = 1.0):
'''Set color to be used for masked values.
'''
self._rgba_bad = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_under(self, color = 'k', alpha = 1.0):
'''Set color to be used for low out-of-range values.
Requires norm.clip = False
'''
self._rgba_under = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_over(self, color = 'k', alpha = 1.0):
'''Set color to be used for high out-of-range values.
Requires norm.clip = False
'''
self._rgba_over = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def _set_extremes(self):
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N-1]
self._lut[self._i_bad] = self._rgba_bad
    def _init(self):
'''Generate the lookup table, self._lut'''
raise NotImplementedError("Abstract class only")
def is_gray(self):
if not self._isinit: self._init()
return (np.alltrue(self._lut[:,0] == self._lut[:,1])
and np.alltrue(self._lut[:,0] == self._lut[:,2]))
class LinearSegmentedColormap(Colormap):
"""Colormap objects based on lookup tables using linear segments.
The lookup table is generated using linear interpolation for each
primary color, with the 0-1 domain divided into any number of
segments.
"""
def __init__(self, name, segmentdata, N=256):
"""Create color map from linear mapping segments
segmentdata argument is a dictionary with a red, green and blue
entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
forming rows in a table.
Example: suppose you want red to increase from 0 to 1 over
the bottom half, green to do the same over the middle half,
and blue over the top half. Then you would use::
cdict = {'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)]}
Each row in the table for a given color is a sequence of
*x*, *y0*, *y1* tuples. In each sequence, *x* must increase
monotonically from 0 to 1. For any input value *z* falling
between *x[i]* and *x[i+1]*, the output value of a given color
will be linearly interpolated between *y1[i]* and *y0[i+1]*::
row i: x y0 y1
/
/
row i+1: x y0 y1
Hence y0 in the first row and y1 in the last row are never used.
.. seealso::
:func:`makeMappingArray`
"""
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
Colormap.__init__(self, name, N)
self._segmentdata = segmentdata
def _init(self):
self._lut = np.ones((self.N + 3, 4), np.float)
self._lut[:-3, 0] = makeMappingArray(self.N, self._segmentdata['red'])
self._lut[:-3, 1] = makeMappingArray(self.N, self._segmentdata['green'])
self._lut[:-3, 2] = makeMappingArray(self.N, self._segmentdata['blue'])
self._isinit = True
self._set_extremes()
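# Editor's note -- a hedged usage sketch for the cdict example shown in the class docstring
# (names are illustrative, kept as comments):
#     my_cmap = LinearSegmentedColormap('my_colormap', cdict, N=256)
#     my_cmap(0.25)   # RGBA tuple; red has ramped to ~0.5 at this point of the bottom half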
class ListedColormap(Colormap):
"""Colormap object generated from a list of colors.
This may be most useful when indexing directly into a colormap,
but it can also be used to generate special colormaps for ordinary
mapping.
"""
def __init__(self, colors, name = 'from_list', N = None):
"""
Make a colormap from a list of colors.
*colors*
a list of matplotlib color specifications,
or an equivalent Nx3 floating point array (*N* rgb values)
*name*
a string to identify the colormap
*N*
the number of entries in the map. The default is *None*,
in which case there is one colormap entry for each
element in the list of colors. If::
N < len(colors)
the list will be truncated at *N*. If::
N > len(colors)
the list will be extended by repetition.
"""
self.colors = colors
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
if N is None:
N = len(self.colors)
else:
if cbook.is_string_like(self.colors):
self.colors = [self.colors] * N
self.monochrome = True
elif cbook.iterable(self.colors):
self.colors = list(self.colors) # in case it was a tuple
if len(self.colors) == 1:
self.monochrome = True
if len(self.colors) < N:
self.colors = list(self.colors) * N
del(self.colors[N:])
else:
try: gray = float(self.colors)
except TypeError: pass
else: self.colors = [gray] * N
self.monochrome = True
Colormap.__init__(self, name, N)
def _init(self):
rgb = np.array([colorConverter.to_rgb(c)
for c in self.colors], np.float)
self._lut = np.zeros((self.N + 3, 4), np.float)
self._lut[:-3, :-1] = rgb
self._lut[:-3, -1] = 1
self._isinit = True
self._set_extremes()
class Normalize:
"""
Normalize a given value to the 0-1 range
"""
def __init__(self, vmin=None, vmax=None, clip=False):
"""
If *vmin* or *vmax* is not given, they are taken from the input's
minimum and maximum value respectively. If *clip* is *True* and
the given value falls outside the range, the returned value
will be 0 or 1, whichever is closer. Returns 0 if::
vmin==vmax
Works with scalars or arrays, including masked arrays. If
*clip* is *True*, masked values are set to 1; otherwise they
remain masked. Clipping silently defeats the purpose of setting
the over, under, and masked colors in the colormap, so it is
likely to lead to surprises; therefore the default is
*clip* = *False*.
"""
self.vmin = vmin
self.vmax = vmax
self.clip = clip
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (val-vmin) * (1.0/(vmax-vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin + val * (vmax - vmin)
else:
return vmin + value * (vmax - vmin)
def autoscale(self, A):
'''
Set *vmin*, *vmax* to min, max of *A*.
'''
self.vmin = ma.minimum(A)
self.vmax = ma.maximum(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None: self.vmin = ma.minimum(A)
if self.vmax is None: self.vmax = ma.maximum(A)
def scaled(self):
'return true if vmin and vmax set'
return (self.vmin is not None and self.vmax is not None)
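# A short sketch of the Normalize contract (hypothetical helper): values are
# mapped linearly onto [0, 1] via vmin/vmax, masked entries stay masked, and
# inverse() undoes the mapping.
def _example_normalize():
    import numpy as np
    import numpy.ma as ma
    norm = Normalize(vmin=0.0, vmax=10.0)
    print(norm(5.0))                            # 0.5
    print(norm(np.array([0.0, 2.5, 10.0])))     # [0.0, 0.25, 1.0]
    print(norm.inverse(0.25))                   # 2.5
    masked = ma.array([1.0, 5.0], mask=[False, True])
    print(norm(masked))                         # second entry remains masked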
class LogNorm(Normalize):
"""
Normalize a given value to the 0-1 range on a log scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin<=0:
raise ValueError("values must all be positive")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (ma.log(val)-np.log(vmin))/(np.log(vmax)-np.log(vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin * ma.power((vmax/vmin), val)
else:
return vmin * pow((vmax/vmin), value)
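# A short sketch of the LogNorm mapping (hypothetical helper): equal ratios map
# to equal steps in [0, 1], and inverse() recovers the original value.
def _example_lognorm():
    import numpy as np
    norm = LogNorm(vmin=1.0, vmax=100.0)
    print(norm(np.array([1.0, 10.0, 100.0])))   # [0.0, 0.5, 1.0]
    print(norm.inverse(0.5))                    # 10.0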
class BoundaryNorm(Normalize):
'''
Generate a colormap index based on discrete intervals.
Unlike :class:`Normalize` or :class:`LogNorm`,
:class:`BoundaryNorm` maps values to integers instead of to the
interval 0-1.
Mapping to the 0-1 interval could have been done via
piece-wise linear interpolation, but using integers seems
simpler, and reduces the number of conversions back and forth
between integer and floating point.
'''
def __init__(self, boundaries, ncolors, clip=False):
'''
*boundaries*
a monotonically increasing sequence
*ncolors*
number of colors in the colormap to be used
If::
b[i] <= v < b[i+1]
then v is mapped to color j;
as i varies from 0 to len(boundaries)-2,
j goes from 0 to ncolors-1.
Out-of-range values are mapped to -1 if low and ncolors
if high; these are converted to valid indices by
:meth:`Colormap.__call__` .
'''
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
self.Ncmap = ncolors
if self.N-1 == self.Ncmap:
self._interp = False
else:
self._interp = True
def __call__(self, x, clip=None):
if clip is None:
clip = self.clip
x = ma.asarray(x)
mask = ma.getmaskarray(x)
xx = x.filled(self.vmax+1)
if clip:
            xx = np.clip(xx, self.vmin, self.vmax)
iret = np.zeros(x.shape, dtype=np.int16)
for i, b in enumerate(self.boundaries):
iret[xx>=b] = i
if self._interp:
iret = (iret * (float(self.Ncmap-1)/(self.N-2))).astype(np.int16)
iret[xx<self.vmin] = -1
iret[xx>=self.vmax] = self.Ncmap
ret = ma.array(iret, mask=mask)
if ret.shape == () and not mask:
ret = int(ret) # assume python scalar
return ret
def inverse(self, value):
        raise ValueError("BoundaryNorm is not invertible")
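# A short sketch of the index mapping described in the BoundaryNorm docstring
# (hypothetical helper): values are binned by the boundaries, and out-of-range
# values map to -1 / ncolors so that Colormap.__call__ can route them to the
# under/over colors.
def _example_boundary_norm():
    import numpy as np
    norm = BoundaryNorm(boundaries=[0.0, 1.0, 2.0, 4.0], ncolors=3)
    print(norm(np.array([0.5, 1.5, 3.0])))      # [0 1 2]
    print(norm(np.array([-1.0, 5.0])))          # [-1 3] -> under / over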
class NoNorm(Normalize):
'''
Dummy replacement for Normalize, for the case where we
want to use indices directly in a
:class:`~matplotlib.cm.ScalarMappable` .
'''
def __call__(self, value, clip=None):
return value
def inverse(self, value):
return value
# compatibility with earlier class names that violated convention:
normalize = Normalize
no_norm = NoNorm
| agpl-3.0 |
iwegner/MITK | Modules/Biophotonics/python/iMC/scripts/ipcai_to_caffe/script_create_hdf5_database.py | 6 | 1025 | import h5py, os
import pandas as pd
from regression.preprocessing import preprocess
def create_hdf5(path_to_simulation_results, hdf5_name):
df = pd.read_csv(path_to_simulation_results, header=[0, 1])
X, y = preprocess(df, snr=10.)
y = y.values
with h5py.File(hdf5_name,'w') as H:
        H.create_dataset('data', data=X)   # features; the dataset name must match the HDF5 layer's top blob
        H.create_dataset('label', data=y)  # targets; the dataset name must match the HDF5 layer's top blob
with open(hdf5_name + '_list.txt','w') as L:
L.write(hdf5_name) # list all h5 files you are going to use
data_root = "/media/wirkert/data/Data/2016_02_02_IPCAI/results/intermediate"
TRAIN_IMAGES = os.path.join(data_root,
"ipcai_revision_colon_mean_scattering_train_all_spectrocam.txt")
TEST_IMAGES = os.path.join(data_root,
"ipcai_revision_colon_mean_scattering_test_all_spectrocam.txt")
create_hdf5(TRAIN_IMAGES, "ipcai_train_hdf5.h5")
create_hdf5(TEST_IMAGES, "ipcai_test_hdf5.h5")
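# A quick read-back sketch (hypothetical helper, plain h5py): the file written
# above should expose an N x n_features 'data' matrix and a matching 'label'
# array, while the *_list.txt file simply lists the HDF5 path(s).
def read_back(hdf5_name="ipcai_train_hdf5.h5"):
    import h5py
    with h5py.File(hdf5_name, "r") as h:
        data = h["data"][:]
        label = h["label"][:]
    print(data.shape, label.shape)
    return data, label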
| bsd-3-clause |
henrynj/PMLMC | plot/plot_sample_numbers.py | 1 | 4135 | #!/usr/bin/env python
import sys
import numpy as np
import matplotlib.pyplot as plt
MESHSIZES = np.array( [2862, 5982, 8968, 13584, 20822, 32283] )
L = 5
cost = np.array( [195.8, 467.0, 876.9, 1379.7, 2418.4, 4285.1] )
cost_eq = np.array( [cost[i]+cost[i-1] if i!=0 else cost[i] for i in range(L+1) ] )
epss = [5.e-4, 4.e-4, 3.e-4, 2.e-4, 1.e-4, 1.15e-4]
### MC
NM_MC = np.array( [ 2411, 3767, 6697, 15068, 60270, 45573] )
COST_MC = np.array( [ 1.03313761e+07, 1.61419717e+07, 2.86973147e+07, 6.45678868e+07, 2.58262977e+08, 1.95284862e+08] )
### MLMC
NM_MLMC = np.array(
[[ 7086, 1190, 236, 107, 39, 16],
[ 11071, 1858, 369, 167, 60, 25],
[ 19682, 3304, 656, 296, 106, 44],
[ 44283, 7432, 1475, 665, 238, 97],
[177132, 29728, 5897, 2659, 951, 388],
[133937, 22479, 4459, 2010, 719, 293]])
COST_MLMC = np.array( [ 2990169.3, 4667409., 8290731.4, 18633619.8, 74524392.8, 56347121.3] )
lf_ress = [-4.0, -4.5, -5.0, -5.5, -6.0, -8.0]
COST_MLMF = np.load('cost_total.npy')
print(COST_MLMF)
def plot_nm_all(eps, lf_res):
""" """
eps_idx = epss.index(eps)
lf_idx = lf_ress.index(lf_res)
levels = np.arange(L+1)
nm_mc = NM_MC[eps_idx]
nm_mlmc = NM_MLMC[eps_idx]
nm_mlmf = np.load('res_%d.npy' %lf_idx)
nm_hf = nm_mlmf[-2, eps_idx]
nm_lf = nm_mlmf[-1, eps_idx]
plt.figure(figsize=(10, 8))
plt.subplot(1, 1, 1)
plt.plot(L, nm_mc, 'ko', label='MC')
plt.semilogy(levels, nm_mlmc, 'rx--', label=r'MLMC')
plt.semilogy(levels, nm_hf, 'bo--', label=r'$MLMF\_HF$')
plt.semilogy(levels, nm_lf, 'b*-', label=r'$MLMF\_LF$')
# plt.xlim(-1, L+1)
# plt.ylim(1, 1e5)
plt.grid(True, which='both', linestyle='--')
plt.xlabel('level $l$')
plt.ylabel('$N_l$')
plt.legend(loc='upper right', frameon=True)#, fontsize='x-small')
# plt.title('$eps=%.2e$' %eps)
plt.savefig('RANS_nm_all.eps')
# plt.close()
def plot_nm_mf(eps, lf_res):
""" plot ns of HF & LF for MLMFMC and MLMFMC_new"""
eps_idx = epss.index(eps)
lf_idx = lf_ress.index(lf_res)
levels = np.arange(L+1)
nm_mlmf = np.load('res_%d.npy' %lf_idx)
nm_hf_0 = nm_mlmf[0, eps_idx]
nm_lf_0 = nm_mlmf[1, eps_idx]
nm_hf_n = nm_mlmf[-2, eps_idx]
nm_lf_n = nm_mlmf[-1, eps_idx]
plt.figure(figsize=(10, 8))
plt.subplot(1, 1, 1)
plt.semilogy(levels, nm_hf_0, 'rx--', label=r'$MLMF\_HF$')
    plt.semilogy(levels, nm_lf_0, 'ro-', label=r'$MLMF\_LF$')
plt.semilogy(levels, nm_hf_n, 'bx--', label=r'$MLMF\_HF\_new$')
plt.semilogy(levels, nm_lf_n, 'bo-', label=r'$MLMF\_LF\_new$')
# plt.xlim(-1, L+1)
# plt.ylim(1, 1e5)
plt.grid(True, which='both', linestyle='--')
plt.xlabel('level $l$')
plt.ylabel('$N_l$')
plt.legend(loc='upper right', frameon=True)#, fontsize='x-small')
# plt.title('$eps=%.2e$' %eps)
plt.savefig('RANS_nm_impv.eps')
# plt.close()
def plot_nm_res(eps):
""" plot ns of HF & LF using MLMFMC_new for different LF_res"""
eps_idx = epss.index(eps)
levels = np.arange(L+1)
plt.figure(figsize=(10, 8))
plt.subplot(1, 1, 1)
styles = ['ko', 'bx', 'rd', 'c*', 'ms', 'g^', '>']
for i,lf_res in enumerate(lf_ress[:-1]):
# if i ==0:
# continue
lf_idx = lf_ress.index(lf_res)
nm_mlmf = np.load('res_%d.npy' %lf_idx)
nm_hf_n = nm_mlmf[-2, eps_idx]
nm_lf_n = nm_mlmf[-1, eps_idx]
plt.semilogy(levels, nm_hf_n, styles[i]+'--', label=r'$HF\_res=%.1f$'%lf_res)
plt.semilogy(levels, nm_lf_n, styles[i]+'-', label=r'$LF\_res=%.1f$'%lf_res)
#
# plt.xlim(-1, L+1)
# plt.ylim(1, 1e5)
plt.grid(True, which='both', linestyle='--')
plt.xlabel('level $l$')
plt.ylabel('$N_l$')
plt.legend(loc='upper right', frameon=True)#, fontsize='x-small')
# plt.title('$eps=%.2e$' %eps)
plt.savefig('RANS_nm_lfs.eps')
if __name__ == '__main__':
eps = 2.e-4; lf_res = -5.0
# plot_nm_all(eps, lf_res)
# plot_nm_mf(eps, lf_res)
plot_nm_res(eps) | gpl-3.0 |
boada/desCluster | analysis/plot_mkProb.py | 2 | 2944 | import pylab as pyl
from matplotlib.patches import Rectangle
import h5py as hdf
### Targeted ###
################
with hdf.File('./result_targetedPerfect.hdf5', 'r') as f:
dset = f[list(f.keys())[0]]
#data = dset['IDX', 'HALOID', 'ZSPEC', 'M200c', 'NGAL', 'LOSVD',
# 'LOSVD_err', 'MASS', 'LOSVD_dist']
data = dset['ZSPEC', 'M200c', 'LOSVD']
mask = ((pyl.log10(data['LOSVD']) > 3.12) & (data['M200c'] < 10**14.5) |
(data['LOSVD'] < 50))
data = data[~mask]
badData = data[mask]
bins = [25, 25]
extent = [[0.0, 0.5], [pyl.log10(50), 3.12]]
thresh = 3
xdat = data['ZSPEC']
ydat = pyl.log10(data['LOSVD'])
f, ax = pyl.subplots(1,
2,
figsize=(7, 7 * (pyl.sqrt(5.) - 1.0) / 2.0),
squeeze=True)
ax = ax.ravel()
hh, locx, locy = pyl.histogram2d(xdat, ydat, range=extent, bins=bins)
posx = pyl.digitize(xdat, locx)
posy = pyl.digitize(ydat, locy)
# finds the bins which contain points. posx = 0 for points outside "range"
ind = (posx > 0) & (posx <= bins[0]) & (posy > 0) & (posy <= bins[1])
# values of histogram with points in the bins.
hhsub = hh[posx[ind] - 1, posy[ind] - 1]
xdat1 = xdat[ind][hhsub < thresh] # low density points
ydat1 = ydat[ind][hhsub < thresh]
hh[hh < thresh] = 0 # fill the areas with low density by NaNs
# the CMD on the left
ax[0].scatter(xdat1, ydat1, s=10, c='0.8', edgecolor='0.8')
ax[0].imshow(
pyl.log10(hh.T),
cmap='gray_r',
extent=pyl.array(extent).flatten(),
interpolation='nearest')
ax[0].set_xlabel('Redshift')
ax[0].set_ylabel('Log $\sigma$ (km $s^{-1}$)')
#ax[0].set_xticks([-24, -20, -16, -12])
# add some text lables
ax[0].text(0.16, 2.4, '1', color='#467821', fontsize=20)
ax[0].text(0.36, 2.7, '2', color='#cf4457', fontsize=20)
### add the histograms and little boxes
# the two boxes
xcoord = [0.15, 0.35]
ycoord = [2.3, 2.6]
colors = ['#467821', '#cf4457']
for x, y, c in zip(xcoord, ycoord, colors):
# find the bins of the color/mag point of interest
xbin = pyl.digitize([x], locx)
ybin = pyl.digitize([y], locy)
# find all of the points inside the bin we are interested ind
i = (locx[xbin - 1] < xdat) & (xdat < locx[xbin]) & \
(locy[ybin - 1] < ydat) & (ydat < locy[ybin])
ax[1].hist(
pyl.log10(data['M200c'][i]),
bins=10,
normed=True,
histtype='step',
lw=2,
edgecolor=c)
# little boxes
rec = Rectangle((locx[xbin], locy[ybin]),
locx[xbin + 1] - locx[xbin],
locy[ybin + 1] - locy[ybin],
lw=2,
zorder=10,
fc='none',
ec=c)
ax[0].add_patch(rec)
ax[1].set_xlabel('Log $M_{200c}$ ($M_{\odot}$)')
ax[1].set_ylabel('P(Log $M_{200c}| z, Log\, \sigma)$')
ax[1].text(12.25, 1, '1', color='#467821', fontsize=20)
ax[1].text(13.75, 1, '2', color='#cf4457', fontsize=20)
pyl.show()
| mit |
okuta/chainer | chainer/training/extensions/plot_report.py | 3 | 7084 | import json
from os import path
import warnings
import numpy
import six
from chainer import reporter
from chainer import serializer as serializer_module
from chainer.training import extension
from chainer.training import trigger as trigger_module
from chainer.utils import argument
_available = None
def _try_import_matplotlib():
global matplotlib, _available
try:
import matplotlib # NOQA
_available = True
except (ImportError, TypeError):
_available = False
def _check_available():
if _available is None:
_try_import_matplotlib()
if not _available:
warnings.warn('matplotlib is not installed on your environment, '
'so nothing will be plotted at this time. '
'Please install matplotlib to plot figures.\n\n'
' $ pip install matplotlib\n')
class PlotReport(extension.Extension):
"""__init__(\
y_keys, x_key='iteration', trigger=(1, 'epoch'), postprocess=None, \
filename='plot.png', marker='x', grid=True)
Trainer extension to output plots.
This extension accumulates the observations of the trainer to
:class:`~chainer.DictSummary` at a regular interval specified by a supplied
    trigger, and plots a graph using them.
There are two triggers to handle this extension. One is the trigger to
invoke this extension, which is used to handle the timing of accumulating
the results. It is set to ``1, 'iteration'`` by default. The other is the
trigger to determine when to emit the result. When this trigger returns
True, this extension appends the summary of accumulated values to the list
of past summaries, and writes the list to the log file. Then, this
extension makes a new fresh summary object which is used until the next
time that the trigger fires.
It also adds ``'epoch'`` and ``'iteration'`` entries to each result
dictionary, which are the epoch and iteration counts at the output.
.. warning::
If your environment needs to specify a backend of matplotlib
explicitly, please call ``matplotlib.use`` before calling
``trainer.run``. For example:
.. code-block:: python
import matplotlib
matplotlib.use('Agg')
trainer.extend(
extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', filename='loss.png'))
trainer.run()
Then, once one of instances of this extension is called,
``matplotlib.use`` will have no effect.
For the details, please see here:
https://matplotlib.org/faq/usage_faq.html#what-is-a-backend
Args:
y_keys (iterable of strs): Keys of values regarded as y. If this is
``None``, nothing is output to the graph.
x_key (str): Keys of values regarded as x. The default value is
'iteration'.
trigger: Trigger that decides when to aggregate the result and output
the values. This is distinct from the trigger of this extension
itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>,
'iteration'``, it is passed to :class:`IntervalTrigger`.
postprocess: Callback to postprocess the result dictionaries. Figure
object, Axes object, and all plot data are passed to this callback
in this order. This callback can modify the figure.
filename (str): Name of the figure file under the output directory.
It can be a format string.
For historical reasons ``file_name`` is also accepted as an alias
of this argument.
marker (str): The marker used to plot the graph. Default is ``'x'``. If
``None`` is given, it draws with no markers.
grid (bool): If ``True``, set the axis grid on.
The default value is ``True``.
"""
def __init__(self, y_keys, x_key='iteration', trigger=(1, 'epoch'),
postprocess=None, filename=None, marker='x',
grid=True, **kwargs):
file_name, = argument.parse_kwargs(kwargs, ('file_name', 'plot.png'))
if filename is None:
filename = file_name
del file_name # avoid accidental use
_check_available()
self._x_key = x_key
if isinstance(y_keys, str):
y_keys = (y_keys,)
self._y_keys = y_keys
self._trigger = trigger_module.get_trigger(trigger)
self._file_name = filename
self._marker = marker
self._grid = grid
self._postprocess = postprocess
self._init_summary()
self._data = {k: [] for k in y_keys}
@staticmethod
def available():
_check_available()
return _available
def __call__(self, trainer):
if self.available():
# Dynamically import pyplot to call matplotlib.use()
# after importing chainer.training.extensions
import matplotlib.pyplot as plt
else:
return
keys = self._y_keys
observation = trainer.observation
summary = self._summary
if keys is None:
summary.add(observation)
else:
summary.add({k: observation[k] for k in keys if k in observation})
if self._trigger(trainer):
stats = self._summary.compute_mean()
stats_cpu = {}
for name, value in six.iteritems(stats):
stats_cpu[name] = float(value) # copy to CPU
updater = trainer.updater
stats_cpu['epoch'] = updater.epoch
stats_cpu['iteration'] = updater.iteration
x = stats_cpu[self._x_key]
data = self._data
for k in keys:
if k in stats_cpu:
data[k].append((x, stats_cpu[k]))
f = plt.figure()
a = f.add_subplot(111)
a.set_xlabel(self._x_key)
if self._grid:
a.grid()
for k in keys:
xy = data[k]
if len(xy) == 0:
continue
xy = numpy.array(xy)
a.plot(xy[:, 0], xy[:, 1], marker=self._marker, label=k)
if a.has_data():
if self._postprocess is not None:
self._postprocess(f, a, summary)
l = a.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
f.savefig(path.join(trainer.out, self._file_name),
bbox_extra_artists=(l,), bbox_inches='tight')
plt.close()
self._init_summary()
def serialize(self, serializer):
if isinstance(serializer, serializer_module.Serializer):
serializer('_plot_{}'.format(self._file_name),
json.dumps(self._data))
else:
self._data = json.loads(
serializer('_plot_{}'.format(self._file_name), ''))
def _init_summary(self):
self._summary = reporter.DictSummary()
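# A hook-up sketch (hypothetical helper; `trainer` is assumed to be an already
# configured chainer Trainer that reports 'main/loss' and
# 'validation/main/loss'): record both losses per epoch and write the figure
# to <out>/loss.png.
def _extend_with_plot_report(trainer):
    trainer.extend(
        PlotReport(['main/loss', 'validation/main/loss'],
                   x_key='epoch', filename='loss.png', marker=None))
    return trainer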
| mit |
AnasGhrab/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
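# A follow-up sketch (hypothetical helper reusing `params`, `ensemble` and
# `np` from this script): refit a smaller model at the OOB-selected number of
# iterations instead of keeping all 1200 estimators.
def refit_at_oob_optimum(clf, X_train, y_train):
    n_best = int(np.argmin(-np.cumsum(clf.oob_improvement_))) + 1
    clf_small = ensemble.GradientBoostingClassifier(
        **dict(params, n_estimators=n_best))
    clf_small.fit(X_train, y_train)
    return clf_small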
| bsd-3-clause |
sem-geologist/hyperspy | hyperspy/roi.py | 2 | 47245 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
"""Region of interests (ROIs).
ROIs operate on `BaseSignal` instances and include widgets for interactive
operation.
The following 1D ROIs are available:
Point1DROI
Single element ROI of a 1D signal.
SpanROI
Interval ROI of a 1D signal.
The following 2D ROIs are available:
Point2DROI
Single element ROI of a 2D signal.
RectangularROI
Rectagular ROI of a 2D signal.
CircleROI
(Hollow) circular ROI of a 2D signal
Line2DROI
Line profile of a 2D signal with customisable width.
"""
from functools import partial
import traits.api as t
import numpy as np
from hyperspy.events import Events, Event
from hyperspy.interactive import interactive
from hyperspy.axes import DataAxis
from hyperspy.drawing import widgets
from hyperspy.ui_registry import add_gui_method
class BaseROI(t.HasTraits):
"""Base class for all ROIs.
Provides some basic functionality that is likely to be shared between all
ROIs, and serve as a common type that can be checked for.
"""
def __init__(self):
"""Sets up events.changed event, and inits HasTraits.
"""
super(BaseROI, self).__init__()
self.events = Events()
self.events.changed = Event("""
Event that triggers when the ROI has changed.
            What constitutes a change varies from ROI to ROI, but in general it
            should correspond to a change in the region selected by the ROI.
Arguments:
----------
roi :
The ROI that was changed.
""", arguments=['roi'])
self.signal_map = dict()
_ndim = 0
ndim = property(lambda s: s._ndim)
def is_valid(self):
"""
Determine if the ROI is in a valid state.
This is typically determined by all the coordinates being defined,
and that the values makes sense relative to each other.
"""
raise NotImplementedError()
def update(self):
"""Function responsible for updating anything that depends on the ROI.
It should be called by implementors whenever the ROI changes.
The base implementation simply triggers the changed event.
"""
if self.is_valid():
self.events.changed.trigger(self)
def _get_ranges(self):
"""
Utility to get the value ranges that the ROI would select.
        If the ROI is point-based or rectangular in nature, these can be used
        to slice a signal. Extracted from `_make_slices` to ease implementation
in inherited ROIs.
"""
raise NotImplementedError()
def _make_slices(self, axes_collection, axes, ranges=None):
"""
Utility function to make a slice structure that will slice all the axes
in 'axes_collection'. The axes in the `axes` argument will be sliced by
the ROI, all other axes with 'slice(None)'. Alternatively, if 'ranges'
is passed, `axes[i]` will be sliced with 'ranges[i]'.
"""
if ranges is None:
# Use ROI to slice
ranges = self._get_ranges()
slices = []
for ax in axes_collection:
if ax in axes:
i = axes.index(ax)
try:
ilow = ax.value2index(ranges[i][0])
except ValueError:
if ranges[i][0] < ax.low_value:
ilow = ax.low_index
else:
raise
if len(ranges[i]) == 1:
slices.append(ilow)
else:
try:
ihigh = ax.value2index(ranges[i][1])
except ValueError:
if ranges[i][1] > ax.high_value:
ihigh = ax.high_index + 1
else:
raise
slices.append(slice(ilow, ihigh))
else:
slices.append(slice(None))
return tuple(slices)
def __call__(self, signal, out=None, axes=None):
"""Slice the signal according to the ROI, and return it.
Arguments
---------
signal : Signal
The signal to slice with the ROI.
out : Signal, default = None
If the 'out' argument is supplied, the sliced output will be put
into this instead of returning a Signal. See Signal.__getitem__()
for more details on 'out'.
axes : specification of axes to use, default = None
The axes argument specifies which axes the ROI will be applied on.
The items in the collection can be either of the following:
* a tuple of:
- DataAxis. These will not be checked with
signal.axes_manager.
- anything that will index signal.axes_manager
* For any other value, it will check whether the navigation
space can fit the right number of axis, and use that if it
fits. If not, it will try the signal space.
"""
if axes is None and signal in self.signal_map:
axes = self.signal_map[signal][1]
else:
axes = self._parse_axes(axes, signal.axes_manager)
natax = signal.axes_manager._get_axes_in_natural_order()
slices = self._make_slices(natax, axes)
nav_axes = [ax.navigate for ax in axes]
nav_dim = signal.axes_manager.navigation_dimension
if True in nav_axes:
if False in nav_axes:
slicer = signal.inav[slices[:nav_dim]].isig.__getitem__
slices = slices[nav_dim:]
else:
slicer = signal.inav.__getitem__
slices = slices[0:nav_dim]
else:
slicer = signal.isig.__getitem__
slices = slices[nav_dim:]
roi = slicer(slices, out=out)
return roi
def _parse_axes(self, axes, axes_manager):
"""Utility function to parse the 'axes' argument to a list of
DataAxis.
Arguments
---------
axes : specification of axes to use, default = None
The axes argument specifies which axes the ROI will be applied on.
The DataAxis in the collection can be either of the following:
* a tuple of:
- DataAxis. These will not be checked with
signal.axes_manager.
- anything that will index signal.axes_manager
* For any other value, it will check whether the navigation
space can fit the right number of axis, and use that if it
fits. If not, it will try the signal space.
axes_manager : AxesManager
The AxesManager to use for parsing axes, if axes is not already a
tuple of DataAxis.
Returns
-------
[<DataAxis>]
"""
nd = self.ndim
if isinstance(axes, (tuple, list)):
axes_out = axes_manager[axes[:nd]]
else:
if axes_manager.navigation_dimension >= nd:
axes_out = axes_manager.navigation_axes[:nd]
elif axes_manager.signal_dimension >= nd:
axes_out = axes_manager.signal_axes[:nd]
elif nd == 2 and axes_manager.navigation_dimension == 1 and \
axes_manager.signal_dimension == 1:
# We probably have a navigator plot including both nav and sig
# axes.
axes_out = [axes_manager.signal_axes[0],
axes_manager.navigation_axes[0]]
else:
raise ValueError("Could not find valid axes configuration.")
return axes_out
def _get_mpl_ax(plot, axes):
"""
Returns MPL Axes that contains the `axes`.
The space of the first DataAxis in axes will be used to determine which
plot's matplotlib Axes to return.
Arguments:
----------
plot : MPL_HyperExplorer
The explorer that contains the navigation and signal plots
axes : collection of DataAxis
The axes to infer from.
"""
if axes[0].navigate:
ax = plot.navigator_plot.ax
else:
if len(axes) == 2 and axes[1].navigate:
ax = plot.navigator_plot.ax
else:
ax = plot.signal_plot.ax
return ax
class BaseInteractiveROI(BaseROI):
"""Base class for interactive ROIs, i.e. ROIs with widget interaction.
The base class defines a lot of the common code for interacting with
    widgets, but inheritors need to implement the following functions:
_get_widget_type()
_apply_roi2widget(widget)
_set_from_widget(widget)
"""
def __init__(self):
super(BaseInteractiveROI, self).__init__()
self.widgets = set()
self._applying_widget_change = False
def update(self):
"""Function responsible for updating anything that depends on the ROI.
It should be called by implementors whenever the ROI changes.
This implementation updates the widgets associated with it, and
triggers the changed event.
"""
if self.is_valid():
if not self._applying_widget_change:
self._update_widgets()
self.events.changed.trigger(self)
def _update_widgets(self, exclude=None):
"""Internal function for updating the associated widgets to the
geometry contained in the ROI.
Arguments
---------
exclude : set()
A set of widgets to exclude from the update. Useful e.g. if a
widget has triggered a change in the ROI: Then all widgets,
excluding the one that was the source for the change, should be
updated.
"""
if exclude is None:
exclude = set()
if not isinstance(exclude, set):
exclude = set(exclude)
for w in self.widgets - exclude:
with w.events.changed.suppress_callback(self._on_widget_change):
self._apply_roi2widget(w)
def _get_widget_type(self, axes, signal):
"""Get the type of a widget that can represent the ROI on the given
axes and signal.
"""
raise NotImplementedError()
def _apply_roi2widget(self, widget):
"""This function is responsible for applying the ROI geometry to the
widget. When this function is called, the widget's events are already
suppressed, so this should not be necessary for _apply_roi2widget to
handle.
"""
raise NotImplementedError()
def _set_from_widget(self, widget):
"""Sets the internal representation of the ROI from the passed widget,
without doing anything to events.
"""
raise NotImplementedError()
def interactive(self, signal, navigation_signal="same", out=None,
color="green", **kwargs):
"""Creates an interactively sliced Signal (sliced by this ROI) via
hyperspy.interactive.
Arguments:
----------
signal : Signal
The source signal to slice
navigation_signal : Signal, None or "same" (default)
If not None, it will automatically create a widget on
navigation_signal. Passing "same" is identical to passing the same
            signal to 'signal' and 'navigation_signal', but is less ambiguous,
and allows "same" to be the default value.
out : Signal
If not None, it will use 'out' as the output instead of returning
a new Signal.
color : Matplotlib color specifier (default: 'green')
The color for the widget. Any format that matplotlib uses should be
            ok. This will not change the color of any widget passed with the
'widget' argument.
**kwargs
All kwargs are passed to the roi __call__ method which is called
            interactively on any ROI attribute change.
"""
if hasattr(signal, '_plot_kwargs'):
kwargs.update({'_plot_kwargs': signal._plot_kwargs})
# in case of complex signal, it is possible to shift the signal
# during plotting, if so this is currently not supported and we
# raise a NotImplementedError
if signal._plot.signal_data_function_kwargs.get(
'fft_shift', False):
raise NotImplementedError('ROIs are not supported when data '
'are shifted during plotting.')
if isinstance(navigation_signal, str) and navigation_signal == "same":
navigation_signal = signal
if navigation_signal is not None:
if navigation_signal not in self.signal_map:
self.add_widget(navigation_signal, color=color,
axes=kwargs.get("axes", None))
if (self.update not in
signal.axes_manager.events.any_axis_changed.connected):
signal.axes_manager.events.any_axis_changed.connect(
self.update,
[])
if out is None:
return interactive(self.__call__,
event=self.events.changed,
signal=signal,
**kwargs)
else:
return interactive(self.__call__,
event=self.events.changed,
signal=signal, out=out, **kwargs)
def _on_widget_change(self, widget):
"""Callback for widgets' 'changed' event. Updates the internal state
from the widget, and triggers events (excluding connections to the
source widget).
"""
with self.events.suppress():
self._bounds_check = False
self._applying_widget_change = True
try:
self._set_from_widget(widget)
finally:
self._bounds_check = True
self._applying_widget_change = False
self._update_widgets(exclude=(widget,))
self.events.changed.trigger(self)
def add_widget(self, signal, axes=None, widget=None,
color='green', **kwargs):
"""Add a widget to visually represent the ROI, and connect it so any
changes in either are reflected in the other. Note that only one
widget can be added per signal/axes combination.
Arguments:
----------
signal : Signal
            The signal to which the widget is added. This is used to determine
            which plot to add the widget to, and it supplies the axes_manager
for the widget.
axes : specification of axes to use, default = None
The axes argument specifies which axes the ROI will be applied on.
The DataAxis in the collection can be either of the following:
* a tuple of:
- DataAxis. These will not be checked with
signal.axes_manager.
- anything that will index signal.axes_manager
* For any other value, it will check whether the navigation
space can fit the right number of axis, and use that if it
fits. If not, it will try the signal space.
widget : Widget or None (default)
If specified, this is the widget that will be added. If None, the
default widget will be used, as given by _get_widget_type().
color : Matplotlib color specifier (default: 'green')
The color for the widget. Any format that matplotlib uses should be
            ok. This will not change the color of any widget passed with the
'widget' argument.
kwargs:
All keyword argument are passed to the widget constructor.
"""
axes = self._parse_axes(axes, signal.axes_manager,)
if widget is None:
widget = self._get_widget_type(
axes, signal)(
signal.axes_manager, **kwargs)
widget.color = color
        # Remove existing ROI, if it exists and axes match
if signal in self.signal_map and \
self.signal_map[signal][1] == axes:
self.remove_widget(signal)
# Set DataAxes
widget.axes = axes
if widget.ax is None:
if signal._plot is None:
raise Exception(
"%s does not have an active plot. Plot the signal before "
"calling this method using its `plot` method." %
repr(signal))
ax = _get_mpl_ax(signal._plot, axes)
widget.set_mpl_ax(ax)
with widget.events.changed.suppress_callback(self._on_widget_change):
self._apply_roi2widget(widget)
# Connect widget changes to on_widget_change
widget.events.changed.connect(self._on_widget_change,
{'obj': 'widget'})
# When widget closes, remove from internal list
widget.events.closed.connect(self._remove_widget, {'obj': 'widget'})
self.widgets.add(widget)
self.signal_map[signal] = (widget, axes)
return widget
def _remove_widget(self, widget):
widget.events.closed.disconnect(self._remove_widget)
widget.events.changed.disconnect(self._on_widget_change)
widget.close()
for signal, w in self.signal_map.items():
if w[0] == widget:
self.signal_map.pop(signal)
break
def remove_widget(self, signal):
if signal in self.signal_map:
w = self.signal_map.pop(signal)[0]
self._remove_widget(w)
class BasePointROI(BaseInteractiveROI):
"""Base ROI class for point ROIs, i.e. ROIs with a unit size in each of its
dimensions.
"""
def __call__(self, signal, out=None, axes=None):
if axes is None and signal in self.signal_map:
axes = self.signal_map[signal][1]
else:
axes = self._parse_axes(axes, signal.axes_manager)
s = super(BasePointROI, self).__call__(signal=signal, out=out,
axes=axes)
return s
def guess_vertical_or_horizontal(axes, signal):
    # Figure out whether to use a horizontal or vertical line:
if axes[0].navigate:
plotdim = len(signal._plot.navigator_data_function().shape)
axdim = signal.axes_manager.navigation_dimension
idx = signal.axes_manager.navigation_axes.index(axes[0])
else:
plotdim = len(signal._plot.signal_data_function().shape)
axdim = signal.axes_manager.signal_dimension
idx = signal.axes_manager.signal_axes.index(axes[0])
if plotdim == 2: # Plot is an image
# axdim == 1 and plotdim == 2 indicates "spectrum stack"
if idx == 0 and axdim != 1: # Axis is horizontal
return "vertical"
else: # Axis is vertical
return "horizontal"
elif plotdim == 1: # It is a spectrum
return "vertical"
else:
raise ValueError(
"Could not find valid widget type for the given `axes` value")
@add_gui_method(toolkey="hyperspy.Point1DROI")
class Point1DROI(BasePointROI):
"""Selects a single point in a 1D space. The coordinate of the point in the
1D space is stored in the 'value' trait.
"""
value = t.CFloat(t.Undefined)
_ndim = 1
def __init__(self, value):
super(Point1DROI, self).__init__()
self.value = value
def is_valid(self):
return self.value != t.Undefined
def _value_changed(self, old, new):
self.update()
def _get_ranges(self):
ranges = ((self.value,),)
return ranges
def _set_from_widget(self, widget):
self.value = widget.position[0]
def _apply_roi2widget(self, widget):
widget.position = (self.value,)
def _get_widget_type(self, axes, signal):
direction = guess_vertical_or_horizontal(axes=axes, signal=signal)
if direction == "vertical":
return widgets.VerticalLineWidget
elif direction == "horizontal":
return widgets.HorizontalLineWidget
else:
raise ValueError("direction must be either horizontal or vertical")
def __repr__(self):
return "%s(value=%g)" % (
self.__class__.__name__,
self.value)
@add_gui_method(toolkey="hyperspy.Point2DROI")
class Point2DROI(BasePointROI):
"""Selects a single point in a 2D space. The coordinates of the point in
the 2D space are stored in the traits 'x' and 'y'.
"""
x, y = (t.CFloat(t.Undefined),) * 2
_ndim = 2
def __init__(self, x, y):
super(Point2DROI, self).__init__()
self.x, self.y = x, y
def is_valid(self):
return t.Undefined not in (self.x, self.y)
def _x_changed(self, old, new):
self.update()
def _y_changed(self, old, new):
self.update()
def _get_ranges(self):
ranges = ((self.x,), (self.y,),)
return ranges
def _set_from_widget(self, widget):
self.x, self.y = widget.position
def _apply_roi2widget(self, widget):
widget.position = (self.x, self.y)
def _get_widget_type(self, axes, signal):
return widgets.SquareWidget
def __repr__(self):
return "%s(x=%g, y=%g)" % (
self.__class__.__name__,
self.x, self.y)
@add_gui_method(toolkey="hyperspy.SpanROI")
class SpanROI(BaseInteractiveROI):
"""Selects a range in a 1D space. The coordinates of the range in
the 1D space are stored in the traits 'left' and 'right'.
"""
left, right = (t.CFloat(t.Undefined),) * 2
_ndim = 1
def __init__(self, left, right):
super(SpanROI, self).__init__()
        self._bounds_check = True  # Use responsibly!
self.left, self.right = left, right
def is_valid(self):
return (t.Undefined not in (self.left, self.right) and
self.right >= self.left)
def _right_changed(self, old, new):
if self._bounds_check and \
self.left is not t.Undefined and new <= self.left:
self.right = old
else:
self.update()
def _left_changed(self, old, new):
if self._bounds_check and \
self.right is not t.Undefined and new >= self.right:
self.left = old
else:
self.update()
def _get_ranges(self):
ranges = ((self.left, self.right),)
return ranges
def _set_from_widget(self, widget):
value = (widget.position[0], widget.position[0] + widget.size[0])
self.left, self.right = value
def _apply_roi2widget(self, widget):
widget.set_bounds(left=self.left, right=self.right)
def _get_widget_type(self, axes, signal):
direction = guess_vertical_or_horizontal(axes=axes, signal=signal)
if direction == "vertical":
return partial(widgets.RangeWidget, direction="horizontal")
elif direction == "horizontal":
return partial(widgets.RangeWidget, direction="vertical")
else:
raise ValueError("direction must be either horizontal or vertical")
def __repr__(self):
return "%s(left=%g, right=%g)" % (
self.__class__.__name__,
self.left,
self.right)
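# A usage sketch for SpanROI (hypothetical helper; `signal` is assumed to be a
# 1D hyperspy signal): slice a window either statically or interactively, so
# that the sliced signal follows the widget when it is dragged.
def _example_span_roi(signal):
    roi = SpanROI(left=1.5, right=2.5)       # in axis units, not indices
    window = roi(signal)                     # static slice
    signal.plot()
    window_live = roi.interactive(signal)    # widget-backed, updates on drag
    return window, window_live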
@add_gui_method(toolkey="hyperspy.RectangularROI")
class RectangularROI(BaseInteractiveROI):
"""Selects a range in a 2D space. The coordinates of the range in
the 2D space are stored in the traits 'left', 'right', 'top' and 'bottom'.
Convenience properties 'x', 'y', 'width' and 'height' are also available,
but cannot be used for initialization.
"""
top, bottom, left, right = (t.CFloat(t.Undefined),) * 4
_ndim = 2
def __init__(self, left, top, right, bottom):
super(RectangularROI, self).__init__()
        self._bounds_check = True  # Use responsibly!
self.top, self.bottom, self.left, self.right = top, bottom, left, right
def is_valid(self):
return (t.Undefined not in (self.top, self.bottom,
self.left, self.right) and
self.right >= self.left and self.bottom >= self.top)
def _top_changed(self, old, new):
if self._bounds_check and \
self.bottom is not t.Undefined and new >= self.bottom:
self.top = old
else:
self.update()
@property
def width(self):
"""Returns / sets the width of the ROI"""
return self.right - self.left
@width.setter
def width(self, value):
if value == self.width:
return
self.right -= self.width - value
@property
def height(self):
"""Returns / sets the height of the ROI"""
return self.bottom - self.top
@height.setter
def height(self, value):
if value == self.height:
return
self.bottom -= self.height - value
@property
def x(self):
"""Returns / sets the x coordinate of the ROI without changing its
width"""
return self.left
@x.setter
def x(self, value):
if value != self.x:
diff = value - self.x
try:
self._applying_widget_change = True
self._bounds_check = False
with self.events.changed.suppress():
self.right += diff
self.left += diff
finally:
self._applying_widget_change = False
self._bounds_check = True
self.update()
@property
def y(self):
"""Returns / sets the y coordinate of the ROI without changing its
height"""
return self.top
@y.setter
def y(self, value):
if value != self.y:
diff = value - self.y
try:
self._applying_widget_change = True
self._bounds_check = False
with self.events.changed.suppress():
self.top += diff
self.bottom += diff
finally:
self._applying_widget_change = False
self._bounds_check = True
self.update()
def _bottom_changed(self, old, new):
if self._bounds_check and \
self.top is not t.Undefined and new <= self.top:
self.bottom = old
else:
self.update()
def _right_changed(self, old, new):
if self._bounds_check and \
self.left is not t.Undefined and new <= self.left:
self.right = old
else:
self.update()
def _left_changed(self, old, new):
if self._bounds_check and \
self.right is not t.Undefined and new >= self.right:
self.left = old
else:
self.update()
def _get_ranges(self):
ranges = ((self.left, self.right), (self.top, self.bottom),)
return ranges
def _set_from_widget(self, widget):
p = np.array(widget.position)
s = np.array(widget.size)
(self.left, self.top), (self.right, self.bottom) = (p, p + s)
def _apply_roi2widget(self, widget):
widget.set_bounds(left=self.left, bottom=self.bottom,
right=self.right, top=self.top)
def _get_widget_type(self, axes, signal):
return widgets.RectangleWidget
def __repr__(self):
return "%s(left=%g, top=%g, right=%g, bottom=%g)" % (
self.__class__.__name__,
self.left,
self.top,
self.right,
self.bottom)
@add_gui_method(toolkey="hyperspy.CircleROI")
class CircleROI(BaseInteractiveROI):
cx, cy, r, r_inner = (t.CFloat(t.Undefined),) * 4
_ndim = 2
def __init__(self, cx, cy, r, r_inner=None):
super(CircleROI, self).__init__()
        self._bounds_check = True  # Use responsibly!
self.cx, self.cy, self.r = cx, cy, r
if r_inner:
self.r_inner = r_inner
def is_valid(self):
return (t.Undefined not in (self.cx, self.cy, self.r,) and
(self.r_inner is t.Undefined or
t.Undefined not in (self.r, self.r_inner) and
self.r >= self.r_inner))
def _cx_changed(self, old, new):
self.update()
def _cy_changed(self, old, new):
self.update()
def _r_changed(self, old, new):
if self._bounds_check and \
self.r_inner is not t.Undefined and new < self.r_inner:
self.r = old
else:
self.update()
def _r_inner_changed(self, old, new):
if self._bounds_check and \
self.r is not t.Undefined and new >= self.r:
self.r_inner = old
else:
self.update()
def _set_from_widget(self, widget):
"""Sets the internal representation of the ROI from the passed widget,
without doing anything to events.
"""
self.cx, self.cy = widget.position
self.r, self.r_inner = widget.size
def _apply_roi2widget(self, widget):
widget.position = (self.cx, self.cy)
inner = self.r_inner if self.r_inner != t.Undefined else 0.0
widget.size = (self.r, inner)
def _get_widget_type(self, axes, signal):
return widgets.CircleWidget
def __call__(self, signal, out=None, axes=None):
"""Slice the signal according to the ROI, and return it.
Arguments
---------
signal : Signal
The signal to slice with the ROI.
out : Signal, default = None
If the 'out' argument is supplied, the sliced output will be put
into this instead of returning a Signal. See Signal.__getitem__()
for more details on 'out'.
axes : specification of axes to use, default = None
The axes argument specifies which axes the ROI will be applied on.
The items in the collection can be either of the following:
* a tuple of:
- DataAxis. These will not be checked with
signal.axes_manager.
- anything that will index signal.axes_manager
* For any other value, it will check whether the navigation
space can fit the right number of axis, and use that if it
fits. If not, it will try the signal space.
"""
if axes is None and signal in self.signal_map:
axes = self.signal_map[signal][1]
else:
axes = self._parse_axes(axes, signal.axes_manager)
natax = signal.axes_manager._get_axes_in_natural_order()
# Slice original data with a circumscribed rectangle
cx = self.cx + 0.5001 * axes[0].scale
cy = self.cy + 0.5001 * axes[1].scale
ranges = [[cx - self.r, cx + self.r],
[cy - self.r, cy + self.r]]
slices = self._make_slices(natax, axes, ranges)
ir = [slices[natax.index(axes[0])],
slices[natax.index(axes[1])]]
vx = axes[0].axis[ir[0]] - cx
vy = axes[1].axis[ir[1]] - cy
gx, gy = np.meshgrid(vx, vy)
gr = gx**2 + gy**2
mask = gr > self.r**2
if self.r_inner != t.Undefined:
mask |= gr < self.r_inner**2
tiles = []
shape = []
chunks = []
for i in range(len(slices)):
if signal._lazy:
chunks.append(signal.data.chunks[i][0])
if i == natax.index(axes[0]):
thisshape = mask.shape[0]
tiles.append(thisshape)
shape.append(thisshape)
elif i == natax.index(axes[1]):
thisshape = mask.shape[1]
tiles.append(thisshape)
shape.append(thisshape)
else:
tiles.append(signal.axes_manager._axes[i].size)
shape.append(1)
mask = mask.reshape(shape)
nav_axes = [ax.navigate for ax in axes]
nav_dim = signal.axes_manager.navigation_dimension
if True in nav_axes:
if False in nav_axes:
slicer = signal.inav[slices[:nav_dim]].isig.__getitem__
slices = slices[nav_dim:]
else:
slicer = signal.inav.__getitem__
slices = slices[0:nav_dim]
else:
slicer = signal.isig.__getitem__
slices = slices[nav_dim:]
roi = slicer(slices, out=out)
roi = out or roi
if roi._lazy:
import dask.array as da
mask = da.from_array(mask, chunks=chunks)
mask = da.broadcast_to(mask, tiles)
# By default promotes dtype to float if required
roi.data = da.where(mask, np.nan, roi.data)
else:
mask = np.broadcast_to(mask, tiles)
roi.data = np.ma.masked_array(roi.data, mask, hard_mask=True)
if out is None:
return roi
else:
out.events.data_changed.trigger(out)
def __repr__(self):
if self.r_inner == t.Undefined:
return "%s(cx=%g, cy=%g, r=%g)" % (
self.__class__.__name__,
self.cx,
self.cy,
self.r)
else:
return "%s(cx=%g, cy=%g, r=%g, r_inner=%g)" % (
self.__class__.__name__,
self.cx,
self.cy,
self.r,
self.r_inner)
@add_gui_method(toolkey="hyperspy.Line2DROI")
class Line2DROI(BaseInteractiveROI):
x1, y1, x2, y2, linewidth = (t.CFloat(t.Undefined),) * 5
_ndim = 2
def __init__(self, x1, y1, x2, y2, linewidth=0):
super(Line2DROI, self).__init__()
self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2
self.linewidth = linewidth
def is_valid(self):
return t.Undefined not in (self.x1, self.y1, self.x2, self.y2)
def _x1_changed(self, old, new):
self.update()
def _x2_changed(self, old, new):
self.update()
def _y1_changed(self, old, new):
self.update()
def _y2_changed(self, old, new):
self.update()
def _linewidth_changed(self, old, new):
self.update()
def _set_from_widget(self, widget):
"""Sets the internal representation of the ROI from the passed widget,
without doing anything to events.
"""
c = widget.position
s = widget.size[0]
(self.x1, self.y1), (self.x2, self.y2) = c
self.linewidth = s
def _apply_roi2widget(self, widget):
widget.position = (self.x1, self.y1), (self.x2, self.y2)
widget.size = np.array([self.linewidth])
def _get_widget_type(self, axes, signal):
return widgets.Line2DWidget
@staticmethod
def _line_profile_coordinates(src, dst, linewidth=1):
"""Return the coordinates of the profile of an image along a scan line.
Parameters
----------
src : 2-tuple of numeric scalar (float or int)
The start point of the scan line.
dst : 2-tuple of numeric scalar (float or int)
The end point of the scan line.
linewidth : int, optional
Width of the scan, perpendicular to the line
Returns
-------
coords : array, shape (2, N, C), float
The coordinates of the profile along the scan line. The length of
the profile is the ceil of the computed length of the scan line.
Notes
-----
This is a utility method meant to be used internally by skimage
functions. The destination point is included in the profile, in
contrast to standard numpy indexing.
"""
src_row, src_col = src = np.asarray(src, dtype=float)
dst_row, dst_col = dst = np.asarray(dst, dtype=float)
d_row, d_col = dst - src
theta = np.arctan2(d_row, d_col)
length = np.ceil(np.hypot(d_row, d_col) + 1).astype(int)
# we add one above because we include the last point in the profile
# (in contrast to standard numpy indexing)
line_col = np.linspace(src_col, dst_col, length)
line_row = np.linspace(src_row, dst_row, length)
data = np.zeros((2, length, linewidth))
data[0, :, :] = np.tile(line_col, [linewidth, 1]).T
data[1, :, :] = np.tile(line_row, [linewidth, 1]).T
if linewidth != 1:
# we subtract 1 from linewidth to change from pixel-counting
# (make this line 3 pixels wide) to point distances (the
# distance between pixel centers)
col_width = (linewidth - 1) * np.sin(-theta) / 2
row_width = (linewidth - 1) * np.cos(theta) / 2
row_off = np.linspace(-row_width, row_width, linewidth)
col_off = np.linspace(-col_width, col_width, linewidth)
data[0, :, :] += np.tile(col_off, [length, 1])
data[1, :, :] += np.tile(row_off, [length, 1])
return data
@property
def length(self):
p0 = np.array((self.x1, self.y1), dtype=np.float)
p1 = np.array((self.x2, self.y2), dtype=np.float)
d_row, d_col = p1 - p0
return np.hypot(d_row, d_col)
def angle(self, axis='horizontal', units='degrees'):
""""Angle between ROI line and selected axis
Parameters
----------
axis : str, {'horizontal', 'vertical'}, optional
Select axis against which the angle of the ROI line is measured.
'x' is alias to 'horizontal' and 'y' is 'vertical'
(Default: 'horizontal')
units : str, {'degrees', 'radians'}
The angle units of the output
(Default: 'degrees')
Returns
-------
angle : float
Examples
--------
>>> import hyperspy.api as hs
        >>> r = hs.roi.Line2DROI(0., 0., 1., 2., 1)
>>> r.angle()
63.43494882292201
"""
x = self.x2 - self.x1
y = self.y2 - self.y1
        if units == 'degrees':
            conversion = 180. / np.pi
        elif units == 'radians':
            conversion = 1.
        else:
            raise ValueError(
                "Units are not recognized. Use either 'degrees' or 'radians'.")
        if axis == 'horizontal':
            return np.arctan2(y, x) * conversion
        elif axis == 'vertical':
            return np.arctan2(x, y) * conversion
else:
raise ValueError("Axis is not recognized. "
"Use either 'horizontal' or 'vertical'.")
@staticmethod
def profile_line(img, src, dst, axes, linewidth=1,
order=1, mode='constant', cval=0.0):
"""Return the intensity profile of an image measured along a scan line.
Parameters
----------
img : numeric array, shape (M, N[, C])
The image, either grayscale (2D array) or multichannel
(3D array, where the final axis contains the channel
information).
src : 2-tuple of numeric scalar (float or int)
The start point of the scan line.
dst : 2-tuple of numeric scalar (float or int)
The end point of the scan line.
linewidth : int, optional
Width of the scan, perpendicular to the line
order : int in {0, 1, 2, 3, 4, 5}, optional
The order of the spline interpolation to compute image values at
non-integer coordinates. 0 means nearest-neighbor interpolation.
mode : string, one of {'constant', 'nearest', 'reflect', 'wrap'},
optional
How to compute any values falling outside of the image.
cval : float, optional
If `mode` is 'constant', what constant value to use outside the
image.
Returns
-------
return_value : array
The intensity profile along the scan line. The length of the
profile is the ceil of the computed length of the scan line.
Examples
--------
>>> x = np.array([[1, 1, 1, 2, 2, 2]])
>>> img = np.vstack([np.zeros_like(x), x, x, x, np.zeros_like(x)])
>>> img
array([[0, 0, 0, 0, 0, 0],
[1, 1, 1, 2, 2, 2],
[1, 1, 1, 2, 2, 2],
[1, 1, 1, 2, 2, 2],
[0, 0, 0, 0, 0, 0]])
>>> profile_line(img, (2, 1), (2, 4))
array([ 1., 1., 2., 2.])
Notes
-----
The destination point is included in the profile, in contrast to
standard numpy indexing.
"""
import scipy.ndimage as nd
# Convert points coordinates from axes units to pixels
p0 = ((src[0] - axes[0].offset) / axes[0].scale,
(src[1] - axes[1].offset) / axes[1].scale)
p1 = ((dst[0] - axes[0].offset) / axes[0].scale,
(dst[1] - axes[1].offset) / axes[1].scale)
if linewidth < 0:
raise ValueError("linewidth must be positive number")
linewidth_px = linewidth / np.min([ax.scale for ax in axes])
linewidth_px = int(round(linewidth_px))
# Minimum size 1 pixel
linewidth_px = linewidth_px if linewidth_px >= 1 else 1
perp_lines = Line2DROI._line_profile_coordinates(p0, p1,
linewidth=linewidth_px)
if img.ndim > 2:
idx = [ax.index_in_array for ax in axes]
if idx[0] < idx[1]:
img = np.rollaxis(img, idx[0], 0)
img = np.rollaxis(img, idx[1], 1)
else:
img = np.rollaxis(img, idx[1], 0)
img = np.rollaxis(img, idx[0], 0)
orig_shape = img.shape
img = np.reshape(img, orig_shape[0:2] +
(np.product(orig_shape[2:]),))
pixels = [nd.map_coordinates(img[..., i].T, perp_lines,
order=order, mode=mode, cval=cval)
for i in range(img.shape[2])]
i0 = min(axes[0].index_in_array, axes[1].index_in_array)
pixels = np.transpose(np.asarray(pixels), (1, 2, 0))
intensities = pixels.mean(axis=1)
intensities = np.rollaxis(
np.reshape(intensities,
intensities.shape[0:1] + orig_shape[2:]),
0, i0 + 1)
else:
pixels = nd.map_coordinates(img, perp_lines,
order=order, mode=mode, cval=cval)
intensities = pixels.mean(axis=1)
return intensities
def __call__(self, signal, out=None, axes=None, order=0):
"""Slice the signal according to the ROI, and return it.
Arguments
---------
signal : Signal
The signal to slice with the ROI.
out : Signal, default = None
If the 'out' argument is supplied, the sliced output will be put
into this instead of returning a Signal. See Signal.__getitem__()
for more details on 'out'.
axes : specification of axes to use, default = None
The axes argument specifies which axes the ROI will be applied on.
The items in the collection can be either of the following:
* a tuple of:
- DataAxis. These will not be checked with
signal.axes_manager.
- anything that will index signal.axes_manager
* For any other value, it will check whether the navigation
space can fit the right number of axis, and use that if it
fits. If not, it will try the signal space.
order : The spline interpolation order to use when extracting the line
profile. 0 means nearest-neighbor interpolation, and is both the
default and the fastest.
"""
if axes is None and signal in self.signal_map:
axes = self.signal_map[signal][1]
else:
axes = self._parse_axes(axes, signal.axes_manager)
profile = Line2DROI.profile_line(signal.data,
(self.x1, self.y1),
(self.x2, self.y2),
axes=axes,
linewidth=self.linewidth,
order=order)
length = np.linalg.norm(np.diff(
np.array(((self.x1, self.y1), (self.x2, self.y2))), axis=0),
axis=1)[0]
if out is None:
axm = signal.axes_manager.deepcopy()
i0 = min(axes[0].index_in_array, axes[1].index_in_array)
axm.remove([ax.index_in_array + 3j for ax in axes])
axis = DataAxis(profile.shape[i0],
scale=length / profile.shape[i0],
units=axes[0].units,
navigate=axes[0].navigate)
axis.axes_manager = axm
axm._axes.insert(i0, axis)
from hyperspy.signals import BaseSignal
roi = BaseSignal(profile, axes=axm._get_axes_dicts(),
metadata=signal.metadata.deepcopy(
).as_dictionary(),
original_metadata=signal.original_metadata.
deepcopy().as_dictionary())
return roi
else:
out.data = profile
i0 = min(axes[0].index_in_array, axes[1].index_in_array)
ax = out.axes_manager._axes[i0]
size = len(profile)
scale = length / len(profile)
axchange = size != ax.size or scale != ax.scale
if axchange:
ax.size = len(profile)
ax.scale = length / len(profile)
out.events.data_changed.trigger(out)
def __repr__(self):
return "%s(x1=%g, y1=%g, x2=%g, y2=%g, linewidth=%g)" % (
self.__class__.__name__,
self.x1,
self.y1,
self.x2,
self.y2,
self.linewidth)
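# Usage sketch (illustrative only; assumes the constructor accepts the
# attributes shown in __repr__ above and that ``s`` is a HyperSpy signal --
# both are assumptions, not taken from this file):
#
#     roi = Line2DROI(x1=0., y1=0., x2=10., y2=10., linewidth=1.)
#     profile = roi(s)   # calls __call__ above and returns a BaseSignal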
| gpl-3.0 |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_cont/padova_cont_0/UV2.py | 33 | 7365 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
    patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
    patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
    patch = patches.PathPatch(path, facecolor='red', lw=0)
    ax.add_patch(patch3)
    ax.add_patch(patch2)
    ax.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
    for j in range(len(Emissionlines[0])):
        if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
            concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
        else:
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [18, #1549
19, #1640
20, #1665
21, #1671
23, #1750
24, #1860
25, #1888
26, #1907
27, #2297
28, #2321
29, #2471
30, #2326
31, #2335
32, #2665
33, #2798
34] #2803
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
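#note: this 10x10 xi/yi grid is the evaluation grid used by add_sub_plot(),
#which builds one Rbf interpolant per emission-line ratio in z and contours it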
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("UV Lines Continued", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('UV_Lines_cntd.pdf')
plt.clf()
| gpl-2.0 |
mueller-lab/PyFRAP | setup.py | 2 | 20933 | #Import setuptools
from setuptools import setup
#We need those two for doing some file permission magic later
import os
import platform
import shutil
#Overwrite setuptools install to be able to set file permissions
from setuptools.command.install import install
from distutils import log
from setuptools.command.install_scripts import install_scripts
#Import option parser so we can parse in options
import sys
# Define version
version='1.1.13'
def getOptions():
"""Checks options given to script.
If --fiji is in sys.argv, will set dFiji=1. \n
	If --noGmsh is in sys.argv, will set dGmsh=1 (the gmsh download is then skipped).
If --silent is in sys.argv, will set silent=1.
Note: Makes dGmsh and dFiji global: Not nice but seems easiest
way to get options into OverrideInstall.
"""
global dGmsh
global dFiji
global silent
dFiji=getOpt("--fiji")
dGmsh=getOpt("--noGmsh",default=0)
silent=getOpt("--silent")
def getOpt(optStr,default=0):
"""Checks if optStr is in sys.argv. If this is the case,
	returns 1 and removes it from sys.argv so setup.py will not crash,
otherwise returns default.
"""
if optStr in sys.argv:
opt=1
sys.argv.remove(optStr)
else:
opt=default
return opt
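#Illustration (hypothetical invocation, added for clarity): running
#    python setup.py install --noGmsh --silent
#makes getOptions() strip both flags from sys.argv (so setuptools does not
#trip over them) and leaves dFiji=0, dGmsh=1, silent=1.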
#Get Options
if __name__ == '__main__':
getOptions()
class OverrideInstall(install):
"""Override class subclassing install class from setuptools.
The Main purpose of this class is to give more possibilities when installing PyFRAP, such as:
* Download Gmsh and enter it automatically into path spec file
* Download Fiji and enter it automatically into path spec file
* Set ownership of data files so that even PyFRAP gets installed as superuser,
users will be able to use its full capacities.
Idea taken from http://stackoverflow.com/questions/5932804/set-file-permission-in-setup-py-file (thanks a bunch!)
"""
def initOptions(self):
"""Parses options into override class.
"""
self.dFiji=bool(dFiji)
self.dGmsh=not bool(dGmsh)
self.silent=bool(silent)
log.info("Install options are:")
log.info("Install Fiji: " + str(self.dFiji))
log.info("Install gmsh: " + str(self.dGmsh))
log.info("Silent Mode: " + str(self.silent))
#Define pathFile
self.pathFile='paths'
def run(self):
"""Runs install.
"""
self.initOptions()
#Try to download gmsh
if self.dGmsh:
self.downloadGmsh()
else:
self.gmshDownloaded=False
#Try to download fiji
if self.dFiji:
self.downloadFiji()
else:
self.fijiDownloaded=False
#Run setuptools install
install.run(self)
#Print log info
if not self.silent:
log.info("Overriding setuptools mode of scripts ...")
#Add Data and edit file permissions
self.addData()
def addData(self):
"""Adds Datafiles to PyFRAP installation.
		Makes sure that $USER has proper read/write/execute rights. Note that for Windows it will not change rights,
		since this is not necessary. \n
		Also makes sure that the gmsh/Fiji bin is properly linked.
"""
if not self.silent:
log.info("in add data")
uid,gid,mode=self.getPermDetails()
#Overwrite file permissions
for filepath in self.get_outputs():
#log.info("Copying files.")
if platform.system() not in ["Windows"]:
if "meshfiles" in filepath or "configurations" in filepath or "executables" in filepath:
#Change permissions for folder containing files
folderpath=os.path.dirname(os.path.realpath(filepath))
self.changePermissions(folderpath,uid,gid,mode)
#Change permissions of file
self.changePermissions(filepath,uid,gid,mode)
#Make some more necessary data folders
if folderpath.endswith("meshfiles"):
self.makeAdditionalDataFolders(folderpath,"field",uid,gid,mode)
self.makeAdditionalDataFolders(folderpath,"field/custom",uid,gid,mode)
if folderpath.endswith("configurations"):
self.makeAdditionalDataFolders(folderpath,"macros",uid,gid,mode)
#log.info("Adding executables to path file")
#Add gmsh into paths.default if download was successful
if self.pathFile == os.path.basename(filepath):
if self.gmshDownloaded:
self.setGmshPath(filepath)
if self.fijiDownloaded:
self.setFijiPath(filepath)
if platform.system() not in ["Windows"]:
folderpath=os.path.dirname(os.path.realpath(filepath))
self.changePermissions(folderpath,uid,gid,mode)
self.changePermissions(filepath,uid,gid,mode)
def getPermDetails(self):
"""Returns the permission details used to change permissions.
"""
if platform.system() not in ["Windows"]:
import pwd
#Grab user ID and group ID of actual use
try:
uid=pwd.getpwnam(os.getlogin())[2]
gid=pwd.getpwnam(os.getlogin())[3]
except:
if not self.silent:
log.info("Was not able to retrieve UID via os.getlogin, using os.getuid instead.")
uid=os.getuid()
gid=os.getgid()
#Mode for files (everyone can read/write/execute. This is somewhat an overkill, but 0666 seems somehow not to work.)
mode=0777
return uid,gid,mode
return 0,0,0
def cleanUpExe(self,fnDL,folderFn,filesBefore,exePath):
"""Moves it to executables directory and cleans up afterwards.
"""
#Copy file to pyfrp/executables/
try:
shutil.rmtree(exePath)
except:
pass
if os.path.isdir(folderFn):
shutil.copytree(folderFn+"/",exePath)
else:
try:
os.mkdir(os.path.dirname(exePath))
except:
log.info("Was not able to create folder " + os.path.dirname(exePath))
shutil.copy(folderFn,exePath)
#Remove downloaded files
os.remove(fnDL)
#Get fileList before
filesAfter=os.listdir('.')
self.cleanDiff(filesBefore,filesAfter)
def cleanDiff(self,filesBefore,filesAfter):
#Difference between files
filesDiff=list(set(filesAfter)-set(filesBefore))
for fn in filesDiff:
try:
if os.path.isdir(fn):
shutil.rmtree(fn)
else:
os.remove(fn)
except:
log.info("cleanDiff report: Was not able to delete file:" + fn)
def downloadGmsh(self):
"""Downloads Gmsh, moves it to executables directory and cleans up afterwards.
Note that this will only work if *wget* is installed.
"""
#Define gmshVersion (might need to update this line once in a while)
gmshVersion='2.14.0'
#Flag to see if gmsh DL went through
self.gmshDownloaded=False
self.makeExeFolder()
#Get fileList before
filesBefore=os.listdir('.')
#Try to import wget
try:
import wget
#Get Architecture
arch=platform.architecture()[0].replace('bit','')
if platform.system() in ["Windows"]:
fnDL,folderFn=self.downloadGmshWin(arch,gmshVersion)
elif platform.system() in ["Linux"]:
fnDL,folderFn=self.downloadGmshLinux(arch,gmshVersion)
elif platform.system() in ["Darwin"]:
fnDL,folderFn=self.downloadGmshOSX(arch,gmshVersion)
#Remove files
self.cleanUpExe(fnDL,folderFn,filesBefore,'pyfrp/executables/gmsh/')
uid,gid,mode=self.getPermDetails()
if platform.system() not in ["Windows"]:
self.changePermissions(self.gmshPath,uid,gid,mode)
#self.addPathToWinPATHs(self.gmshPath)
log.info("Installed gmsh to "+ self.gmshPath)
#Set Flag=True
self.gmshDownloaded=True
except ImportError:
log.info("Cannot find wget, will not be downloading gmsh. You will need to install it later manually")
def downloadFileIfNotExist(self,url):
"""Downloads URL if file does not already exist.
Args:
url (str): URL to download.
Returns:
tuple: Tuple containing:
			* folderFn (str): Filename of extracted download files
			* fnDL (str): Download filename
"""
import wget
cwd=os.getcwd()+"/"
if not os.path.exists(cwd+os.path.basename(url)):
log.info(cwd+os.path.basename(url) +" does not exist, will download it.")
folderFn=wget.download(url)
else:
log.info(cwd+os.path.basename(url) +" alreay exists, will not download.")
folderFn=os.path.basename(url)
fnDL=str(folderFn)
print
return folderFn, fnDL
def downloadGmshWin(self,arch,gmshVersion):
"""Downloads Gmsh from Gmsh website for Windows
Args:
arch (str): System architecture, e.g. 64/32.
gmshVersion (str): gmshVersion String, e.g. 2.12.0 .
Returns:
tuple: Tuple containing:
			* fnDL (str): Download filename
* folderFn (str): Filename of extracted download files
"""
#Download Gmsh
url='http://gmsh.info/bin/Windows/gmsh-'+gmshVersion+'-Windows'+arch+'.zip'
folderFn, fnDL=self.downloadFileIfNotExist(url)
#Decompress
import zipfile
with zipfile.ZipFile(folderFn) as zf:
zf.extractall()
folderFn='gmsh-'+gmshVersion+'-Windows'
self.gmshPath='executables/gmsh/gmsh.exe'
return fnDL,folderFn
def downloadGmshOSX(self,arch,gmshVersion):
"""Downloads Gmsh from Gmsh website for OSX.
Args:
arch (str): System architecture, e.g. 64/32.
gmshVersion (str): gmshVersion String, e.g. 2.12.0 .
Returns:
tuple: Tuple containing:
			* fnDL (str): Download filename
* folderFn (str): Filename of extracted download files
"""
#Download Gmsh (if file isn't there yet)
url='http://gmsh.info/bin/MacOSX/gmsh-'+gmshVersion+'-MacOSX'+'.dmg'
folderFn, fnDL=self.downloadFileIfNotExist(url)
#Mount dmg file (Here the user need to read through LICENSE, don't know how to fix this)
log.info("executing: "+ 'hdiutil attach '+folderFn)
os.system('hdiutil attach '+folderFn)
folderFn=folderFn.replace('.dmg','')
#try:
#os.mkdir(folderFn)
#except OSError:
#pass
cwd=os.getcwd()
#Copy gmsh executable to cwd
#Note: It seems to vary where gmsh executable is in mounted dmg file, hence we
#just have to try out, take the one that actually worked and remember it
rets=[]
possFiles=["bin/","share/","gmsh"]
rets.append(os.system('cp -rv /Volumes/'+folderFn+'/Gmsh.app/Contents/MacOS/bin/ '+ cwd))
rets.append(os.system('cp -rv /Volumes/'+folderFn+'/Gmsh.app/Contents/MacOS/share/ '+ cwd))
rets.append(os.system('cp -rv /Volumes/'+folderFn+'/Gmsh.app/Contents/MacOS/gmsh '+ cwd))
fnWorked=possFiles[rets.index(0)]
#Unmount gmsh
os.system('hdiutil detach /Volumes/'+folderFn+'/')
		#Build filename of actually copied file
folderFn=cwd+"/"+fnWorked
self.gmshPath='executables/gmsh/./gmsh'
return fnDL,folderFn
def downloadGmshLinux(self,arch,gmshVersion):
"""Downloads Gmsh from Gmsh website for Linux.
Args:
arch (str): System architecture, e.g. 64/32.
gmshVersion (str): gmshVersion String, e.g. 2.12.0 .
Returns:
tuple: Tuple containing:
			* fnDL (str): Download filename
* folderFn (str): Filename of extracted download files
"""
#Download Gmsh
url='http://gmsh.info/bin/Linux/gmsh-'+gmshVersion+'-Linux'+arch+'.tgz'
folderFn, fnDL=self.downloadFileIfNotExist(url)
#Decompress
import tarfile
with tarfile.open(folderFn,mode='r:gz') as zf:
zf.extractall()
folderFn='gmsh-'+gmshVersion+'-Linux'
self.gmshPath='executables/gmsh/bin/./gmsh'
return fnDL,folderFn
def makeExeFolder(self):
#Make executables folder if it doesn't exist yet
try:
os.mkdir('pyfrp/executables')
except OSError:
log.info('Was not able to create directory pyfrp/executables')
def addPathToWinPATHs(self,path):
"""Adds a path to Windows' PATH list.
.. note:: Only adds path if file exits.
.. note:: You will need to restart the terminal to
be sure that the change has any effect.
Args:
path (str): Path to be added.
Returns:
bool: True if successful.
"""
if platform.system() not in ["Windows"]:
log.info("OS is not Windows, won't set path")
return False
if path in os.environ['PATH']:
log.info("Path is already in PATH, won't set path")
return False
if os.path.exists(path):
os.system("set PATH=%PATH%;"+path)
return True
else:
log.info(path + " does not exist, won't set path")
return False
def downloadFiji(self):
"""Downloads Gmsh, moves it to executables directory and cleans up afterwards.
Note that this will only work if *wget* is installed.
"""
		#Flag to see if fiji DL went through
self.fijiDownloaded=False
self.makeExeFolder()
#Get fileList before
filesBefore=os.listdir('.')
#Try to import wget
try:
import wget
#Get Architecture
arch=platform.architecture()[0].replace('bit','')
if platform.system() in ["Windows"]:
fnDL,folderFn=self.downloadFijiWin(arch)
elif platform.system() in ["Linux"]:
fnDL,folderFn=self.downloadFijiLinux(arch)
elif platform.system() in ["Darwin"]:
fnDL,folderFn=self.downloadFijiOSX(arch)
#Remove files
self.cleanUpExe(fnDL,folderFn,filesBefore,'pyfrp/executables/Fiji.app/')
uid,gid,mode=self.getPermDetails()
if platform.system() not in ["Windows"]:
self.changePermissions(self.fijiPath,uid,gid,mode)
#self.addPathToWinPATHs(self.fijiPath)
log.info("Installed Fiji to "+ self.fijiPath)
#Set Flag=True
self.fijiDownloaded=True
except ImportError:
log.info("Cannot find wget, will not be downloading fiji. You will need to install it later manually")
def downloadFijiLinux(self,arch):
"""Downloads Fiji from Fiji website for Linux.
Args:
arch (str): System architecture, e.g. 64/32.
Returns:
tuple: Tuple containing:
			* fnDL (str): Download filename
* folderFn (str): Filename of extracted download files
"""
import wget
#Download Fiji
url='http://downloads.imagej.net/fiji/latest/fiji-linux'+arch+'.zip'
folderFn=wget.download(url)
fnDL=str(folderFn)
print
#Decompress
import zipfile
with zipfile.ZipFile(folderFn) as zf:
zf.extractall()
folderFn='Fiji.app'
self.fijiPath='executables/Fiji.app/./ImageJ-linux64'
return fnDL,folderFn
def downloadFijiWin(self,arch):
"""Downloads Fiji from Fiji website for Windows.
Args:
arch (str): System architecture, e.g. 64/32.
Returns:
tuple: Tuple containing:
			* fnDL (str): Download filename
* folderFn (str): Filename of extracted download files
"""
import wget
#Download fiji
url='http://downloads.imagej.net/fiji/latest/fiji-win'+arch+'.zip'
folderFn=wget.download(url)
fnDL=str(folderFn)
print
#Decompress
import zipfile
with zipfile.ZipFile(folderFn) as zf:
zf.extractall()
folderFn='Fiji.app'
self.fijiPath='executables/Fiji.app/ImageJ-linux64.exe'
return fnDL,folderFn
def downloadFijiOSX(self,arch):
"""Downloads Fiji from Fiji website for OSX.
Returns:
tuple: Tuple containing:
			* fnDL (str): Download filename
* folderFn (str): Filename of extracted download files
"""
import wget
#Download fiji
url='http://downloads.imagej.net/fiji/latest/fiji-macosx.dmg'
folderFn=wget.download(url)
fnDL=str(folderFn)
print
#Mount dmg file
os.system('hdiutil attach '+folderFn)
cwd=os.getcwd()
#Copy fiji executable to cwd
os.system('cp -rv /Volumes/Fiji/Fiji.app '+ cwd)
		#Unmount the Fiji image
os.system('hdiutil detach /Volumes/Fiji')
folderFn='Fiji.app'
self.fijiPath='executables/Fiji.app/Contents/MacOS/./ImageJ-macosx'
return fnDL,folderFn
def winToLinPath(self,path):
return path.replace('\\','/')
def setExePath(self,fn,identifier,exePath):
"""Enters executable path into path spec file.
Args:
			fn (str): Path to the path spec file that is edited.
identifier (str): Identifier in spec file.
exePath (str): Path to exe file
"""
#Make backup of default path file
shutil.copy(fn,fn+'_backup')
#Get filepath to PyFRAP
fnPyfrp=fn.split('configurations')[0]
# Remove possible backslashes from path
fn=self.winToLinPath(fn)
#Open file and enter new gmsh bin
with open(fn,'rb') as fPath:
with open(fn+"_new",'wb') as fPathNew:
for line in fPath:
if line.strip().startswith(identifier):
ident,path=line.split('=')
path=path.strip()
lineNew=ident+"="+fnPyfrp+exePath
fPathNew.write(lineNew+'\n')
else:
fPathNew.write(line)
#Rename file
shutil.move(fn+'_new',fn)
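	#Illustration of the rewrite above (values are made up): a line in the
	#'paths' file such as
	#    gmshBin=/usr/bin/gmsh
	#is rewritten to point at the bundled executable, e.g.
	#    gmshBin=<PyFRAP package dir>/executables/gmsh/bin/./gmsh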
def setGmshPath(self,fn):
"""Enters gmsh executable path into path spec file.
Args:
			fn (str): Path to the path spec file.
"""
self.setExePath(fn,'gmshBin',self.gmshPath)
def setFijiPath(self,fn):
"""Enters fiji executable path into path spec file.
Args:
			fn (str): Path to the path spec file.
"""
self.setExePath(fn,'fijiBin',self.fijiPath)
def changePermissions(self,filepath,uid,gid,mode):
"""Sets File Permissions.
Args:
filepath (str): Path to file.
uid (int): user ID.
gid (int): group ID.
mode (int): Permission mode.
Returns:
bool: True if success
"""
ret=True
try:
os.chown(filepath, uid, gid)
if not self.silent:
log.info("Changing ownership of %s to uid:%s gid %s" %(filepath, uid, gid))
except:
if not self.silent:
log.info("Was not able to change ownership of file %s" %(filepath))
ret=False
try:
if not self.silent:
log.info("Changing permissions of %s to %s" %(filepath, oct(mode)))
os.chmod(filepath, mode)
except:
if not self.silent:
log.info("Was not able to change file permissions of file %s" %(filepath))
ret=False
return ret
def makeAdditionalDataFolders(self,folder,fn,uid,gid,mode):
"""Tries to generate additional data folders.
Args:
folder (str): Path to containing folder.
fn (str): New folder name
uid (int): user ID.
gid (int): group ID.
mode (int): Permission mode.
Returns:
bool: True if success
"""
if not folder.endswith("/"):
folder=folder+"/"
if os.path.isdir(folder+fn):
return False
else:
try:
os.mkdir(folder+fn)
self.changePermissions(folder+fn,uid,gid,mode)
return True
except:
log.info("Unable to create folder %s" %(folder+fn))
return False
#Define setup
#Check if setup.py is used to build RTD, then don't overwrite install command
if os.environ.get('READTHEDOCS', None) == 'True':
print "Installing on RTD, will not overwrite install command."
setup(name='pyfrp',
version=version,
description='PyFRAP: A Python based FRAP analysis tool box',
url='https://github.com/alexblaessle/PyFRAP',
author='Alexander Blaessle',
author_email='[email protected]',
license='GNU GPL Version 3',
packages=['pyfrp','pyfrp.modules','pyfrp.subclasses','pyfrp.gui'],
package_dir={'pyfrp': 'pyfrp',
'pyfrp.modules': 'pyfrp/modules',
'pyfrp.gui' : 'pyfrp/gui'
},
#package_data = {'pyfrp':['meshfiles','configurations']},
include_package_data=True,
classifiers= [
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\
'Programming Language :: Python :: 2.7',
],
install_requires=['pyopenssl','ndg-httpsclient','pyasn1','ez_setup','numpy','scipy','matplotlib','scikit-image','FiPy','colorama','numpy-stl','solidpython','wget','python-bioformats'],
platforms=['ALL'],
keywords=["FRAP", "fluorescence",'recovery','after','photobleaching','reaction-diffusion','fitting'
],
zip_safe=False
)
else:
setup(name='pyfrp',
version=version,
description='PyFRAP: A Python based FRAP analysis tool box',
url='https://github.com/alexblaessle/PyFRAP',
author='Alexander Blaessle',
author_email='[email protected]',
license='GNU GPL Version 3',
packages=['pyfrp','pyfrp.modules','pyfrp.subclasses','pyfrp.gui'],
package_dir={'pyfrp': 'pyfrp',
'pyfrp.modules': 'pyfrp/modules',
'pyfrp.gui' : 'pyfrp/gui'
},
#package_data = {'pyfrp':['meshfiles','configurations']},
include_package_data=True,
classifiers= [
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\
'Programming Language :: Python :: 2.7',
],
install_requires=['pyopenssl','ndg-httpsclient','pyasn1','ez_setup','numpy','scipy','matplotlib','scikit-image','FiPy','colorama','numpy-stl','solidpython','wget','python-bioformats'],
platforms=['ALL'],
keywords=["FRAP", "fluorescence",'recovery','after','photobleaching','reaction-diffusion','fitting'
],
zip_safe=False,
cmdclass={'install': OverrideInstall} #Need this here to overwrite our install
)
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tseries/timedeltas.py | 7 | 6047 | """
timedelta support tools
"""
import numpy as np
import pandas as pd
import pandas.tslib as tslib
from pandas.types.common import (_ensure_object,
is_integer_dtype,
is_timedelta64_dtype,
is_list_like)
from pandas.types.generic import ABCSeries, ABCIndexClass
from pandas.util.decorators import deprecate_kwarg
@deprecate_kwarg(old_arg_name='coerce', new_arg_name='errors',
mapping={True: 'coerce', False: 'raise'})
def to_timedelta(arg, unit='ns', box=True, errors='raise', coerce=None):
"""
Convert argument to timedelta
Parameters
----------
arg : string, timedelta, list, tuple, 1-d array, or Series
unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, which is an
integer/float number
box : boolean, default True
- If True returns a Timedelta/TimedeltaIndex of the results
- if False returns a np.timedelta64 or ndarray of values of dtype
timedelta64[ns]
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
Returns
-------
ret : timedelta64/arrays of timedelta64 if parsing succeeded
Examples
--------
Parsing a single string to a Timedelta:
>>> pd.to_timedelta('1 days 06:05:01.00003')
Timedelta('1 days 06:05:01.000030')
>>> pd.to_timedelta('15.5us')
Timedelta('0 days 00:00:00.000015')
Parsing a list or array of strings:
>>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan'])
TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015', NaT],
dtype='timedelta64[ns]', freq=None)
Converting numbers by specifying the `unit` keyword argument:
>>> pd.to_timedelta(np.arange(5), unit='s')
TimedeltaIndex(['00:00:00', '00:00:01', '00:00:02',
'00:00:03', '00:00:04'],
dtype='timedelta64[ns]', freq=None)
>>> pd.to_timedelta(np.arange(5), unit='d')
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
"""
unit = _validate_timedelta_unit(unit)
if errors not in ('ignore', 'raise', 'coerce'):
raise ValueError("errors must be one of 'ignore', "
"'raise', or 'coerce'}")
if arg is None:
return arg
elif isinstance(arg, ABCSeries):
from pandas import Series
values = _convert_listlike(arg._values, unit=unit,
box=False, errors=errors)
return Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, unit=unit, box=box,
errors=errors, name=arg.name)
elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 1:
return _convert_listlike(arg, unit=unit, box=box, errors=errors)
elif getattr(arg, 'ndim', 1) > 1:
raise TypeError('arg must be a string, timedelta, list, tuple, '
'1-d array, or Series')
# ...so it must be a scalar value. Return scalar.
return _coerce_scalar_to_timedelta_type(arg, unit=unit,
box=box, errors=errors)
_unit_map = {
'Y': 'Y',
'y': 'Y',
'W': 'W',
'w': 'W',
'D': 'D',
'd': 'D',
'days': 'D',
'Days': 'D',
'day': 'D',
'Day': 'D',
'M': 'M',
'H': 'h',
'h': 'h',
'm': 'm',
'T': 'm',
'S': 's',
's': 's',
'L': 'ms',
'MS': 'ms',
'ms': 'ms',
'US': 'us',
'us': 'us',
'NS': 'ns',
'ns': 'ns',
}
def _validate_timedelta_unit(arg):
""" provide validation / translation for timedelta short units """
try:
return _unit_map[arg]
except:
if arg is None:
return 'ns'
raise ValueError("invalid timedelta unit {0} provided".format(arg))
def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'):
"""Convert string 'r' to a timedelta object."""
try:
result = tslib.convert_to_timedelta64(r, unit)
except ValueError:
if errors == 'raise':
raise
elif errors == 'ignore':
return r
# coerce
result = pd.NaT
if box:
result = tslib.Timedelta(result)
return result
def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None):
"""Convert a list of objects to a timedelta index object."""
if isinstance(arg, (list, tuple)) or not hasattr(arg, 'dtype'):
arg = np.array(list(arg), dtype='O')
# these are shortcut-able
if is_timedelta64_dtype(arg):
value = arg.astype('timedelta64[ns]')
elif is_integer_dtype(arg):
value = arg.astype('timedelta64[{0}]'.format(
unit)).astype('timedelta64[ns]', copy=False)
else:
try:
value = tslib.array_to_timedelta64(_ensure_object(arg),
unit=unit, errors=errors)
value = value.astype('timedelta64[ns]', copy=False)
except ValueError:
if errors == 'ignore':
return arg
else:
# This else-block accounts for the cases when errors='raise'
# and errors='coerce'. If errors == 'raise', these errors
# should be raised. If errors == 'coerce', we shouldn't
# expect any errors to be raised, since all parsing errors
# cause coercion to pd.NaT. However, if an error / bug is
# introduced that causes an Exception to be raised, we would
# like to surface it.
raise
if box:
from pandas import TimedeltaIndex
value = TimedeltaIndex(value, unit='ns', name=name)
return value
| gpl-3.0 |
ainafp/nilearn | plot_haxby_grid_search.py | 1 | 5358 | """
Setting a parameter by cross-validation
=======================================================
Here we set the number of features selected in an Anova-SVC approach to
maximize the cross-validation score.
After separating 2 sessions for validation, we vary that parameter and
measure the cross-validation score. We also measure the prediction score
on the left-out validation data. As we can see, the two scores vary by a
significant amount: this is due to sampling noise in cross validation,
and choosing the parameter k to maximize the cross-validation score,
might not maximize the score on left-out data.
Thus using data to maximize a cross-validation score computed on that
same data is likely to optimistic and lead to an overfit.
The proper appraoch is known as a "nested cross-validation". It consists
in doing cross-validation loops to set the model parameters inside the
cross-validation loop used to judge the prediction performance: the
parameters are set separately on each fold, never using the data used to
measure performance.
In scikit-learn, this can be done using the GridSearchCV object, that
will automatically select the best parameters of an estimator from a
grid of parameter values.
One difficulty here is that we are working with a composite estimator: a
pipeline of feature selection followed by SVC. Thus to give the name
of the parameter that we want to tune we need to give the name of the
step in the pipeline, followed by the name of the parameter, with '__' as
a separator.
"""
### Load Haxby dataset ########################################################
from nilearn import datasets
import numpy as np
dataset_files = datasets.fetch_haxby_simple()
y, session = np.loadtxt(dataset_files.session_target).astype("int").T
conditions = np.recfromtxt(dataset_files.conditions_target)['f0']
mask_file = dataset_files.mask
# fmri_data.shape is (40, 64, 64, 1452)
# and mask.shape is (40, 64, 64)
### Preprocess data ###########################################################
### Restrict to faces and houses ##############################################
# Keep only data corresponding to shoes or bottles
condition_mask = np.logical_or(conditions == 'shoe', conditions == 'bottle')
y = y[condition_mask]
session = session[condition_mask]
conditions = conditions[condition_mask]
### Loading step ##############################################################
from nilearn.input_data import NiftiMasker
# For decoding, standardizing is often very important
nifti_masker = NiftiMasker(mask=mask_file, sessions=session, smoothing_fwhm=4,
standardize=True, memory="nilearn_cache",
memory_level=1)
X = nifti_masker.fit_transform(dataset_files.func)
# Restrict to non rest data
X = X[condition_mask]
### Prediction function #######################################################
### Define the prediction function to be used.
# Here we use a Support Vector Classification, with a linear kernel
from sklearn.svm import SVC
svc = SVC(kernel='linear')
### Dimension reduction #######################################################
from sklearn.feature_selection import SelectKBest, f_classif
### Define the dimension reduction to be used.
# Here we use a classical univariate feature selection based on F-test,
# namely Anova. We set the number of features to be selected to 500
feature_selection = SelectKBest(f_classif, k=500)
# We have our classifier (SVC), our feature selection (SelectKBest), and now,
# we can plug them together in a *pipeline* that performs the two operations
# successively:
from sklearn.pipeline import Pipeline
anova_svc = Pipeline([('anova', feature_selection), ('svc', svc)])
### Cross validation ##########################################################
anova_svc.fit(X, y)
y_pred = anova_svc.predict(X)
from sklearn.cross_validation import LeaveOneLabelOut, cross_val_score
cv = LeaveOneLabelOut(session[session < 10])
k_range = [10, 15, 30, 50, 150, 300, 500, 1000, 1500, 3000, 5000]
cv_scores = []
scores_validation = []
for k in k_range:
feature_selection.k = k
cv_scores.append(np.mean(
cross_val_score(anova_svc, X[session < 10], y[session < 10])))
print "CV score", cv_scores[-1]
anova_svc.fit(X[session < 10], y[session < 10])
y_pred = anova_svc.predict(X[session == 10])
scores_validation.append(np.mean(y_pred == y[session == 10]))
print "score validation", scores_validation[-1]
from matplotlib import pyplot as plt
plt.figure(figsize=(6, 4))
plt.plot(cv_scores, label='Cross validation scores')
plt.plot(scores_validation, label='Left-out validation data scores')
plt.xticks(np.arange(len(k_range)), k_range)
plt.axis('tight')
plt.xlabel('k')
### Nested cross-validation ###################################################
from sklearn.grid_search import GridSearchCV
# We are going to tune the parameter 'k' of the step called 'anova' in
# the pipeline. Thus we need to address it as 'anova__k'.
# Note that GridSearchCV takes an n_jobs argument that can make it go
# much faster
grid = GridSearchCV(anova_svc, param_grid={'anova__k': k_range}, verbose=True)
nested_cv_scores = cross_val_score(grid, X, y)
plt.axhline(np.mean(nested_cv_scores),
label='Nested cross-validation',
color='r')
plt.legend(loc='best', frameon=False)
plt.show()
| bsd-3-clause |
wavelets/hmmlearn | examples/plot_hmm_stock_analysis.py | 4 | 2785 | """
==========================
Gaussian HMM of stock data
==========================
This script shows how to use Gaussian HMM.
It uses stock price data, which can be obtained from yahoo finance.
For more information on how to get stock prices with matplotlib, please refer
to date_demo1.py of matplotlib.
"""
from __future__ import print_function
import datetime
import numpy as np
import pylab as pl
from matplotlib.finance import quotes_historical_yahoo
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter
from hmmlearn.hmm import GaussianHMM
print(__doc__)
###############################################################################
# Downloading the data
date1 = datetime.date(1995, 1, 1) # start date
date2 = datetime.date(2012, 1, 6) # end date
# get quotes from yahoo finance
quotes = quotes_historical_yahoo("INTC", date1, date2)
if len(quotes) == 0:
raise SystemExit
# unpack quotes
dates = np.array([q[0] for q in quotes], dtype=int)
close_v = np.array([q[2] for q in quotes])
volume = np.array([q[5] for q in quotes])[1:]
# take diff of close value
# this makes len(diff) = len(close_v) - 1
# therefore, the other quantities also need to be shifted
diff = close_v[1:] - close_v[:-1]
dates = dates[1:]
close_v = close_v[1:]
# pack diff and volume for training
X = np.column_stack([diff, volume])
###############################################################################
# Run Gaussian HMM
print("fitting to HMM and decoding ...", end='')
n_components = 5
# make an HMM instance and execute fit
model = GaussianHMM(n_components, covariance_type="diag", n_iter=1000)
model.fit([X])
# predict the optimal sequence of internal hidden state
hidden_states = model.predict(X)
print("done\n")
###############################################################################
# print trained parameters and plot
print("Transition matrix")
print(model.transmat_)
print()
print("means and vars of each hidden state")
for i in range(n_components):
print("%dth hidden state" % i)
print("mean = ", model.means_[i])
print("var = ", np.diag(model.covars_[i]))
print()
years = YearLocator() # every year
months = MonthLocator() # every month
yearsFmt = DateFormatter('%Y')
fig = pl.figure()
ax = fig.add_subplot(111)
for i in range(n_components):
# use fancy indexing to plot data in each state
idx = (hidden_states == i)
ax.plot_date(dates[idx], close_v[idx], 'o', label="%dth hidden state" % i)
ax.legend()
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
ax.autoscale_view()
# format the coords message box
ax.fmt_xdata = DateFormatter('%Y-%m-%d')
ax.fmt_ydata = lambda x: '$%1.2f' % x
ax.grid(True)
fig.autofmt_xdate()
pl.show()
| bsd-3-clause |
RussianOtter/networking | RoA.py | 1 | 7731 | """
R --------- R
O O ------ O
N C A --- A
D C L
O U G +Copyright SavSec (c) 2017
M r O
E R +Algorithms are intellectual
N I so no Copyright except
C T directly to ownership of the
E H base algorithm.
M -MIT License-
"""
import time, random, sys, string, itertools, os
import base64, hashlib
from Crypto import Random
from Crypto.Cipher import AES
class AESCipher (object):
def __init__(self, key):
self.bs = 32
self.key = key
def encrypt(self, raw):
raw = self._pad(raw)
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return base64.b64encode(iv + cipher.encrypt(raw))
def decrypt(self, enc):
enc = base64.b64decode(enc)
iv = enc[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode("utf-8")
def _pad(self, s):
return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s)-1:])]
class RoA (object):
"""
	RoA / Random Occurrence Algorithm is an algorithm developed by Russian Otter! This basic algorithm generates random data based on the exponential outputs of the algorithm!
Requirements To Decrypt:
1.) AES Key
2.) RoA Key
3.) Salt
4.) Encrypted Dictionary
"""
def __init__(self,verbose=False):
"""
Setting this option to True makes RoA display all active activities!
"""
self.verbose = verbose
def algorithm(self,tm,base_length):
"""
RoA.algorithm(3 Digits, Key Length) -> Numeric Algorithm Output
		This is how RoA generates its data! The output of RoA is similar to Pi in the fact that it doesn't end and doesn't repeat!
"""
base = ""
d = ".".join("`"*i+" "[i : i+i] for i in range(0,125,random.randint(1,4)))+ ".".join("`"*i+" "[i : i] for i in range(0,125,random.randint(1,10)))
b = "".join(d[a : i] for i in range(0,10+tm,2) for a in range(i+~-i*i))
for _ in "".join(b[i:i+i] for i in range(0,tm+base_length,2)):
base += _
time.sleep(0.000005)
base = base[:base_length]
n = ""
for _ in range(tm):
n += str("".join(random.sample(base,random.choice(range(len(base))))).count("."))
time.sleep(0.0005)
try:
n = hex(int(n))[2:].replace("L","")[:base_length]
except:
return n
while 1:
if len(n) == base_length:
break
n += random.choice(string.hexdigits.lower())
return n[:base_length]
def generate_key(self,AESkey):
"""
		RoA.generate_key(32-character AES Key)
		This is used to generate the basic RoA Encryption Key
"""
if len(AESkey) != 32:
raise "AESkey length is %s. 32 bit key is required!"
tm = random.choice(range(0,100))
if len(str(tm)) == 2 and tm < 77:
tm = int("0"+str(tm))
if len(str(tm)) == 1:
tm = int("00"+str(tm))
key = self.algorithm(tm,16)
key = key + AESkey
return key
def encrypt(self,msg,key):
"""
		RoA.encrypt(message, RoA Key)
		This is the main encryption process of RoA, which makes it secure
"""
if self.verbose:
print "Master Key:",key
print "\nAES Key:",key[16:]
print "\nRoA Key:",key[:16]
print "\nEncrypting Message in AES..."
AESKey = key[16:]
rokey = key[:16] *2
aes = AESCipher(key[16:])
en_msg = aes.encrypt(msg)
if self.verbose:
print "\nEncrypted AES:",en_msg
print "\nApplying Salt Mix..."
msg = en_msg
frag = None
while 1:
for _ in range(4,9):
l = len(msg)/float(_)
if l.is_integer():
frag = _
msg += b"\x01"
if frag != None:
break
sp = len(msg)/frag
fragged = []
salt = []
while 1:
pun = True
for _ in msg:
if _ in string.punctuation:
t = "".join(random.sample(string.ascii_letters + string.digits + string.punctuation.strip("}").strip("'").strip("\"") +b"\x01",7))
pun = False
break
if pun:
t = "".join(random.sample(string.ascii_letters + string.digits+b"\x01",7))
if t not in "".join(msg):
salt.append(t)
if len(salt) > 15:
break
prev = 0
while True:
f = random.randint(1,sp)
fragged.append(msg[prev:prev+f])
prev = prev + f
if prev >= len(msg)-1:
break
if self.verbose:
print "Fragmented Text:"
print fragged
print "\nSalt & Pepper:"
print salt
refrag = {}
def shuffled(x):
y = x[:]
random.shuffle(y)
return y
x = shuffled(fragged)
for _ in fragged:
for i in x:
if i == _:
refrag.update(
{str(x[x.index(_)]
):fragged.index(i)})
if self.verbose:
print "\nMixed Fragments:"
print x
print "\nDictionary:"
print refrag
comp_refrag = str(refrag).replace(", '",",'")
comp_salt = str(salt).replace(", '",",'")
comp_dict = comp_refrag+b"\x02"+comp_salt.strip("\n")
if self.verbose:
print "\nCompressed Dictionary:"
print comp_dict
salt_mix = ""
sl = 0
for _ in x:
if sl > len(salt)-1:
sl = 0
salt_mix += _+salt[sl]
sl = sl + 1
if self.verbose:
print "\nSalted Text:"
print salt_mix
aessalt = AESCipher(rokey)
en_comp_dict = aessalt.encrypt(comp_dict)
en_salt = aessalt.encrypt(salt_mix)
if self.verbose:
print "\nEncrypted Dictionary:"
print en_comp_dict
print "\nEncrypted Salt:"
print en_salt
return en_salt+b"\x03"+en_comp_dict,rokey,AESKey
def decrypt(self,en_saltdict,key,AESKey):
"""
RoA.decrypt(encrypted salt dictionary, RoA Key, AES Key)
		This function reverses all of the previous steps based on the information it decrypts
"""
en_comp_dict = en_saltdict.split(b"\x03")[1]
en_salt = en_saltdict.split(b"\x03")[0]
aessalt = AESCipher(key)
comp_dict = aessalt.decrypt(en_comp_dict)
aessalt = AESCipher(key)
salt_mix = aessalt.decrypt(en_salt)
if self.verbose:
print "\n - Starting Decryption Process -"
print "\nCollecting Dictionary Data..."
de_dict = eval(comp_dict.split(b"\x02")[0])
water = eval(comp_dict.split(b"\x02")[1])
de_salt = salt_mix
remain = []
if self.verbose:
print "Diluting Salt..."
for _ in water:
de_salt = de_salt.replace(_,"")
if self.verbose:
print "Re-arranging Text..."
c = 0
en_txt = ""
for _ in de_dict:
for _ in de_dict:
if de_dict[_] == c:
en_txt += _
c = c + 1
aes = AESCipher(AESKey)
de_txt = aes.decrypt(en_txt)
if self.verbose:
print "\nEncrypted Text:"
print en_txt
print "\nDecrypting..."
print "\nDecrypted Text:"
print de_txt
return de_txt
Ro = RoA(True)
def example():
Ro = RoA(True)
saltdic = Ro.encrypt("Follow @Russian_Otter on Instagram!",Ro.generate_key("PythonRo"*4))
time.sleep(5)
decrypted = Ro.decrypt(saltdic[0],saltdic[1],saltdic[2])
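# Note on the tuple returned by RoA.encrypt (see example() above):
#   saltdic[0] -> encrypted salted text + b"\x03" + encrypted dictionary
#   saltdic[1] -> RoA key (the doubled 16-character key used for the salt layer)
#   saltdic[2] -> AES key
# All three pieces are required by RoA.decrypt.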
def pattern_test():
import matplotlib.pyplot as plt
import numpy as np
amount = "\r\r\ra: %s b: %s c: %s d: %s e: %s f: %s 1: %s 2: %s 3: %s 4: %s 5: %s 6: %s 7: %s 8: %s 9: %s 0: %s "
for _ in range(100):
key = Ro.algorithm(10,16)
f = open("tmp.bit","a")
f.write(key)
f.close()
total = open("tmp.bit").read()
a = amount.replace(" "," | ")
t = total
data = a % (t.count("a"),t.count("b"),t.count("c"
),t.count("d"),t.count("e"),t.count("f"),t.count("1"),t.count("2"
),t.count("3"),t.count("4"),t.count("5"),t.count("6"),t.count("7"),t.count("8"),t.count("9"),
t.count("0"))
sys.stdout.write(data)
time.sleep(0.00005)
t = data.split(" | ")
alphab = list("abcdef1234567890")
frequencies = []
for _ in range(1,32,2):
frequencies.append(int(t[_].replace(" ","")))
pos = np.arange(len(alphab))
width = 1.0
ax = plt.axes()
ax.set_xticks(pos + (width / 1))
ax.set_xticklabels(alphab)
plt.bar(pos, frequencies, width, color="lime")
plt.show()
os.remove("./tmp.bit")
| gpl-3.0 |
robblack007/clase-dinamica-robot | Practicas/practica2/robots/estaciones.py | 4 | 2501 | def estacion_3gdl(puerto_zmq = "5555"):
'''
    This function creates a 0MQ socket to publish data for three references.
>>> from robots.estaciones import estacion_3gdl
>>> estacion_3gdl("5555")
Iniciando estacion de referencias en el puerto 5555
'''
from zmq import Context, PUB
from msgpack import packb
from ipywidgets import interact
context = Context()
socket = context.socket(PUB)
socket.bind("tcp://*:" + puerto_zmq)
def mandar_mensaje(q1=0, q2=0, q3=0):
socket.send(packb([q1, q2, q3]))
print("Iniciando estacion de referencias en el puerto " + puerto_zmq)
interact(mandar_mensaje, q1=(-180.0, 180.0), q2=(-180.0, 180.0), q3=(-180.0, 180.0));
def estacion_1gdl(puerto_zmq = "5555"):
'''
    This function creates a 0MQ socket to publish data for a single reference.
    >>> from robots.estaciones import estacion_1gdl
    >>> estacion_1gdl("5555")
Iniciando estacion de referencias en el puerto 5555
'''
from zmq import Context, PUB
from msgpack import packb
from ipywidgets import interact
context = Context()
socket = context.socket(PUB)
socket.bind("tcp://*:" + puerto_zmq)
def mandar_mensaje(q1=0):
socket.send(packb([q1]))
print("Iniciando estacion de referencias en el puerto " + puerto_zmq)
interact(mandar_mensaje, q1=(-180.0, 180.0));
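# Subscriber sketch for the PUB sockets above (host/port are illustrative):
#   from zmq import Context, SUB, SUBSCRIBE
#   from msgpack import unpackb
#   sock = Context().socket(SUB)
#   sock.connect("tcp://localhost:5555")
#   sock.setsockopt(SUBSCRIBE, b"")
#   refs = unpackb(sock.recv())   # [q1, q2, q3] from estacion_3gdl, [q1] from estacion_1gdl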
def gen_sen_3gdl(puerto_zmq, gen1=True, gen2=True, gen3=True):
from zmq import Context, PUB
from msgpack import packb
from matplotlib.pyplot import figure
from time import time, sleep
from numpy import sin, pi
context = Context()
socket = context.socket(PUB)
socket.bind("tcp://*:" + puerto_zmq)
def mandar_mensaje(señal, g1, g2, g3):
socket.send(packb([señal if gen else 0 for gen in [g1, g2, g3]]))
fig = figure(figsize=(6,3))
ax = fig.gca()
t0 = time()
ts = []
ys = []
while True:
try:
t = time()-t0
if t >= 0.005:
y = 30*sin(pi*t)
mandar_mensaje(y, gen1, gen2, gen3)
ts.append(t)
ys.append(y)
ax.clear()
if len(ys) > 100:
ax.plot(ts[-100:], ys[-100:])
else:
ax.plot(ts, ys)
fig.canvas.draw()
except KeyboardInterrupt:
break | mit |
IssamLaradji/scikit-learn | sklearn/datasets/mlcomp.py | 41 | 3803 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
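# Usage sketch (the dataset name and path below are illustrative only):
#   os.environ['MLCOMP_DATASETS_HOME'] = '/path/to/mlcomp/datasets'
#   news_train = load_mlcomp('20news-18828', set_='train')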
| bsd-3-clause |
michigraber/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/axes.py | 2 | 324939 | from __future__ import division, print_function
import math, sys, warnings, datetime
from operator import itemgetter
import itertools
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
from matplotlib.artist import allow_rasterization
import matplotlib.axis as maxis
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as _ # <-registers a date unit converter
from matplotlib import docstring
import matplotlib.font_manager as font_manager
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.markers as mmarkers
import matplotlib.mlab as mlab
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.spines as mspines
import matplotlib.quiver as mquiver
import matplotlib.scale as mscale
import matplotlib.stackplot as mstack
import matplotlib.streamplot as mstream
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.tri as mtri
from matplotlib import MatplotlibDeprecationWarning as mplDeprecation
from matplotlib.container import BarContainer, ErrorbarContainer, StemContainer
iterable = cbook.iterable
is_string_like = cbook.is_string_like
is_sequence_of_strings = cbook.is_sequence_of_strings
def _string_to_bool(s):
if not is_string_like(s):
return s
if s == 'on':
return True
if s == 'off':
return False
raise ValueError("string argument must be either 'on' or 'off'")
def _process_plot_format(fmt):
"""
Process a MATLAB style color/line style format string. Return a
(*linestyle*, *color*) tuple as a result of the processing. Default
values are ('-', 'b'). Example format strings include:
* 'ko': black circles
* '.b': blue dots
* 'r--': red dashed lines
.. seealso::
:func:`~matplotlib.Line2D.lineStyles` and
:func:`~matplotlib.pyplot.colors`
for all possible styles and color format string.
"""
linestyle = None
marker = None
color = None
# Is fmt just a colorspec?
try:
color = mcolors.colorConverter.to_rgb(fmt)
# We need to differentiate grayscale '1.0' from tri_down marker '1'
try:
fmtint = str(int(fmt))
except ValueError:
return linestyle, marker, color # Yes
else:
if fmt != fmtint:
# user definitely doesn't want tri_down marker
return linestyle, marker, color # Yes
else:
# ignore converted color
color = None
except ValueError:
pass # No, not just a color.
# handle the multi char special cases and strip them from the
# string
if fmt.find('--')>=0:
linestyle = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.')>=0:
linestyle = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ')>=0:
linestyle = 'None'
fmt = fmt.replace(' ', '')
chars = [c for c in fmt]
for c in chars:
if c in mlines.lineStyles:
if linestyle is not None:
raise ValueError(
'Illegal format string "%s"; two linestyle symbols' % fmt)
linestyle = c
elif c in mlines.lineMarkers:
if marker is not None:
raise ValueError(
'Illegal format string "%s"; two marker symbols' % fmt)
marker = c
elif c in mcolors.colorConverter.colors:
if color is not None:
raise ValueError(
'Illegal format string "%s"; two color symbols' % fmt)
color = c
else:
raise ValueError(
'Unrecognized character %c in format string' % c)
if linestyle is None and marker is None:
linestyle = rcParams['lines.linestyle']
if linestyle is None:
linestyle = 'None'
if marker is None:
marker = 'None'
return linestyle, marker, color
def set_default_color_cycle(clist):
"""
Change the default cycle of colors that will be used by the plot
command. This must be called before creating the
:class:`Axes` to which it will apply; it will
apply to all future axes.
*clist* is a sequence of mpl color specifiers.
See also: :meth:`~matplotlib.axes.Axes.set_color_cycle`.
.. Note:: Deprecated 2010/01/03.
Set rcParams['axes.color_cycle'] directly.
"""
rcParams['axes.color_cycle'] = clist
warnings.warn("Set rcParams['axes.color_cycle'] directly", mplDeprecation)
class _process_plot_var_args(object):
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
def __init__(self, axes, command='plot'):
self.axes = axes
self.command = command
self.set_color_cycle()
def __getstate__(self):
# note: it is not possible to pickle a itertools.cycle instance
return {'axes': self.axes, 'command': self.command}
def __setstate__(self, state):
self.__dict__ = state.copy()
self.set_color_cycle()
def set_color_cycle(self, clist=None):
if clist is None:
clist = rcParams['axes.color_cycle']
self.color_cycle = itertools.cycle(clist)
def __call__(self, *args, **kwargs):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)
if self.axes.name == 'polar':
xunits = kwargs.pop( 'thetaunits', xunits )
yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)
if self.axes.name == 'polar':
yunits = kwargs.pop( 'runits', yunits )
if xunits!=self.axes.xaxis.units:
self.axes.xaxis.set_units(xunits)
if yunits!=self.axes.yaxis.units:
self.axes.yaxis.set_units(yunits)
ret = self._grab_next_args(*args, **kwargs)
return ret
def set_lineprops(self, line, **kwargs):
assert self.command == 'plot', 'set_lineprops only works with "plot"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(line,funcName):
raise TypeError('There is no line property "%s"'%key)
func = getattr(line,funcName)
func(val)
def set_patchprops(self, fill_poly, **kwargs):
assert self.command == 'fill', 'set_patchprops only works with "fill"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(fill_poly,funcName):
raise TypeError('There is no patch property "%s"'%key)
func = getattr(fill_poly,funcName)
func(val)
def _xy_from_xy(self, x, y):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
bx = self.axes.xaxis.update_units(x)
by = self.axes.yaxis.update_units(y)
if self.command!='plot':
# the Line2D class can handle unitized data, with
# support for post hoc unit changes etc. Other mpl
# artists, eg Polygon which _process_plot_var_args
# also serves on calls to fill, cannot. So this is a
# hack to say: if you are not "plot", which is
# creating Line2D, then convert the data now to
# floats. If you are plot, pass the raw data through
# to Line2D which will handle the conversion. So
# polygons will not support post hoc conversions of
# the unit type since they are not storing the orig
# data. Hopefully we can rationalize this at a later
# date - JDH
if bx:
x = self.axes.convert_xunits(x)
if by:
y = self.axes.convert_yunits(y)
x = np.atleast_1d(x) #like asanyarray, but converts scalar to array
y = np.atleast_1d(y)
if x.shape[0] != y.shape[0]:
raise ValueError("x and y must have same first dimension")
if x.ndim > 2 or y.ndim > 2:
raise ValueError("x and y can be no greater than 2-D")
if x.ndim == 1:
x = x[:,np.newaxis]
if y.ndim == 1:
y = y[:,np.newaxis]
return x, y
def _makeline(self, x, y, kw, kwargs):
kw = kw.copy() # Don't modify the original kw.
if not 'color' in kw and not 'color' in kwargs.keys():
kw['color'] = self.color_cycle.next()
# (can't use setdefault because it always evaluates
# its second argument)
seg = mlines.Line2D(x, y,
axes=self.axes,
**kw
)
self.set_lineprops(seg, **kwargs)
return seg
def _makefill(self, x, y, kw, kwargs):
try:
facecolor = kw['color']
except KeyError:
facecolor = self.color_cycle.next()
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=kw['closed']
)
self.set_patchprops(seg, **kwargs)
return seg
def _plot_args(self, tup, kwargs):
ret = []
if len(tup) > 1 and is_string_like(tup[-1]):
linestyle, marker, color = _process_plot_format(tup[-1])
tup = tup[:-1]
elif len(tup) == 3:
raise ValueError('third arg must be a format string')
else:
linestyle, marker, color = None, None, None
kw = {}
for k, v in zip(('linestyle', 'marker', 'color'),
(linestyle, marker, color)):
if v is not None:
kw[k] = v
y = np.atleast_1d(tup[-1])
if len(tup) == 2:
x = np.atleast_1d(tup[0])
else:
x = np.arange(y.shape[0], dtype=float)
x, y = self._xy_from_xy(x, y)
if self.command == 'plot':
func = self._makeline
else:
kw['closed'] = kwargs.get('closed', True)
func = self._makefill
ncx, ncy = x.shape[1], y.shape[1]
for j in xrange(max(ncx, ncy)):
seg = func(x[:,j%ncx], y[:,j%ncy], kw, kwargs)
ret.append(seg)
return ret
def _grab_next_args(self, *args, **kwargs):
remaining = args
while 1:
if len(remaining)==0:
return
if len(remaining) <= 3:
for seg in self._plot_args(remaining, kwargs):
yield seg
return
if is_string_like(remaining[2]):
isplit = 3
else:
isplit = 2
for seg in self._plot_args(remaining[:isplit], kwargs):
yield seg
remaining=remaining[isplit:]
class Axes(martist.Artist):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
name = "rectilinear"
_shared_x_axes = cbook.Grouper()
_shared_y_axes = cbook.Grouper()
def __str__(self):
return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds)
def __init__(self, fig, rect,
axisbg = None, # defaults to rc axes.facecolor
frameon = True,
sharex=None, # use Axes instance's xaxis info
sharey=None, # use Axes instance's yaxis info
label='',
xscale=None,
yscale=None,
**kwargs
):
"""
Build an :class:`Axes` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*adjustable* [ 'box' | 'datalim' | 'box-forced']
*alpha* float: the alpha transparency (can be None)
*anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
'NW', 'W' ]
*aspect* [ 'auto' | 'equal' | aspect_ratio ]
*autoscale_on* [ *True* | *False* ] whether or not to
autoscale the *viewlim*
*axis_bgcolor* any matplotlib color, see
:func:`~matplotlib.pyplot.colors`
*axisbelow* draw the grids and ticks below the other
artists
*cursor_props* a (*float*, *color*) tuple
*figure* a :class:`~matplotlib.figure.Figure`
instance
*frame_on* a boolean - draw the axes frame
*label* the axes label
*navigate* [ *True* | *False* ]
*navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation
toolbar button status
*position* [left, bottom, width, height] in
class:`~matplotlib.figure.Figure` coords
*sharex* an class:`~matplotlib.axes.Axes` instance
to share the x-axis with
*sharey* an class:`~matplotlib.axes.Axes` instance
to share the y-axis with
*title* the title string
*visible* [ *True* | *False* ] whether the axes is
visible
*xlabel* the xlabel
*xlim* (*xmin*, *xmax*) view limits
*xscale* [%(scale)s]
*xticklabels* sequence of strings
*xticks* sequence of floats
*ylabel* the ylabel strings
*ylim* (*ymin*, *ymax*) view limits
*yscale* [%(scale)s]
*yticklabels* sequence of strings
*yticks* sequence of floats
================ =========================================
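        A minimal construction sketch (assuming *fig* is an existing
        :class:`~matplotlib.figure.Figure`; axes are more commonly created
        with ``fig.add_subplot`` or ``fig.add_axes``)::
            ax = Axes(fig, [0.1, 0.1, 0.8, 0.8])
            fig.add_axes(ax)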
""" % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
martist.Artist.__init__(self)
if isinstance(rect, mtransforms.Bbox):
self._position = rect
else:
self._position = mtransforms.Bbox.from_bounds(*rect)
self._originalPosition = self._position.frozen()
self.set_axes(self)
self.set_aspect('auto')
self._adjustable = 'box'
self.set_anchor('C')
self._sharex = sharex
self._sharey = sharey
if sharex is not None:
self._shared_x_axes.join(self, sharex)
if sharex._adjustable == 'box':
sharex._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
if sharey is not None:
self._shared_y_axes.join(self, sharey)
if sharey._adjustable == 'box':
sharey._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
self.set_label(label)
self.set_figure(fig)
self.set_axes_locator(kwargs.get("axes_locator", None))
self.spines = self._gen_axes_spines()
# this call may differ for non-sep axes, eg polar
self._init_axis()
if axisbg is None: axisbg = rcParams['axes.facecolor']
self._axisbg = axisbg
self._frameon = frameon
self._axisbelow = rcParams['axes.axisbelow']
self._rasterization_zorder = None
self._hold = rcParams['axes.hold']
self._connected = {} # a dict from events to (id, func)
self.cla()
# funcs used to format x and y - fall back on major formatters
self.fmt_xdata = None
self.fmt_ydata = None
self.set_cursor_props((1,'k')) # set the cursor properties for axes
self._cachedRenderer = None
self.set_navigate(True)
self.set_navigate_mode(None)
if xscale:
self.set_xscale(xscale)
if yscale:
self.set_yscale(yscale)
if len(kwargs): martist.setp(self, **kwargs)
if self.xaxis is not None:
self._xcid = self.xaxis.callbacks.connect('units finalize',
self.relim)
if self.yaxis is not None:
self._ycid = self.yaxis.callbacks.connect('units finalize',
self.relim)
def __setstate__(self, state):
self.__dict__ = state
# put the _remove_method back on all artists contained within the axes
for container_name in ['lines', 'collections', 'tables', 'patches',
'texts', 'images']:
container = getattr(self, container_name)
for artist in container:
artist._remove_method = container.remove
def get_window_extent(self, *args, **kwargs):
"""
get the axes bounding box in display space; *args* and
*kwargs* are empty
"""
return self.bbox
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.spines['bottom'].register_axis(self.xaxis)
self.spines['top'].register_axis(self.xaxis)
self.yaxis = maxis.YAxis(self)
self.spines['left'].register_axis(self.yaxis)
self.spines['right'].register_axis(self.yaxis)
self._update_transScale()
def set_figure(self, fig):
"""
Set the class:`~matplotlib.axes.Axes` figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)
#these will be updated later as data is added
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
self._set_lim_and_transforms()
def _set_lim_and_transforms(self):
"""
set the *dataLim* and *viewLim*
:class:`~matplotlib.transforms.Bbox` attributes and the
*transScale*, *transData*, *transLimits* and *transAxes*
transformations.
.. note::
This method is primarily used by rectilinear projections
of the :class:`~matplotlib.axes.Axes` class, and is meant
to be overridden by new kinds of projection axes that need
different transformations and limits. (See
:class:`~matplotlib.projections.polar.PolarAxes` for an
            example.)
"""
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor.
# It is assumed that this part will have non-linear components
# (e.g. for a log scale).
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# An affine transformation on the data, generally to limit the
# range of the axes
self.transLimits = mtransforms.BboxTransformFrom(
mtransforms.TransformedBbox(self.viewLim, self.transScale))
# The parentheses are important for efficiency here -- they
# group the last two (which are usually affines) separately
# from the first (which, with log-scaling can be non-affine).
self.transData = self.transScale + (self.transLimits + self.transAxes)
self._xaxis_transform = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
def get_xaxis_transform(self,which='grid'):
"""
Get the transformation used for drawing x-axis labels, ticks
and gridlines. The x-direction is in data coordinates and the
y-direction is in axis coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
if which=='grid':
return self._xaxis_transform
elif which=='tick1':
# for cartesian projection, this is bottom spine
return self.spines['bottom'].get_spine_transform()
elif which=='tick2':
# for cartesian projection, this is top spine
return self.spines['top'].get_spine_transform()
else:
raise ValueError('unknown value for which')
def get_xaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing x-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in data coordinates
and the y-direction is in axis coordinates. Returns a
3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_xaxis_transform(which='tick1') +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
self.figure.dpi_scale_trans),
"top", "center")
def get_xaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary x-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in data
coordinates and the y-direction is in axis coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_xaxis_transform(which='tick2') +
mtransforms.ScaledTranslation(0, pad_points / 72.0,
self.figure.dpi_scale_trans),
"bottom", "center")
def get_yaxis_transform(self,which='grid'):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
if which=='grid':
return self._yaxis_transform
elif which=='tick1':
            # for cartesian projection, this is the left spine
return self.spines['left'].get_spine_transform()
elif which=='tick2':
            # for cartesian projection, this is the right spine
return self.spines['right'].get_spine_transform()
else:
raise ValueError('unknown value for which')
def get_yaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing y-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in axis coordinates
and the y-direction is in data coordinates. Returns a 3-tuple
of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_yaxis_transform(which='tick1') +
mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary y-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in axis
coordinates and the y-direction is in data coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_yaxis_transform(which='tick2') +
mtransforms.ScaledTranslation(pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
if hasattr(self, "lines"):
for line in self.lines:
try:
line._transformed_path.invalidate()
except AttributeError:
pass
def get_position(self, original=False):
        'Return a copy of the axes rectangle as a Bbox'
if original:
return self._originalPosition.frozen()
else:
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position with::
pos = [left, bottom, width, height]
in relative 0,1 coords, or *pos* can be a
:class:`~matplotlib.transforms.Bbox`
There are two position variables: one which is ultimately
used, but which may be modified by :meth:`apply_aspect`, and a
second which is the starting point for :meth:`apply_aspect`.
Optional keyword arguments:
*which*
========== ====================
value description
========== ====================
'active' to change the first
'original' to change the second
'both' to change both
========== ====================
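        Illustrative calls (``ax`` is assumed to be an existing Axes)::
            ax.set_position([0.1, 0.1, 0.5, 0.8])              # both variables
            ax.set_position([0.1, 0.1, 0.5, 0.8], 'original')  # starting point only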
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
if which in ('both', 'active'):
self._position.set(pos)
if which in ('both', 'original'):
self._originalPosition.set(pos)
def reset_position(self):
"""Make the original position the active position"""
pos = self.get_position(original=True)
self.set_position(pos, which='active')
def set_axes_locator(self, locator):
"""
set axes_locator
        ACCEPTS: a callable object which takes an axes instance and a
        renderer and returns a bbox.
"""
self._axes_locator = locator
def get_axes_locator(self):
"""
return axes_locator
"""
return self._axes_locator
def _set_artist_props(self, a):
"""set the boilerplate props for artists added to axes"""
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.set_axes(self)
def _gen_axes_patch(self):
"""
Returns the patch used to draw the background of the axes. It
is also used as the clipping path for any data elements on the
axes.
In the standard axes, this is a rectangle, but in other
projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
def _gen_axes_spines(self, locations=None, offset=0.0, units='inches'):
"""
Returns a dict whose keys are spine names and values are
Line2D or Patch instances. Each element is used to draw a
spine of the axes.
In the standard axes, this is a single line segment, but in
other projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return {
'left':mspines.Spine.linear_spine(self,'left'),
'right':mspines.Spine.linear_spine(self,'right'),
'bottom':mspines.Spine.linear_spine(self,'bottom'),
'top':mspines.Spine.linear_spine(self,'top'),
}
def cla(self):
"""Clear the current axes."""
# Note: this is called by Axes.__init__()
self.xaxis.cla()
self.yaxis.cla()
for name,spine in self.spines.iteritems():
spine.cla()
self.ignore_existing_data_limits = True
self.callbacks = cbook.CallbackRegistry()
if self._sharex is not None:
# major and minor are class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.minor = self._sharex.xaxis.minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False, auto=None)
# Save the current formatter/locator so we don't lose it
majf = self._sharex.xaxis.get_major_formatter()
minf = self._sharex.xaxis.get_minor_formatter()
majl = self._sharex.xaxis.get_major_locator()
minl = self._sharex.xaxis.get_minor_locator()
# This overwrites the current formatter/locator
self.xaxis.set_scale(self._sharex.xaxis.get_scale())
# Reset the formatter/locator
self.xaxis.set_major_formatter(majf)
self.xaxis.set_minor_formatter(minf)
self.xaxis.set_major_locator(majl)
self.xaxis.set_minor_locator(minl)
else:
self.xaxis.set_scale('linear')
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.minor = self._sharey.yaxis.minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False, auto=None)
# Save the current formatter/locator so we don't lose it
majf = self._sharey.yaxis.get_major_formatter()
minf = self._sharey.yaxis.get_minor_formatter()
majl = self._sharey.yaxis.get_major_locator()
minl = self._sharey.yaxis.get_minor_locator()
# This overwrites the current formatter/locator
self.yaxis.set_scale(self._sharey.yaxis.get_scale())
# Reset the formatter/locator
self.yaxis.set_major_formatter(majf)
self.yaxis.set_minor_formatter(minf)
self.yaxis.set_major_locator(majl)
self.yaxis.set_minor_locator(minl)
else:
self.yaxis.set_scale('linear')
self._autoscaleXon = True
self._autoscaleYon = True
self._xmargin = 0
self._ymargin = 0
self._tight = False
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.images = []
self._current_image = None # strictly for pyplot via _sci, _gci
self.legend_ = None
self.collections = [] # collection.Collection instances
self.containers = [] #
self.grid(self._gridOn)
props = font_manager.FontProperties(size=rcParams['axes.titlesize'])
self.titleOffsetTrans = mtransforms.ScaledTranslation(
0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)
self.title = mtext.Text(
x=0.5, y=1.0, text='',
fontproperties=props,
verticalalignment='baseline',
horizontalalignment='center',
)
self.title.set_transform(self.transAxes + self.titleOffsetTrans)
self.title.set_clip_box(None)
self._set_artist_props(self.title)
# the patch draws the background of the axes. we want this to
# be below the other artists; the axesPatch name is
# deprecated. We use the frame to draw the edges so we are
# setting the edgecolor to None
self.patch = self.axesPatch = self._gen_axes_patch()
self.patch.set_figure(self.figure)
self.patch.set_facecolor(self._axisbg)
self.patch.set_edgecolor('None')
self.patch.set_linewidth(0)
self.patch.set_transform(self.transAxes)
self.axison = True
self.xaxis.set_clip_path(self.patch)
self.yaxis.set_clip_path(self.patch)
self._shared_x_axes.clean()
self._shared_y_axes.clean()
def get_frame(self):
raise AttributeError('Axes.frame was removed in favor of Axes.spines')
frame = property(get_frame)
def clear(self):
"""clear the axes"""
self.cla()
def set_color_cycle(self, clist):
"""
Set the color cycle for any future plot commands on this Axes.
*clist* is a list of mpl color specifiers.
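        An illustrative call (``ax`` is assumed to be an existing Axes)::
            ax.set_color_cycle(['c', 'm', 'y', 'k'])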
"""
self._get_lines.set_color_cycle(clist)
self._get_patches_for_fill.set_color_cycle(clist)
def ishold(self):
"""return the HOLD status of the axes"""
return self._hold
def hold(self, b=None):
"""
Call signature::
hold(b=None)
Set the hold state. If *hold* is *None* (default), toggle the
*hold* state. Else set the *hold* state to boolean value *b*.
Examples::
# toggle hold
hold()
# turn hold on
hold(True)
# turn hold off
hold(False)
When hold is *True*, subsequent plot commands will be added to
the current axes. When hold is *False*, the current axes and
figure will be cleared on the next plot command
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def get_aspect(self):
return self._aspect
def set_aspect(self, aspect, adjustable=None, anchor=None):
"""
*aspect*
======== ================================================
value description
======== ================================================
'auto' automatic; fill position rectangle with data
'normal' same as 'auto'; deprecated
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height
is num times the width. aspect=1 is the same as
aspect='equal'.
======== ================================================
*adjustable*
============ =====================================
value description
============ =====================================
'box' change physical size of axes
'datalim' change xlim or ylim
'box-forced' same as 'box', but axes can be shared
============ =====================================
'box' does not allow axes sharing, as this can cause
        unintended side effects. For cases when sharing axes is
fine, use 'box-forced'.
*anchor*
===== =====================
value description
===== =====================
'C' centered
'SW' lower left corner
'S' middle of bottom edge
'SE' lower right corner
etc.
===== =====================
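        Illustrative calls (``ax`` is assumed to be an existing Axes)::
            ax.set_aspect('equal', adjustable='datalim')
            ax.set_aspect(2.0)   # circles drawn twice as tall as wide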
"""
if aspect in ('normal', 'auto'):
self._aspect = 'auto'
elif aspect == 'equal':
self._aspect = 'equal'
else:
self._aspect = float(aspect) # raise ValueError if necessary
if adjustable is not None:
self.set_adjustable(adjustable)
if anchor is not None:
self.set_anchor(anchor)
def get_adjustable(self):
return self._adjustable
def set_adjustable(self, adjustable):
"""
ACCEPTS: [ 'box' | 'datalim' | 'box-forced']
"""
if adjustable in ('box', 'datalim', 'box-forced'):
if self in self._shared_x_axes or self in self._shared_y_axes:
if adjustable == 'box':
raise ValueError(
'adjustable must be "datalim" for shared axes')
self._adjustable = adjustable
else:
            raise ValueError(
                'argument must be "box", "datalim" or "box-forced"')
def get_anchor(self):
return self._anchor
def set_anchor(self, anchor):
"""
*anchor*
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among %s' %
', '.join(mtransforms.Bbox.coefs.keys()))
def get_data_ratio(self):
"""
Returns the aspect ratio of the raw data.
This method is intended to be overridden by new projection
types.
"""
xmin,xmax = self.get_xbound()
ymin,ymax = self.get_ybound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ysize = max(math.fabs(ymax-ymin), 1e-30)
return ysize/xsize
def get_data_ratio_log(self):
"""
Returns the aspect ratio of the raw data in log scale.
Will be used when both axis scales are in log.
"""
xmin,xmax = self.get_xbound()
ymin,ymax = self.get_ybound()
xsize = max(math.fabs(math.log10(xmax)-math.log10(xmin)), 1e-30)
ysize = max(math.fabs(math.log10(ymax)-math.log10(ymin)), 1e-30)
return ysize/xsize
def apply_aspect(self, position=None):
"""
Use :meth:`_aspect` and :meth:`_adjustable` to modify the
axes box or the view limits.
"""
if position is None:
position = self.get_position(original=True)
aspect = self.get_aspect()
if self.name != 'polar':
xscale, yscale = self.get_xscale(), self.get_yscale()
if xscale == "linear" and yscale == "linear":
aspect_scale_mode = "linear"
elif xscale == "log" and yscale == "log":
aspect_scale_mode = "log"
elif (xscale == "linear" and yscale == "log") or \
(xscale == "log" and yscale == "linear"):
                if aspect != "auto":
warnings.warn(
'aspect is not supported for Axes with xscale=%s, yscale=%s' \
% (xscale, yscale))
aspect = "auto"
else: # some custom projections have their own scales.
pass
else:
aspect_scale_mode = "linear"
if aspect == 'auto':
self.set_position( position , which='active')
return
if aspect == 'equal':
A = 1
else:
A = aspect
#Ensure at drawing time that any Axes involved in axis-sharing
# does not have its position changed.
if self in self._shared_x_axes or self in self._shared_y_axes:
if self._adjustable == 'box':
self._adjustable = 'datalim'
warnings.warn(
'shared axes: "adjustable" is being changed to "datalim"')
figW,figH = self.get_figure().get_size_inches()
fig_aspect = figH/figW
if self._adjustable in ['box', 'box-forced']:
if aspect_scale_mode == "log":
box_aspect = A * self.get_data_ratio_log()
else:
box_aspect = A * self.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')
return
# reset active to original in case it had been changed
# by prior use of 'box'
self.set_position(position, which='active')
xmin,xmax = self.get_xbound()
ymin,ymax = self.get_ybound()
if aspect_scale_mode == "log":
xmin, xmax = math.log10(xmin), math.log10(xmax)
ymin, ymax = math.log10(ymin), math.log10(ymax)
xsize = max(math.fabs(xmax-xmin), 1e-30)
ysize = max(math.fabs(ymax-ymin), 1e-30)
l,b,w,h = position.bounds
box_aspect = fig_aspect * (h/w)
data_ratio = box_aspect / A
y_expander = (data_ratio*xsize/ysize - 1.0)
#print 'y_expander', y_expander
# If y_expander > 0, the dy/dx viewLim ratio needs to increase
if abs(y_expander) < 0.005:
#print 'good enough already'
return
if aspect_scale_mode == "log":
dL = self.dataLim
dL_width = math.log10(dL.x1) - math.log10(dL.x0)
dL_height = math.log10(dL.y1) - math.log10(dL.y0)
xr = 1.05 * dL_width
yr = 1.05 * dL_height
else:
dL = self.dataLim
xr = 1.05 * dL.width
yr = 1.05 * dL.height
xmarg = xsize - xr
ymarg = ysize - yr
Ysize = data_ratio * xsize
Xsize = ysize / data_ratio
Xmarg = Xsize - xr
Ymarg = Ysize - yr
xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help.
ym = 0
#print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax
#print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize
changex = (self in self._shared_y_axes
and self not in self._shared_x_axes)
changey = (self in self._shared_x_axes
and self not in self._shared_y_axes)
if changex and changey:
warnings.warn("adjustable='datalim' cannot work with shared "
"x and y axes")
return
if changex:
adjust_y = False
else:
#print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg
if xmarg > xm and ymarg > ym:
adjy = ((Ymarg > 0 and y_expander < 0)
or (Xmarg < 0 and y_expander > 0))
else:
adjy = y_expander > 0
#print 'y_expander, adjy', y_expander, adjy
adjust_y = changey or adjy #(Ymarg > xmarg)
if adjust_y:
yc = 0.5*(ymin+ymax)
y0 = yc - Ysize/2.0
y1 = yc + Ysize/2.0
if aspect_scale_mode == "log":
self.set_ybound((10.**y0, 10.**y1))
else:
self.set_ybound((y0, y1))
#print 'New y0, y1:', y0, y1
#print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize
else:
xc = 0.5*(xmin+xmax)
x0 = xc - Xsize/2.0
x1 = xc + Xsize/2.0
if aspect_scale_mode == "log":
self.set_xbound((10.**x0, 10.**x1))
else:
self.set_xbound((x0, x1))
#print 'New x0, x1:', x0, x1
#print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)
def axis(self, *v, **kwargs):
"""
Convenience method for manipulating the x and y view limits
and the aspect ratio of the plot. For details, see
:func:`~matplotlib.pyplot.axis`.
*kwargs* are passed on to :meth:`set_xlim` and
:meth:`set_ylim`
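        Illustrative calls (``ax`` is assumed to be an existing Axes)::
            xmin, xmax, ymin, ymax = ax.axis()   # query the current limits
            ax.axis([0, 10, -1, 1])              # set limits explicitly
            ax.axis('equal')                     # equal data scaling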
"""
if len(v) == 0 and len(kwargs) == 0:
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
if len(v)==1 and is_string_like(v[0]):
s = v[0].lower()
if s=='on': self.set_axis_on()
elif s=='off': self.set_axis_off()
elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'):
self.set_autoscale_on(True)
self.set_aspect('auto')
self.autoscale_view(tight=False)
# self.apply_aspect()
if s=='equal':
self.set_aspect('equal', adjustable='datalim')
elif s == 'scaled':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False) # Req. by Mark Bakker
elif s=='tight':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
elif s == 'image':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
self.set_aspect('equal', adjustable='box', anchor='C')
else:
raise ValueError('Unrecognized string %s to axis; '
'try on or off' % s)
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
emit = kwargs.get('emit', True)
try:
v[0]
except IndexError:
xmin = kwargs.get('xmin', None)
xmax = kwargs.get('xmax', None)
auto = False # turn off autoscaling, unless...
if xmin is None and xmax is None:
auto = None # leave autoscaling state alone
xmin, xmax = self.set_xlim(xmin, xmax, emit=emit, auto=auto)
ymin = kwargs.get('ymin', None)
ymax = kwargs.get('ymax', None)
auto = False # turn off autoscaling, unless...
if ymin is None and ymax is None:
auto = None # leave autoscaling state alone
ymin, ymax = self.set_ylim(ymin, ymax, emit=emit, auto=auto)
return xmin, xmax, ymin, ymax
v = v[0]
if len(v) != 4:
raise ValueError('v must contain [xmin xmax ymin ymax]')
self.set_xlim([v[0], v[1]], emit=emit, auto=False)
self.set_ylim([v[2], v[3]], emit=emit, auto=False)
return v
def get_child_artists(self):
"""
Return a list of artists the axes contains.
.. deprecated:: 0.98
"""
raise mplDeprecation('Use get_children instead')
def get_frame(self):
"""Return the axes Rectangle frame"""
warnings.warn('use ax.patch instead', mplDeprecation)
return self.patch
def get_legend(self):
"""Return the legend.Legend instance, or None if no legend is defined"""
return self.legend_
def get_images(self):
"""return a list of Axes images contained by the Axes"""
return cbook.silent_list('AxesImage', self.images)
def get_lines(self):
"""Return a list of lines contained by the Axes"""
return cbook.silent_list('Line2D', self.lines)
def get_xaxis(self):
"""Return the XAxis instance"""
return self.xaxis
def get_xgridlines(self):
"""Get the x grid lines as a list of Line2D instances"""
return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())
def get_xticklines(self):
"""Get the xtick lines as a list of Line2D instances"""
        return cbook.silent_list('Line2D xtickline', self.xaxis.get_ticklines())
def get_yaxis(self):
"""Return the YAxis instance"""
return self.yaxis
def get_ygridlines(self):
"""Get the y grid lines as a list of Line2D instances"""
return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())
def get_yticklines(self):
"""Get the ytick lines as a list of Line2D instances"""
return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())
#### Adding and tracking artists
def _sci(self, im):
"""
helper for :func:`~matplotlib.pyplot.sci`;
do not use elsewhere.
"""
if isinstance(im, matplotlib.contour.ContourSet):
if im.collections[0] not in self.collections:
raise ValueError(
"ContourSet must be in current Axes")
elif im not in self.images and im not in self.collections:
raise ValueError(
"Argument must be an image, collection, or ContourSet in this Axes")
self._current_image = im
def _gci(self):
"""
Helper for :func:`~matplotlib.pyplot.gci`;
do not use elsewhere.
"""
return self._current_image
def has_data(self):
"""
Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
        needs to be updated, and may not actually be useful for
anything.
"""
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches))>0
def add_artist(self, a):
"""
Add any :class:`~matplotlib.artist.Artist` to the axes.
Returns the artist.
"""
a.set_axes(self)
self.artists.append(a)
self._set_artist_props(a)
a.set_clip_path(self.patch)
a._remove_method = lambda h: self.artists.remove(h)
return a
def add_collection(self, collection, autolim=True):
"""
Add a :class:`~matplotlib.collections.Collection` instance
to the axes.
Returns the collection.
"""
label = collection.get_label()
if not label:
collection.set_label('_collection%d'%len(self.collections))
self.collections.append(collection)
self._set_artist_props(collection)
if collection.get_clip_path() is None:
collection.set_clip_path(self.patch)
if autolim:
if collection._paths and len(collection._paths):
self.update_datalim(collection.get_datalim(self.transData))
collection._remove_method = lambda h: self.collections.remove(h)
return collection
def add_line(self, line):
"""
Add a :class:`~matplotlib.lines.Line2D` to the list of plot
lines
Returns the line.
"""
self._set_artist_props(line)
if line.get_clip_path() is None:
line.set_clip_path(self.patch)
self._update_line_limits(line)
if not line.get_label():
line.set_label('_line%d' % len(self.lines))
self.lines.append(line)
line._remove_method = lambda h: self.lines.remove(h)
return line
def _update_line_limits(self, line):
"""Figures out the data limit of the given line, updating self.dataLim."""
path = line.get_path()
if path.vertices.size == 0:
return
line_trans = line.get_transform()
if line_trans == self.transData:
data_path = path
elif any(line_trans.contains_branch_seperately(self.transData)):
# identify the transform to go from line's coordinates
# to data coordinates
trans_to_data = line_trans - self.transData
# if transData is affine we can use the cached non-affine component
# of line's path. (since the non-affine part of line_trans is
# entirely encapsulated in trans_to_data).
if self.transData.is_affine:
line_trans_path = line._get_transformed_path()
na_path, _ = line_trans_path.get_transformed_path_and_affine()
data_path = trans_to_data.transform_path_affine(na_path)
else:
data_path = trans_to_data.transform_path(path)
else:
# for backwards compatibility we update the dataLim with the
# coordinate range of the given path, even though the coordinate
# systems are completely different. This may occur in situations
# such as when ax.transAxes is passed through for absolute
# positioning.
data_path = path
if data_path.vertices.size > 0:
updatex, updatey = line_trans.contains_branch_seperately(
self.transData
)
self.dataLim.update_from_path(data_path,
self.ignore_existing_data_limits,
updatex=updatex,
updatey=updatey)
self.ignore_existing_data_limits = False
def add_patch(self, p):
"""
Add a :class:`~matplotlib.patches.Patch` *p* to the list of
axes patches; the clipbox will be set to the Axes clipping
box. If the transform is not set, it will be set to
:attr:`transData`.
Returns the patch.
"""
self._set_artist_props(p)
if p.get_clip_path() is None:
p.set_clip_path(self.patch)
self._update_patch_limits(p)
self.patches.append(p)
p._remove_method = lambda h: self.patches.remove(h)
return p
def _update_patch_limits(self, patch):
"""update the data limits for patch *p*"""
# hist can add zero height Rectangles, which is useful to keep
# the bins, counts and patches lined up, but it throws off log
# scaling. We'll ignore rects with zero height or width in
# the auto-scaling
# cannot check for '==0' since unitized data may not compare to zero
if (isinstance(patch, mpatches.Rectangle) and
((not patch.get_width()) or (not patch.get_height()))):
return
vertices = patch.get_path().vertices
if vertices.size > 0:
xys = patch.get_patch_transform().transform(vertices)
if patch.get_data_transform() != self.transData:
patch_to_data = (patch.get_data_transform() -
self.transData)
xys = patch_to_data.transform(xys)
updatex, updatey = patch.get_transform().\
contains_branch_seperately(self.transData)
self.update_datalim(xys, updatex=updatex,
updatey=updatey)
def add_table(self, tab):
"""
        Add a :class:`~matplotlib.table.Table` instance to the
list of axes tables
Returns the table.
"""
self._set_artist_props(tab)
self.tables.append(tab)
tab.set_clip_path(self.patch)
tab._remove_method = lambda h: self.tables.remove(h)
return tab
def add_container(self, container):
"""
Add a :class:`~matplotlib.container.Container` instance
to the axes.
        Returns the container.
"""
label = container.get_label()
if not label:
container.set_label('_container%d'%len(self.containers))
self.containers.append(container)
container.set_remove_method(lambda h: self.containers.remove(h))
return container
def relim(self):
"""
Recompute the data limits based on current artists.
At present, :class:`~matplotlib.collections.Collection`
instances are not supported.
"""
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.ignore_existing_data_limits = True
for line in self.lines:
self._update_line_limits(line)
for p in self.patches:
self._update_patch_limits(p)
def update_datalim(self, xys, updatex=True, updatey=True):
"""Update the data lim bbox with seq of xy tups or equiv. 2-D array"""
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
        # Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(xys) and not len(xys): return
if not ma.isMaskedArray(xys):
xys = np.asarray(xys)
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
def update_datalim_numerix(self, x, y):
"""Update the data lim bbox with seq of xy tups"""
# if no data is set currently, the bbox will ignore it's
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of it's current data
# and the data in xydata
if iterable(x) and not len(x): return
self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)
self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
"""
Update the datalim to include the given
:class:`~matplotlib.transforms.Bbox` *bounds*
"""
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
"""Look for unit *kwargs* and update the axis instances as necessary"""
if self.xaxis is None or self.yaxis is None: return
#print 'processing', self.get_geometry()
if xdata is not None:
# we only need to update if there is nothing set yet.
if not self.xaxis.have_units():
self.xaxis.update_units(xdata)
#print '\tset from xdata', self.xaxis.units
if ydata is not None:
# we only need to update if there is nothing set yet.
if not self.yaxis.have_units():
self.yaxis.update_units(ydata)
#print '\tset from ydata', self.yaxis.units
# process kwargs 2nd since these will override default units
if kwargs is not None:
xunits = kwargs.pop( 'xunits', self.xaxis.units)
if self.name == 'polar':
xunits = kwargs.pop( 'thetaunits', xunits )
if xunits!=self.xaxis.units:
#print '\tkw setting xunits', xunits
self.xaxis.set_units(xunits)
# If the units being set imply a different converter,
# we need to update.
if xdata is not None:
self.xaxis.update_units(xdata)
yunits = kwargs.pop('yunits', self.yaxis.units)
if self.name == 'polar':
yunits = kwargs.pop( 'runits', yunits )
if yunits!=self.yaxis.units:
#print '\tkw setting yunits', yunits
self.yaxis.set_units(yunits)
# If the units being set imply a different converter,
# we need to update.
if ydata is not None:
self.yaxis.update_units(ydata)
def in_axes(self, mouseevent):
"""
Return *True* if the given *mouseevent* (in display coords)
is in the Axes
"""
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied for both axes on plot commands
"""
return self._autoscaleXon and self._autoscaleYon
def get_autoscalex_on(self):
"""
Get whether autoscaling for the x-axis is applied on plot commands
"""
return self._autoscaleXon
def get_autoscaley_on(self):
"""
Get whether autoscaling for the y-axis is applied on plot commands
"""
return self._autoscaleYon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleXon = b
self._autoscaleYon = b
def set_autoscalex_on(self, b):
"""
Set whether autoscaling for the x-axis is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleXon = b
def set_autoscaley_on(self, b):
"""
Set whether autoscaling for the y-axis is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleYon = b
def set_xmargin(self, m):
"""
Set padding of X data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
accepts: float in range 0 to 1
"""
if m < 0 or m > 1:
raise ValueError("margin must be in range 0 to 1")
self._xmargin = m
def set_ymargin(self, m):
"""
Set padding of Y data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
accepts: float in range 0 to 1
"""
if m < 0 or m > 1:
raise ValueError("margin must be in range 0 to 1")
self._ymargin = m
def margins(self, *args, **kw):
"""
Set or retrieve autoscaling margins.
signatures::
margins()
returns xmargin, ymargin
::
margins(margin)
margins(xmargin, ymargin)
margins(x=xmargin, y=ymargin)
margins(..., tight=False)
All three forms above set the xmargin and ymargin parameters.
All keyword parameters are optional. A single argument
specifies both xmargin and ymargin. The *tight* parameter
is passed to :meth:`autoscale_view`, which is executed after
a margin is changed; the default here is *True*, on the
assumption that when margins are specified, no additional
padding to match tick marks is usually desired. Setting
*tight* to *None* will preserve the previous setting.
Specifying any margin changes only the autoscaling; for example,
if *xmargin* is not None, then *xmargin* times the X data
interval will be added to each end of that interval before
it is used in autoscaling.
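        Illustrative calls (``ax`` is assumed to be an existing Axes)::
            ax.margins(0.05)        # 5% padding on both axes
            ax.margins(0.1, 0.05)   # separate x and y padding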
"""
if not args and not kw:
return self._xmargin, self._ymargin
tight = kw.pop('tight', True)
mx = kw.pop('x', None)
my = kw.pop('y', None)
if len(args) == 1:
mx = my = args[0]
elif len(args) == 2:
mx, my = args
        elif len(args) > 2:
raise ValueError("more than two arguments were supplied")
if mx is not None:
self.set_xmargin(mx)
if my is not None:
self.set_ymargin(my)
scalex = (mx is not None)
scaley = (my is not None)
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley)
def set_rasterization_zorder(self, z):
"""
Set zorder value below which artists will be rasterized. Set
to `None` to disable rasterizing of artists below a particular
zorder.
"""
self._rasterization_zorder = z
def get_rasterization_zorder(self):
"""
Get zorder value below which artists will be rasterized
"""
return self._rasterization_zorder
def autoscale(self, enable=True, axis='both', tight=None):
"""
Autoscale the axis view to the data (toggle).
Convenience method for simple axis view autoscaling.
It turns autoscaling on or off, and then,
if autoscaling for either axis is on, it performs
the autoscaling on the specified axis or axes.
*enable*: [True | False | None]
True (default) turns autoscaling on, False turns it off.
None leaves the autoscaling state unchanged.
*axis*: ['x' | 'y' | 'both']
which axis to operate on; default is 'both'
*tight*: [True | False | None]
If True, set view limits to data limits;
if False, let the locator and margins expand the view limits;
if None, use tight scaling if the only artist is an image,
otherwise treat *tight* as False.
The *tight* setting is retained for future autoscaling
until it is explicitly changed.
Returns None.
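        An illustrative call (``ax`` is assumed to be an existing Axes)::
            ax.autoscale(enable=True, axis='x', tight=True)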
"""
if enable is None:
scalex = True
scaley = True
else:
scalex = False
scaley = False
if axis in ['x', 'both']:
self._autoscaleXon = bool(enable)
scalex = self._autoscaleXon
if axis in ['y', 'both']:
self._autoscaleYon = bool(enable)
scaley = self._autoscaleYon
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley)
def autoscale_view(self, tight=None, scalex=True, scaley=True):
"""
Autoscale the view limits using the data limits. You can
selectively autoscale only a single axis, eg, the xaxis by
setting *scaley* to *False*. The autoscaling preserves any
axis direction reversal that has already been done.
The data limits are not updated automatically when artist
data are changed after the artist has been added to an
Axes instance. In that case, use
:meth:`matplotlib.axes.Axes.relim`
prior to calling autoscale_view.
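        An illustrative sequence after modifying line data in place
        (``ax`` is assumed to be an existing Axes)::
            ax.relim()
            ax.autoscale_view(scalex=True, scaley=False)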
"""
if tight is None:
# if image data only just use the datalim
_tight = self._tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)
else:
_tight = self._tight = bool(tight)
if scalex and self._autoscaleXon:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = bb.intervalx
xlocator = self.xaxis.get_major_locator()
try:
# e.g. DateLocator has its own nonsingular()
x0, x1 = xlocator.nonsingular(x0, x1)
except AttributeError:
# Default nonsingular for, e.g., MaxNLocator
x0, x1 = mtransforms.nonsingular(x0, x1, increasing=False,
expander=0.05)
if self._xmargin > 0:
delta = (x1 - x0) * self._xmargin
x0 -= delta
x1 += delta
if not _tight:
x0, x1 = xlocator.view_limits(x0, x1)
self.set_xbound(x0, x1)
if scaley and self._autoscaleYon:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = bb.intervaly
ylocator = self.yaxis.get_major_locator()
try:
y0, y1 = ylocator.nonsingular(y0, y1)
except AttributeError:
y0, y1 = mtransforms.nonsingular(y0, y1, increasing=False,
expander=0.05)
if self._ymargin > 0:
delta = (y1 - y0) * self._ymargin
y0 -= delta
y1 += delta
if not _tight:
y0, y1 = ylocator.view_limits(y0, y1)
self.set_ybound(y0, y1)
#### Drawing
@allow_rasterization
def draw(self, renderer=None, inframe=False):
"""Draw everything (plot lines, axes, labels)"""
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible(): return
renderer.open_group('axes')
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
artists = []
artists.extend(self.collections)
artists.extend(self.patches)
artists.extend(self.lines)
artists.extend(self.texts)
artists.extend(self.artists)
if self.axison and not inframe:
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
artists.extend([self.xaxis, self.yaxis])
if not inframe: artists.append(self.title)
artists.extend(self.tables)
if self.legend_ is not None:
artists.append(self.legend_)
# the frame draws the edges around the axes patch -- we
# decouple these so the patch can be in the background and the
# frame in the foreground.
if self.axison and self._frameon:
artists.extend(self.spines.itervalues())
dsu = [ (a.zorder, a) for a in artists
if not a.get_animated() ]
        # add images to dsu if the backend supports compositing.
        # otherwise, do the manual compositing without adding images to dsu.
if len(self.images)<=1 or renderer.option_image_nocomposite():
dsu.extend([(im.zorder, im) for im in self.images])
_do_composite = False
else:
_do_composite = True
dsu.sort(key=itemgetter(0))
# rasterize artists with negative zorder
# if the minimum zorder is negative, start rasterization
rasterization_zorder = self._rasterization_zorder
if (rasterization_zorder is not None and
len(dsu) > 0 and dsu[0][0] < rasterization_zorder):
renderer.start_rasterizing()
dsu_rasterized = [l for l in dsu if l[0] < rasterization_zorder]
dsu = [l for l in dsu if l[0] >= rasterization_zorder]
else:
dsu_rasterized = []
# the patch draws the background rectangle -- the frame below
# will draw the edges
if self.axison and self._frameon:
self.patch.draw(renderer)
if _do_composite:
# make a composite image blending alpha
# list of (mimage.Image, ox, oy)
zorder_images = [(im.zorder, im) for im in self.images \
if im.get_visible()]
zorder_images.sort(key=lambda x: x[0])
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag),0,0) for z,im in zorder_images]
l, b, r, t = self.bbox.extents
width = mag*((round(r) + 0.5) - (round(l) - 0.5))
height = mag*((round(t) + 0.5) - (round(b) - 0.5))
im = mimage.from_images(height,
width,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
# composite images need special args so they will not
# respect z-order for now
gc = renderer.new_gc()
gc.set_clip_rectangle(self.bbox)
gc.set_clip_path(mtransforms.TransformedPath(
self.patch.get_path(),
self.patch.get_transform()))
renderer.draw_image(gc, round(l), round(b), im)
gc.restore()
if dsu_rasterized:
for zorder, a in dsu_rasterized:
a.draw(renderer)
renderer.stop_rasterizing()
for zorder, a in dsu:
a.draw(renderer)
renderer.close_group('axes')
self._cachedRenderer = renderer
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
self.draw(self._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self._cachedRenderer
def __draw_animate(self):
# ignore for now; broken
if self._lastRenderer is None:
raise RuntimeError('You must first call ax.draw()')
dsu = [(a.zorder, a) for a in self.animated.keys()]
dsu.sort(key=lambda x: x[0])
renderer = self._lastRenderer
renderer.blit()
for tmp, a in dsu:
a.draw(renderer)
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the axes rectangle patch is drawn
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._frameon = b
def get_axisbelow(self):
"""
        Get whether the axis ticks and gridlines are drawn below most artists
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b
@docstring.dedent_interpd
def grid(self, b=None, which='major', axis='both', **kwargs):
"""
Turn the axes grids on or off.
Call signature::
grid(self, b=None, which='major', axis='both', **kwargs)
Set the axes grids on or off; *b* is a boolean. (For MATLAB
compatibility, *b* may also be a string, 'on' or 'off'.)
If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If
*kwargs* are supplied, it is assumed that you want a grid and *b*
is thus set to *True*.
*which* can be 'major' (default), 'minor', or 'both' to control
whether major tick grids, minor tick grids, or both are affected.
*axis* can be 'both' (default), 'x', or 'y' to control which
set of gridlines are drawn.
*kwargs* are used to set the grid line properties, eg::
ax.grid(color='r', linestyle='-', linewidth=2)
Valid :class:`~matplotlib.lines.Line2D` kwargs are
%(Line2D)s
"""
if len(kwargs):
b = True
b = _string_to_bool(b)
if axis == 'x' or axis == 'both':
self.xaxis.grid(b, which=which, **kwargs)
if axis == 'y' or axis == 'both':
self.yaxis.grid(b, which=which, **kwargs)
def ticklabel_format(self, **kwargs):
"""
Change the `~matplotlib.ticker.ScalarFormatter` used by
default for linear axes.
Optional keyword arguments:
============ =========================================
Keyword Description
============ =========================================
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
                     10**m to 10**n.
Use (0,0) to include all numbers.
*useOffset* [True | False | offset]; if True,
the offset will be calculated as needed;
if False, no offset will be used; if a
numeric offset is specified, it will be
used.
*axis* [ 'x' | 'y' | 'both' ]
*useLocale* If True, format the number according to
the current locale. This affects things
such as the character used for the
decimal separator. If False, use
C-style (English) formatting. The
default setting is controlled by the
axes.formatter.use_locale rcparam.
============ =========================================
Only the major ticks are affected.
If the method is called when the
:class:`~matplotlib.ticker.ScalarFormatter` is not the
:class:`~matplotlib.ticker.Formatter` being used, an
:exc:`AttributeError` will be raised.
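        A minimal usage sketch (the figure and data below are illustrative,
        not part of this method)::

            import numpy as np
            import matplotlib.pyplot as plt

            fig, ax = plt.subplots()
            ax.plot(np.arange(10) * 1e6)
            # force scientific notation on the y-axis for all magnitudes
            ax.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')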
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
useOffset = kwargs.pop('useOffset', None)
useLocale = kwargs.pop('useLocale', None)
axis = kwargs.pop('axis', 'both').lower()
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
raise NotImplementedError("comma style remains to be added")
elif style == '':
sb = None
else:
raise ValueError("%s is not a valid style value")
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
if useOffset is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_useOffset(useOffset)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_useOffset(useOffset)
if useLocale is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_useLocale(useLocale)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_useLocale(useLocale)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def locator_params(self, axis='both', tight=None, **kwargs):
"""
Control behavior of tick locators.
Keyword arguments:
*axis*
['x' | 'y' | 'both'] Axis on which to operate;
default is 'both'.
*tight*
[True | False | None] Parameter passed to :meth:`autoscale_view`.
Default is None, for no change.
        Remaining keyword arguments are passed directly to the
:meth:`~matplotlib.ticker.MaxNLocator.set_params` method.
Typically one might want to reduce the maximum number
of ticks and use tight bounds when plotting small
subplots, for example::
ax.locator_params(tight=True, nbins=4)
Because the locator is involved in autoscaling,
:meth:`autoscale_view` is called automatically after
the parameters are changed.
This presently works only for the
:class:`~matplotlib.ticker.MaxNLocator` used
by default on linear axes, but it may be generalized.
"""
_x = axis in ['x', 'both']
_y = axis in ['y', 'both']
if _x:
self.xaxis.get_major_locator().set_params(**kwargs)
if _y:
self.yaxis.get_major_locator().set_params(**kwargs)
self.autoscale_view(tight=tight, scalex=_x, scaley=_y)
def tick_params(self, axis='both', **kwargs):
"""
Change the appearance of ticks and tick labels.
Keyword arguments:
*axis* : ['x' | 'y' | 'both']
Axis on which to operate; default is 'both'.
*reset* : [True | False]
If *True*, set all parameters to defaults
before processing other keyword arguments. Default is
*False*.
*which* : ['major' | 'minor' | 'both']
Default is 'major'; apply arguments to *which* ticks.
*direction* : ['in' | 'out' | 'inout']
Puts ticks inside the axes, outside the axes, or both.
*length*
Tick length in points.
*width*
Tick width in points.
*color*
Tick color; accepts any mpl color spec.
*pad*
Distance in points between tick and label.
*labelsize*
Tick label font size in points or as a string (e.g. 'large').
*labelcolor*
Tick label color; mpl color spec.
*colors*
Changes the tick color and the label color to the same value:
mpl color spec.
*zorder*
Tick and label zorder.
*bottom*, *top*, *left*, *right* : [bool | 'on' | 'off']
controls whether to draw the respective ticks.
*labelbottom*, *labeltop*, *labelleft*, *labelright*
Boolean or ['on' | 'off'], controls whether to draw the
respective tick labels.
Example::
ax.tick_params(direction='out', length=6, width=2, colors='r')
This will make all major ticks be red, pointing out of the box,
and with dimensions 6 points by 2 points. Tick labels will
also be red.
"""
if axis in ['x', 'both']:
xkw = dict(kwargs)
xkw.pop('left', None)
xkw.pop('right', None)
xkw.pop('labelleft', None)
xkw.pop('labelright', None)
self.xaxis.set_tick_params(**xkw)
if axis in ['y', 'both']:
ykw = dict(kwargs)
ykw.pop('top', None)
ykw.pop('bottom', None)
ykw.pop('labeltop', None)
ykw.pop('labelbottom', None)
self.yaxis.set_tick_params(**ykw)
def set_axis_off(self):
"""turn off the axis"""
self.axison = False
def set_axis_on(self):
"""turn on the axis"""
self.axison = True
def get_axis_bgcolor(self):
"""Return the axis background color"""
return self._axisbg
def set_axis_bgcolor(self, color):
"""
set the axes background color
ACCEPTS: any matplotlib color - see
:func:`~matplotlib.pyplot.colors`
"""
self._axisbg = color
self.patch.set_facecolor(color)
### data limits, ticks, tick labels, and formatting
def invert_xaxis(self):
"Invert the x-axis."
left, right = self.get_xlim()
self.set_xlim(right, left, auto=None)
def xaxis_inverted(self):
"""Returns *True* if the x-axis is inverted."""
left, right = self.get_xlim()
return right < left
def get_xbound(self):
"""
Returns the x-axis numerical bounds where::
lowerBound < upperBound
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the _autoscaleXon attribute.
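        A minimal sketch of how the bounds interact with axis inversion
        (the figure below is illustrative)::

            import matplotlib.pyplot as plt

            fig, ax = plt.subplots()
            ax.invert_xaxis()
            ax.set_xbound(0.0, 10.0)    # the inverted direction is preserved
            print(ax.get_xlim())        # -> (10.0, 0.0)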
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_xbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.xaxis_inverted():
if lower < upper:
self.set_xlim(upper, lower, auto=None)
else:
self.set_xlim(lower, upper, auto=None)
else:
if lower < upper:
self.set_xlim(lower, upper, auto=None)
else:
self.set_xlim(upper, lower, auto=None)
def get_xlim(self):
"""
Get the x-axis range [*left*, *right*]
"""
return tuple(self.viewLim.intervalx)
def set_xlim(self, left=None, right=None, emit=True, auto=False, **kw):
"""
Call signature::
set_xlim(self, *args, **kwargs):
Set the data limits for the xaxis
Examples::
set_xlim((left, right))
set_xlim(left, right)
set_xlim(left=1) # right unchanged
set_xlim(right=1) # left unchanged
Keyword arguments:
*left*: scalar
The left xlim; *xmin*, the previous name, may still be used
*right*: scalar
The right xlim; *xmax*, the previous name, may still be used
*emit*: [ *True* | *False* ]
Notify observers of limit change
*auto*: [ *True* | *False* | *None* ]
Turn *x* autoscaling on (*True*), off (*False*; default),
or leave unchanged (*None*)
Note, the *left* (formerly *xmin*) value may be greater than
the *right* (formerly *xmax*).
For example, suppose *x* is years before present.
Then one might use::
            set_xlim(5000, 0)
so 5000 years ago is on the left of the plot and the
present is on the right.
Returns the current xlimits as a length 2 tuple
ACCEPTS: length 2 sequence of floats
"""
if 'xmin' in kw:
left = kw.pop('xmin')
if 'xmax' in kw:
right = kw.pop('xmax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if right is None and iterable(left):
left,right = left
self._process_unit_info(xdata=(left, right))
if left is not None:
left = self.convert_xunits(left)
if right is not None:
right = self.convert_xunits(right)
old_left, old_right = self.get_xlim()
if left is None: left = old_left
if right is None: right = old_right
if left==right:
warnings.warn(('Attempting to set identical left==right results\n'
+ 'in singular transformations; automatically expanding.\n'
+ 'left=%s, right=%s') % (left, right))
left, right = mtransforms.nonsingular(left, right, increasing=False)
left, right = self.xaxis.limit_range_for_scale(left, right)
self.viewLim.intervalx = (left, right)
if auto is not None:
self._autoscaleXon = bool(auto)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return left, right
def get_xscale(self):
return self.xaxis.get_scale()
get_xscale.__doc__ = "Return the xaxis scale string: %s""" % (
", ".join(mscale.get_scale_names()))
@docstring.dedent_interpd
def set_xscale(self, value, **kwargs):
"""
Call signature::
set_xscale(value)
Set the scaling of the x-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
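        A minimal usage sketch (the figure and data below are illustrative)::

            import matplotlib.pyplot as plt

            fig, ax = plt.subplots()
            ax.plot([1, 10, 100, 1000])
            ax.set_xscale('log', basex=10)    # logarithmic x-axis, base 10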
"""
self.xaxis.set_scale(value, **kwargs)
self.autoscale_view(scaley=False)
self._update_transScale()
def get_xticks(self, minor=False):
"""Return the x ticks as a list of locations"""
return self.xaxis.get_ticklocs(minor=minor)
def set_xticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
ACCEPTS: sequence of floats
"""
return self.xaxis.set_ticks(ticks, minor=minor)
def get_xmajorticklabels(self):
"""
Get the xtick labels as a list of :class:`~matplotlib.text.Text`
instances.
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_majorticklabels())
def get_xminorticklabels(self):
"""
Get the x minor tick labels as a list of
:class:`matplotlib.text.Text` instances.
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_minorticklabels())
def get_xticklabels(self, minor=False):
"""
Get the x tick labels as a list of :class:`~matplotlib.text.Text`
instances.
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_ticklabels(minor=minor))
@docstring.dedent_interpd
def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
Call signature::
set_xticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the xtick labels with list of strings *labels*. Return a
list of axis text instances.
*kwargs* set the :class:`~matplotlib.text.Text` properties.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
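        A minimal usage sketch (the tick positions and labels below are
        illustrative)::

            import matplotlib.pyplot as plt

            fig, ax = plt.subplots()
            ax.set_xticks([0, 1, 2])
            ax.set_xticklabels(['low', 'mid', 'high'], rotation=45)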
"""
return self.xaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
def invert_yaxis(self):
"Invert the y-axis."
bottom, top = self.get_ylim()
self.set_ylim(top, bottom, auto=None)
def yaxis_inverted(self):
"""Returns *True* if the y-axis is inverted."""
bottom, top = self.get_ylim()
return top < bottom
def get_ybound(self):
"Return y-axis numerical bounds in the form of lowerBound < upperBound"
bottom, top = self.get_ylim()
if bottom < top:
return bottom, top
else:
return top, bottom
def set_ybound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the _autoscaleYon attribute.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_ybound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.yaxis_inverted():
if lower < upper:
self.set_ylim(upper, lower, auto=None)
else:
self.set_ylim(lower, upper, auto=None)
else:
if lower < upper:
self.set_ylim(lower, upper, auto=None)
else:
self.set_ylim(upper, lower, auto=None)
def get_ylim(self):
"""
Get the y-axis range [*bottom*, *top*]
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, bottom=None, top=None, emit=True, auto=False, **kw):
"""
Call signature::
set_ylim(self, *args, **kwargs):
Set the data limits for the yaxis
Examples::
set_ylim((bottom, top))
set_ylim(bottom, top)
set_ylim(bottom=1) # top unchanged
set_ylim(top=1) # bottom unchanged
Keyword arguments:
*bottom*: scalar
The bottom ylim; the previous name, *ymin*, may still be used
*top*: scalar
The top ylim; the previous name, *ymax*, may still be used
*emit*: [ *True* | *False* ]
Notify observers of limit change
*auto*: [ *True* | *False* | *None* ]
Turn *y* autoscaling on (*True*), off (*False*; default),
or leave unchanged (*None*)
Note, the *bottom* (formerly *ymin*) value may be greater than
the *top* (formerly *ymax*).
For example, suppose *y* is depth in the ocean.
Then one might use::
set_ylim(5000, 0)
so 5000 m depth is at the bottom of the plot and the
surface, 0 m, is at the top.
Returns the current ylimits as a length 2 tuple
ACCEPTS: length 2 sequence of floats
"""
if 'ymin' in kw:
bottom = kw.pop('ymin')
if 'ymax' in kw:
top = kw.pop('ymax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if top is None and iterable(bottom):
bottom,top = bottom
if bottom is not None:
bottom = self.convert_yunits(bottom)
if top is not None:
top = self.convert_yunits(top)
old_bottom, old_top = self.get_ylim()
if bottom is None: bottom = old_bottom
if top is None: top = old_top
if bottom==top:
warnings.warn(('Attempting to set identical bottom==top results\n'
+ 'in singular transformations; automatically expanding.\n'
+ 'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.yaxis.limit_range_for_scale(bottom, top)
self.viewLim.intervaly = (bottom, top)
if auto is not None:
self._autoscaleYon = bool(auto)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return bottom, top
def get_yscale(self):
return self.yaxis.get_scale()
get_yscale.__doc__ = "Return the yaxis scale string: %s""" % (
", ".join(mscale.get_scale_names()))
@docstring.dedent_interpd
def set_yscale(self, value, **kwargs):
"""
Call signature::
set_yscale(value)
Set the scaling of the y-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.yaxis.set_scale(value, **kwargs)
self.autoscale_view(scalex=False)
self._update_transScale()
def get_yticks(self, minor=False):
"""Return the y ticks as a list of locations"""
return self.yaxis.get_ticklocs(minor=minor)
def set_yticks(self, ticks, minor=False):
"""
Set the y ticks with list of *ticks*
ACCEPTS: sequence of floats
Keyword arguments:
*minor*: [ *False* | *True* ]
Sets the minor ticks if *True*
"""
return self.yaxis.set_ticks(ticks, minor=minor)
def get_ymajorticklabels(self):
"""
Get the major y tick labels as a list of
:class:`~matplotlib.text.Text` instances.
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_majorticklabels())
def get_yminorticklabels(self):
"""
Get the minor y tick labels as a list of
:class:`~matplotlib.text.Text` instances.
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_minorticklabels())
def get_yticklabels(self, minor=False):
"""
Get the y tick labels as a list of :class:`~matplotlib.text.Text`
instances
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_ticklabels(minor=minor))
@docstring.dedent_interpd
def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
Call signature::
set_yticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the y tick labels with list of strings *labels*. Return a list of
:class:`~matplotlib.text.Text` instances.
*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.yaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
def xaxis_date(self, tz=None):
"""
Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is a timezone string or :class:`tzinfo` instance.
Defaults to rc value.
"""
# should be enough to inform the unit conversion interface
# dates are coming in
self.xaxis.axis_date(tz)
def yaxis_date(self, tz=None):
"""
Sets up y-axis ticks and labels that treat the y data as dates.
*tz* is a timezone string or :class:`tzinfo` instance.
Defaults to rc value.
"""
self.yaxis.axis_date(tz)
def format_xdata(self, x):
"""
        Return *x* formatted as a string. This function will use the
        attribute :attr:`fmt_xdata` if it is callable, else will fall
        back on the xaxis major formatter.
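        A minimal sketch of overriding the formatting through the
        :attr:`fmt_xdata` attribute (the callable below is illustrative)::

            import matplotlib.pyplot as plt

            fig, ax = plt.subplots()
            ax.fmt_xdata = lambda x: '[{0:.2f}]'.format(x)
            print(ax.format_xdata(3.14159))    # -> [3.14]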
"""
try: return self.fmt_xdata(x)
except TypeError:
func = self.xaxis.get_major_formatter().format_data_short
val = func(x)
return val
def format_ydata(self, y):
"""
        Return *y* formatted as a string. This function will use the
        :attr:`fmt_ydata` attribute if it is callable, else will fall
        back on the yaxis major formatter.
"""
try: return self.fmt_ydata(y)
except TypeError:
func = self.yaxis.get_major_formatter().format_data_short
val = func(y)
return val
def format_coord(self, x, y):
"""Return a format string formatting the *x*, *y* coord"""
if x is None:
xs = '???'
else:
xs = self.format_xdata(x)
if y is None:
ys = '???'
else:
ys = self.format_ydata(y)
return 'x=%s y=%s'%(xs,ys)
#### Interactive manipulation
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
"""
return True
def can_pan(self) :
"""
Return *True* if this axes supports any pan/zoom button functionality.
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
ACCEPTS: [ *True* | *False* ]
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
self._pan_start = cbook.Bunch(
lim = self.viewLim.frozen(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
bbox = self.bbox.frozen(),
x = x,
y = y
)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
def format_deltas(key, dx, dy):
if key=='control':
if(abs(dx)>abs(dy)):
dy = dx
else:
dx = dy
elif key=='x':
dy = 0
elif key=='y':
dx = 0
elif key=='shift':
if 2*abs(dx) < abs(dy):
dx=0
elif 2*abs(dy) < abs(dx):
dy=0
elif(abs(dx)>abs(dy)):
dy=dy/abs(dy)*abs(dx)
else:
dx=dx/abs(dx)*abs(dy)
return (dx,dy)
p = self._pan_start
dx = x - p.x
dy = y - p.y
if dx == 0 and dy == 0:
return
if button == 1:
dx, dy = format_deltas(key, dx, dy)
result = p.bbox.translated(-dx, -dy) \
.transformed(p.trans_inverse)
elif button == 3:
try:
dx = -dx / float(self.bbox.width)
dy = -dy / float(self.bbox.height)
dx, dy = format_deltas(key, dx, dy)
if self.get_aspect() != 'auto':
dx = 0.5 * (dx + dy)
dy = dx
alpha = np.power(10.0, (dx, dy))
start = np.array([p.x, p.y])
oldpoints = p.lim.transformed(p.trans)
newpoints = start + alpha * (oldpoints - start)
result = mtransforms.Bbox(newpoints) \
.transformed(p.trans_inverse)
except OverflowError:
warnings.warn('Overflow while panning')
return
self.set_xlim(*result.intervalx)
self.set_ylim(*result.intervaly)
def get_cursor_props(self):
"""
        Return the cursor properties as a (*linewidth*, *color*)
tuple, where *linewidth* is a float and *color* is an RGBA
tuple
"""
return self._cursorProps
def set_cursor_props(self, *args):
"""
Set the cursor property as::
ax.set_cursor_props(linewidth, color)
or::
ax.set_cursor_props((linewidth, color))
ACCEPTS: a (*float*, *color*) tuple
"""
if len(args)==1:
lw, c = args[0]
elif len(args)==2:
lw, c = args
else:
raise ValueError('args must be a (linewidth, color) tuple')
        c = mcolors.colorConverter.to_rgba(c)
self._cursorProps = lw, c
def connect(self, s, func):
"""
        Register observers to be notified when certain events occur. The
        callback functions have the following signature::
func(ax) # where ax is the instance making the callback.
The following events can be connected to:
'xlim_changed','ylim_changed'
        The connection id is returned - you can use this with
disconnect to disconnect from the axes event
"""
raise mplDeprecation('use the callbacks CallbackRegistry instance '
'instead')
def disconnect(self, cid):
"""disconnect from the Axes event."""
raise mplDeprecation('use the callbacks CallbackRegistry instance '
'instead')
def get_children(self):
"""return a list of child artists"""
children = []
children.append(self.xaxis)
children.append(self.yaxis)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.tables)
children.extend(self.artists)
children.extend(self.images)
if self.legend_ is not None:
children.append(self.legend_)
children.extend(self.collections)
children.append(self.title)
children.append(self.patch)
children.extend(self.spines.itervalues())
return children
def contains(self,mouseevent):
"""
        Test whether the mouse event occurred in the axes.
Returns *True* / *False*, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
return self.patch.contains(mouseevent)
def contains_point(self, point):
"""
        Returns *True* if the point (a tuple of x, y) is inside the axes
        (the area defined by its patch). A pixel coordinate is
        required.
"""
return self.patch.contains_point(point, radius=1.0)
def pick(self, *args):
"""
Call signature::
pick(mouseevent)
each child artist will fire a pick event if mouseevent is over
the artist and the artist has picker set
"""
if len(args) > 1:
raise mplDeprecation('New pick API implemented -- '
'see API_CHANGES in the src distribution')
martist.Artist.pick(self, args[0])
def __pick(self, x, y, trans=None, among=None):
"""
        Return the artist under the point that is closest to the *x*, *y*.
        If *trans* is *None*, *x* and *y* are in window coords,
(0,0 = lower left). Otherwise, *trans* is a
:class:`~matplotlib.transforms.Transform` that specifies the
coordinate system of *x*, *y*.
The selection of artists from amongst which the pick function
finds an artist can be narrowed using the optional keyword
argument *among*. If provided, this should be either a sequence
of permitted artists or a function taking an artist as its
argument and returning a true value if and only if that artist
can be selected.
Note this algorithm calculates distance to the vertices of the
polygon, so if you want to pick a patch, click on the edge!
"""
# MGDTODO: Needs updating
if trans is not None:
xywin = trans.transform_point((x,y))
else:
xywin = x,y
def dist_points(p1, p2):
'return the distance between two points'
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def dist_x_y(p1, x, y):
'*x* and *y* are arrays; return the distance to the closest point'
x1, y1 = p1
return min(np.sqrt((x-x1)**2+(y-y1)**2))
def dist(a):
if isinstance(a, Text):
bbox = a.get_window_extent()
l,b,w,h = bbox.bounds
verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
xt, yt = zip(*verts)
elif isinstance(a, Patch):
path = a.get_path()
tverts = a.get_transform().transform_path(path)
xt, yt = zip(*tverts)
elif isinstance(a, mlines.Line2D):
xdata = a.get_xdata(orig=False)
ydata = a.get_ydata(orig=False)
xt, yt = a.get_transform().numerix_x_y(xdata, ydata)
return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))
artists = self.lines + self.patches + self.texts
if callable(among):
            artists = filter(among, artists)
elif iterable(among):
amongd = dict([(k,1) for k in among])
artists = [a for a in artists if a in amongd]
elif among is None:
pass
else:
raise ValueError('among must be callable or iterable')
if not len(artists): return None
ds = [ (dist(a),a) for a in artists]
ds.sort()
return ds[0][1]
#### Labelling
def get_title(self):
"""
Get the title text string.
"""
return self.title.get_text()
@docstring.dedent_interpd
def set_title(self, label, fontdict=None, **kwargs):
"""
Call signature::
set_title(label, fontdict=None, **kwargs):
Set the title for the axes.
kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`
for information on how override and the optional args work
"""
default = {
'fontsize':rcParams['axes.titlesize'],
'verticalalignment' : 'baseline',
'horizontalalignment' : 'center'
}
self.title.set_text(label)
self.title.update(default)
if fontdict is not None: self.title.update(fontdict)
self.title.update(kwargs)
return self.title
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
@docstring.dedent_interpd
def set_xlabel(self, xlabel, fontdict=None, labelpad=None, **kwargs):
"""
Call signature::
set_xlabel(xlabel, fontdict=None, labelpad=None, **kwargs)
Set the label for the xaxis.
*labelpad* is the spacing in points between the label and the x-axis
Valid kwargs are :class:`~matplotlib.text.Text` properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`
for information on how override and the optional args work
"""
if labelpad is not None: self.xaxis.labelpad = labelpad
return self.xaxis.set_label_text(xlabel, fontdict, **kwargs)
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
@docstring.dedent_interpd
def set_ylabel(self, ylabel, fontdict=None, labelpad=None, **kwargs):
"""
Call signature::
set_ylabel(ylabel, fontdict=None, labelpad=None, **kwargs)
Set the label for the yaxis
*labelpad* is the spacing in points between the label and the y-axis
Valid kwargs are :class:`~matplotlib.text.Text` properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`
for information on how override and the optional args work
"""
if labelpad is not None: self.yaxis.labelpad = labelpad
return self.yaxis.set_label_text(ylabel, fontdict, **kwargs)
@docstring.dedent_interpd
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
Add text to the axes.
Call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text in string *s* to axis at location *x*, *y*, data
coordinates.
Keyword arguments:
*fontdict*:
A dictionary to override the default text properties.
If *fontdict* is *None*, the defaults are determined by your rc
parameters.
*withdash*: [ *False* | *True* ]
Creates a :class:`~matplotlib.text.TextWithDash` instance
instead of a :class:`~matplotlib.text.Text` instance.
Individual keyword arguments can be used to override any given
parameter::
text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
text(0.5, 0.5,'matplotlib',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes)
You can put a rectangular box around the text instance (eg. to
set a background color) by using the keyword *bbox*. *bbox* is
a dictionary of :class:`matplotlib.patches.Rectangle`
properties. For example::
text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
Valid kwargs are :class:`~matplotlib.text.Text` properties:
%(Text)s
"""
default = {
'verticalalignment' : 'baseline',
'horizontalalignment' : 'left',
'transform' : self.transData,
}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s,
)
else:
t = mtext.Text(
x=x, y=y, text=s,
)
self._set_artist_props(t)
t.update(default)
if fontdict is not None: t.update(fontdict)
t.update(kwargs)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
#if t.get_clip_on(): t.set_clip_box(self.bbox)
if 'clip_on' in kwargs: t.set_clip_box(self.bbox)
return t
@docstring.dedent_interpd
def annotate(self, *args, **kwargs):
"""
Create an annotation: a piece of text referring to a data
point.
Call signature::
annotate(s, xy, xytext=None, xycoords='data',
textcoords='data', arrowprops=None, **kwargs)
Keyword arguments:
%(Annotation)s
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
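        A minimal usage sketch (the data point and text below are
        illustrative)::

            import matplotlib.pyplot as plt

            fig, ax = plt.subplots()
            ax.plot([1, 2, 3], [1, 4, 2])
            ax.annotate('peak', xy=(2, 4), xytext=(2.5, 4.5),
                        arrowprops=dict(facecolor='black', shrink=0.05))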
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
        if 'clip_on' in kwargs: a.set_clip_path(self.patch)
self.texts.append(a)
a._remove_method = lambda h: self.texts.remove(h)
return a
#### Lines and spans
@docstring.dedent_interpd
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
Add a horizontal line across the axis.
Call signature::
axhline(y=0, xmin=0, xmax=1, **kwargs)
Draw a horizontal line at *y* from *xmin* to *xmax*. With the
default values of *xmin* = 0 and *xmax* = 1, this line will
always span the horizontal extent of the axes, regardless of
the xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the horizontal extent is
in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*
location is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red hline at *y* = 0 that spans the xrange::
>>> axhline(linewidth=4, color='r')
* draw a default hline at *y* = 1 that spans the xrange::
>>> axhline(y=1)
        * draw a default hline at *y* = .5 that spans the middle half of
the xrange::
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
with the exception of 'transform':
%(Line2D)s
.. seealso::
:meth:`axhspan`
for example plot and source code
"""
if "transform" in kwargs:
raise ValueError(
"'transform' is not allowed as a kwarg;"
+ "axhline generates its own transform.")
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
self._process_unit_info( ydata=y, kwargs=kwargs )
yy = self.convert_yunits( y )
scaley = (yy<ymin) or (yy>ymax)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
@docstring.dedent_interpd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
Add a vertical line across the axes.
Call signature::
axvline(x=0, ymin=0, ymax=1, **kwargs)
Draw a vertical line at *x* from *ymin* to *ymax*. With the
default values of *ymin* = 0 and *ymax* = 1, this line will
always span the vertical extent of the axes, regardless of the
ylim settings, even if you change them, eg. with the
:meth:`set_ylim` command. That is, the vertical extent is in
axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red vline at *x* = 0 that spans the yrange::
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange::
>>> axvline(x=1)
        * draw a default vline at *x* = .5 that spans the middle half of
the yrange::
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
with the exception of 'transform':
%(Line2D)s
.. seealso::
:meth:`axhspan`
for example plot and source code
"""
if "transform" in kwargs:
raise ValueError(
"'transform' is not allowed as a kwarg;"
+ "axvline generates its own transform.")
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
self._process_unit_info( xdata=x, kwargs=kwargs )
xx = self.convert_xunits( x )
scalex = (xx<xmin) or (xx>xmax)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
@docstring.dedent_interpd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
Add a horizontal span (rectangle) across the axis.
Call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, eg. with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes::
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
self.add_patch(p)
self.autoscale_view(scalex=False)
return p
@docstring.dedent_interpd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
Add a vertical span (rectangle) across the axes.
Call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, eg. with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
        1.0=top but the *x* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes::
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`
for example plot and source code
"""
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
self.add_patch(p)
self.autoscale_view(scaley=False)
return p
@docstring.dedent
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
Plot horizontal lines.
        Call signature::
hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Returns the :class:`~matplotlib.collections.LineCollection`
that was added.
Required arguments:
*y*:
a 1-D numpy array or iterable.
*xmin* and *xmax*:
            can be scalars or ``len(y)`` numpy arrays. If they are
scalars, then the respective values are constant, else the
widths of the lines are determined by *xmin* and *xmax*.
Optional keyword arguments:
*colors*:
a line collections color argument, either a single color
or a ``len(y)`` list of colors
*linestyles*:
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
**Example:**
.. plot:: mpl_examples/pylab_examples/hline_demo.py
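        A minimal usage sketch (the y positions and x limits below are
        illustrative)::

            import matplotlib.pyplot as plt

            fig, ax = plt.subplots()
            ax.hlines([0.2, 0.5, 0.8], xmin=0, xmax=10,
                      colors='b', linestyles='dashed')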
"""
if kwargs.get('fmt') is not None:
raise mplDeprecation('hlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
# We do the conversion first since not all unitized data is uniform
# process the unit information
self._process_unit_info( [xmin, xmax], y, kwargs=kwargs )
y = self.convert_yunits( y )
xmin = self.convert_xunits(xmin)
xmax = self.convert_xunits(xmax)
if not iterable(y): y = [y]
if not iterable(xmin): xmin = [xmin]
if not iterable(xmax): xmax = [xmax]
y = np.asarray(y)
xmin = np.asarray(xmin)
xmax = np.asarray(xmax)
if len(xmin)==1:
xmin = np.resize( xmin, y.shape )
if len(xmax)==1:
xmax = np.resize( xmax, y.shape )
if len(xmin)!=len(y):
raise ValueError('xmin and y are unequal sized sequences')
if len(xmax)!=len(y):
raise ValueError('xmax and y are unequal sized sequences')
verts = [ ((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
if len(y) > 0:
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
@docstring.dedent_interpd
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
Plot vertical lines.
Call signature::
            vlines(x, ymin, ymax, colors='k', linestyles='solid')
Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin*
or *ymax* can be scalars or len(*x*) numpy arrays. If they are
scalars, then the respective values are constant, else the
heights of the lines are determined by *ymin* and *ymax*.
*colors* :
A line collection's color args, either a single color
or a ``len(x)`` list of colors
*linestyles* : [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
Returns the :class:`matplotlib.collections.LineCollection`
that was added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
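        A minimal usage sketch (the x positions and y limits below are
        illustrative)::

            import matplotlib.pyplot as plt

            fig, ax = plt.subplots()
            ax.vlines([1, 2, 3], ymin=0, ymax=[0.5, 1.0, 0.75], colors='r')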
"""
if kwargs.get('fmt') is not None:
raise mplDeprecation('vlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
self._process_unit_info(xdata=x, ydata=[ymin, ymax], kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits( x )
ymin = self.convert_yunits( ymin )
ymax = self.convert_yunits( ymax )
if not iterable(x): x = [x]
if not iterable(ymin): ymin = [ymin]
if not iterable(ymax): ymax = [ymax]
x = np.asarray(x)
ymin = np.asarray(ymin)
ymax = np.asarray(ymax)
if len(ymin)==1:
ymin = np.resize( ymin, x.shape )
if len(ymax)==1:
ymax = np.resize( ymax, x.shape )
if len(ymin)!=len(x):
raise ValueError('ymin and x are unequal sized sequences')
if len(ymax)!=len(x):
raise ValueError('ymax and x are unequal sized sequences')
Y = np.array([ymin, ymax]).T
verts = [ ((thisx, thisymin), (thisx, thisymax))
for thisx, (thisymin, thisymax) in zip(x,Y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
if len(x) > 0:
minx = min( x )
maxx = max( x )
miny = min( min(ymin), min(ymax) )
maxy = max( max(ymin), max(ymax) )
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
#### Basic plotting
@docstring.dedent_interpd
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index array 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were added.
By default, each line is assigned a different color specified by a
'color cycle'. To change this behavior, you can edit the
axes.color_cycle rcParam. Alternatively, you can use
:meth:`~matplotlib.axes.Axes.set_default_color_cycle`.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
``'-'`` solid line style
``'--'`` dashed line style
``'-.'`` dash-dot line style
``':'`` dotted line style
``'.'`` point marker
``','`` pixel marker
``'o'`` circle marker
``'v'`` triangle_down marker
``'^'`` triangle_up marker
``'<'`` triangle_left marker
``'>'`` triangle_right marker
``'1'`` tri_down marker
``'2'`` tri_up marker
``'3'`` tri_left marker
``'4'`` tri_right marker
``'s'`` square marker
``'p'`` pentagon marker
``'*'`` star marker
``'h'`` hexagon1 marker
``'H'`` hexagon2 marker
``'+'`` plus marker
``'x'`` x marker
``'D'`` diamond marker
``'d'`` thin_diamond marker
``'|'`` vline marker
``'_'`` hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any property that has
a ``set_*`` method). You can use this to set a line label (for auto
        legends), linewidth, antialiasing, marker face color, etc. Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to all those lines, e.g.::
            plot(x1, y1, x2, y2, antialiased=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
marker, linestyle, and markercolor with::
plot(x, y, color='green', linestyle='dashed', marker='o',
markerfacecolor='blue', markersize=12).
See :class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
"""
scalex = kwargs.pop( 'scalex', True)
scaley = kwargs.pop( 'scaley', True)
if not self._hold: self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_line(line)
lines.append(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
@docstring.dedent_interpd
def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,
**kwargs):
"""
        Plot data that contains dates.
Call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ *None* | timezone string | :class:`tzinfo` instance]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ *True* | *False* ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ *False* | *True* ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.dates.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.dates.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.dates.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.dates.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates` for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange` for help on creating the required
floating point dates.
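        A minimal usage sketch (the date range and random values below are
        illustrative)::

            import datetime
            import numpy as np
            import matplotlib.pyplot as plt
            from matplotlib.dates import drange

            start = datetime.datetime(2012, 1, 1)
            stop = datetime.datetime(2012, 2, 1)
            dates = drange(start, stop, datetime.timedelta(days=1))
            fig, ax = plt.subplots()
            ax.plot_date(dates, np.random.rand(len(dates)))
            fig.autofmt_xdate()    # rotate the date labels for readability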
"""
if not self._hold: self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
@docstring.dedent_interpd
def loglog(self, *args, **kwargs):
"""
Make a plot with log scaling on both the *x* and *y* axis.
Call signature::
loglog(*args, **kwargs)
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
Base of the *x*/*y* logarithm
*subsx*/*subsy*: [ *None* | sequence ]
The location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
*nonposx*/*nonposy*: ['mask' | 'clip' ]
Non-positive values in *x* or *y* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
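        A minimal usage sketch (the data below are illustrative)::

            import numpy as np
            import matplotlib.pyplot as plt

            x = np.logspace(0, 3, 50)
            fig, ax = plt.subplots()
            ax.loglog(x, x ** 2.0, basex=10, basey=10)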
"""
if not self._hold: self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
'nonposx': kwargs.pop('nonposx', 'mask'),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
'nonposy': kwargs.pop('nonposy', 'mask'),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def semilogx(self, *args, **kwargs):
"""
Make a plot with log scaling on the *x* axis.
Call signature::
semilogx(*args, **kwargs)
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
Base of the *x* logarithm
*subsx*: [ *None* | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
*nonposx*: [ 'mask' | 'clip' ]
Non-positive values in *x* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`
For example code and figure
"""
if not self._hold: self.cla()
d = {'basex': kwargs.pop( 'basex', 10),
'subsx': kwargs.pop( 'subsx', None),
'nonposx': kwargs.pop('nonposx', 'mask'),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def semilogy(self, *args, **kwargs):
"""
Make a plot with log scaling on the *y* axis.
        Call signature::
semilogy(*args, **kwargs)
:func:`semilogy` supports all the keyword arguments of
        :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ *None* | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
*nonposy*: [ 'mask' | 'clip' ]
Non-positive values in *y* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`
For example code and figure
"""
if not self._hold: self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
'nonposy': kwargs.pop('nonposy', 'mask'),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def acorr(self, x, **kwargs):
"""
Plot the autocorrelation of *x*.
Call signature::
acorr(x, normed=True, detrend=mlab.detrend_none, usevlines=True,
maxlags=10, **kwargs)
If *normed* = *True*, normalize the data by the autocorrelation at
        0-th lag. *x* is detrended by the *detrend* callable (the default,
        :func:`~matplotlib.mlab.detrend_none`, applies no detrending).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*maxlags+1 lag vector
- *c* is the 2*maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*maxlags* is a positive integer detailing the number of lags
        to show. If *maxlags* is *None*, all ``2*len(x)-1`` lags will be
        returned.
        In that case the return value is a tuple (*lags*, *c*, *linecol*,
        *b*) where
- *linecol* is the
:class:`~matplotlib.collections.LineCollection`
- *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`
For documentation on valid kwargs.
**Example:**
:func:`~matplotlib.pyplot.xcorr` is top graph, and
:func:`~matplotlib.pyplot.acorr` is bottom graph.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
@docstring.dedent_interpd
def xcorr(self, x, y, normed=True, detrend=mlab.detrend_none,
usevlines=True, maxlags=10, **kwargs):
"""
Plot the cross correlation between *x* and *y*.
Call signature::
xcorr(self, x, y, normed=True, detrend=mlab.detrend_none,
usevlines=True, maxlags=10, **kwargs)
If *normed* = *True*, normalize the data by the cross
        correlation at 0-th lag. *x* and *y* are detrended by the
        *detrend* callable (the default applies no detrending). *x* and *y*
must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
        If *maxlags* is *None*, all ``2*len(x)-1`` lags will be returned.
**Example:**
:func:`~matplotlib.pyplot.xcorr` is top graph, and
:func:`~matplotlib.pyplot.acorr` is bottom graph.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
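        A minimal usage sketch (the random signals below are illustrative)::

            import numpy as np
            import matplotlib.pyplot as plt

            x = np.random.randn(100)
            y = np.random.randn(100)
            fig, ax = plt.subplots()
            lags, c, linecol, b = ax.xcorr(x, y, maxlags=20)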
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
            raise ValueError('maxlags must be None or strictly '
'positive < %d'%Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
def _get_legend_handles(self, legend_handler_map=None):
"return artists that will be used as handles for legend"
handles_original = self.lines + self.patches + \
self.collections + self.containers
# collections
handler_map = mlegend.Legend.get_default_handler_map()
if legend_handler_map is not None:
handler_map = handler_map.copy()
handler_map.update(legend_handler_map)
handles = []
for h in handles_original:
if h.get_label() == "_nolegend_": #.startswith('_'):
continue
if mlegend.Legend.get_legend_handler(handler_map, h):
handles.append(h)
return handles
def get_legend_handles_labels(self, legend_handler_map=None):
"""
Return handles and labels for legend
``ax.legend()`` is equivalent to ::
h, l = ax.get_legend_handles_labels()
ax.legend(h, l)
"""
handles = []
labels = []
for handle in self._get_legend_handles(legend_handler_map):
label = handle.get_label()
if label and not label.startswith('_'):
handles.append(handle)
labels.append(label)
return handles, labels
def legend(self, *args, **kwargs):
"""
Place a legend on the current axes.
Call signature::
legend(*args, **kwargs)
Places legend at location *loc*. Labels are a sequence of
strings and *loc* can be a string or an integer specifying the
legend location.
To make a legend with existing lines::
legend()
        :meth:`legend` by itself will try to build a legend using the label
property of the lines/patches/collections. You can set the label of
a line by doing::
plot(x, y, label='my data')
or::
line.set_label('my data').
If label is set to '_nolegend_', the item will not be shown in
legend.
To automatically generate the legend from labels::
legend( ('label1', 'label2', 'label3') )
To make a legend for a list of lines and labels::
legend( (line1, line2, line3), ('label1', 'label2', 'label3') )
To make a legend at a given location, using a location argument::
legend( ('label1', 'label2', 'label3'), loc='upper left')
or::
legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)
The location codes are
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
Users can specify any arbitrary location for the legend using the
*bbox_to_anchor* keyword argument. bbox_to_anchor can be an instance
        of BboxBase (or its derivatives) or a tuple of 2 or 4 floats.
For example,
loc = 'upper right', bbox_to_anchor = (0.5, 0.5)
        will place the legend so that the upper right corner of the legend is
        at the center of the axes.
The legend location can be specified in another coordinate system by
using the *bbox_transform* keyword.
The *loc* itself can be a 2-tuple giving the x, y coordinates of the lower-left corner of
the legend in axes coords (*bbox_to_anchor* is ignored).
Keyword arguments:
*prop*: [ *None* | FontProperties | dict ]
A :class:`matplotlib.font_manager.FontProperties`
instance. If *prop* is a dictionary, a new instance will be
created with *prop*. If *None*, use rc settings.
*fontsize*: [ size in points | 'xx-small' | 'x-small' | 'small' | 'medium' | 'large' | 'x-large' | 'xx-large' ]
Set the font size. May be either a size string, relative to
the default font size, or an absolute font size in points. This
argument is only used if prop is not specified.
*numpoints*: integer
The number of points in the legend for a line
*scatterpoints*: integer
The number of points in the legend for a scatter plot
*scatteroffsets*: list of floats
a list of yoffsets for scatter symbols in legend
*markerscale*: [ *None* | scalar ]
The relative size of legend markers vs. original. If *None*,
use rc settings.
*frameon*: [ *True* | *False* ]
if *True*, draw a frame around the legend.
The default is set by the rcParam 'legend.frameon'
*fancybox*: [ *None* | *False* | *True* ]
if *True*, draw a frame with a round fancybox. If *None*,
use rc settings
*shadow*: [ *None* | *False* | *True* ]
If *True*, draw a shadow behind legend. If *None*,
use rc settings.
*ncol* : integer
number of columns. Default is 1.
*mode* : [ "expand" | *None* ]
if mode is "expand", the legend will be horizontally expanded
to fill the axes area (or *bbox_to_anchor*)
*bbox_to_anchor* : an instance of BboxBase or a tuple of 2 or 4 floats
the bbox that the legend will be anchored to.
*bbox_transform* : [ an instance of Transform | *None* ]
the transform for the bbox. transAxes if *None*.
*title* : string
the legend title
Padding and spacing between various elements use the following
keyword parameters. These values are measured in font-size
units. E.g., a fontsize of 10 points and a handlelength=5
implies a handlelength of 50 points. Values from rcParams
will be used if None.
================ ==================================================================
Keyword Description
================ ==================================================================
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ==================================================================
.. Note:: Not all kinds of artists are supported by the legend command.
See LINK (FIXME) for details.
**Example:**
.. plot:: mpl_examples/api/legend_demo.py
.. seealso::
:ref:`plotting-guide-legend`.
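A minimal usage sketch (labels, data and the *loc*/*ncol* values are
illustrative only)::
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 4, 9], label='squares')
ax.plot([1, 2, 3], [1, 2, 3], label='linear')
ax.legend(loc='upper left', ncol=1)
plt.show()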
"""
if len(args)==0:
handles, labels = self.get_legend_handles_labels()
if len(handles) == 0:
warnings.warn("No labeled objects found. "
"Use label='...' kwarg on individual plots.")
return None
elif len(args)==1:
# LABELS
labels = args[0]
handles = [h for h, label in zip(self._get_legend_handles(),
labels)]
elif len(args)==2:
if is_string_like(args[1]) or isinstance(args[1], int):
# LABELS, LOC
labels, loc = args
handles = [h for h, label in zip(self._get_legend_handles(),
labels)]
kwargs['loc'] = loc
else:
# LINES, LABELS
handles, labels = args
elif len(args)==3:
# LINES, LABELS, LOC
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend')
# Why do we need to call "flatten" here? -JJL
# handles = cbook.flatten(handles)
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
return self.legend_
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
"""
Make a step plot.
Call signature::
step(x, y, *args, **kwargs)
Additional keyword args to :func:`step` are the same as those
for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i+1].
If 'post', that interval has level y[i].
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
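A minimal usage sketch (the data and the *where* choice are
illustrative only)::
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(10)
fig, ax = plt.subplots()
ax.step(x, np.sin(x), where='mid')
plt.show()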
"""
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + where
return self.plot(x, y, *args, **kwargs)
@docstring.dedent_interpd
def bar(self, left, height, width=0.8, bottom=None, **kwargs):
"""
Make a bar plot.
Call signature::
bar(left, height, width=0.8, bottom=0, **kwargs)
Make a bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*left*, *height*, *width*, and *bottom* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ===============================================
Argument Description
======== ===============================================
*left* the x coordinates of the left sides of the bars
*height* the heights of the bars
======== ===============================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*width* the widths of the bars
*bottom* the y coordinates of the bottom edges of
the bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*error_kw* dictionary of kwargs to be passed to
errorbar method. *ecolor* and *capsize*
may be specified here rather than as
independent kwargs.
*align* 'edge' (default) | 'center'
*orientation* 'vertical' | 'horizontal'
*log* [False|True] False (default) leaves the
orientation axis as-is; True sets it to
log scale
=============== ==========================================
For vertical bars, *align* = 'edge' aligns bars by their left
edges in left, while *align* = 'center' interprets these
values as the *x* coordinates of the bar centers. For
horizontal bars, *align* = 'edge' aligns bars by their bottom
edges in bottom, while *align* = 'center' interprets these
values as the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Detail: *xerr* and *yerr* are passed directly to
:meth:`errorbar`, so they can also have shape 2xN for
independent specification of lower and upper errors.
Other optional kwargs:
%(Rectangle)s
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
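A minimal usage sketch (all values and keyword choices below are
illustrative only)::
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.bar([0, 1, 2], [3, 5, 2], width=0.8, color='c', align='center',
yerr=[0.4, 0.3, 0.5], ecolor='k')
plt.show()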
"""
if not self._hold: self.cla()
color = kwargs.pop('color', None)
edgecolor = kwargs.pop('edgecolor', None)
linewidth = kwargs.pop('linewidth', None)
# Because xerr and yerr will be passed to errorbar,
# most dimension checking and processing will be left
# to the errorbar method.
xerr = kwargs.pop('xerr', None)
yerr = kwargs.pop('yerr', None)
error_kw = kwargs.pop('error_kw', dict())
ecolor = kwargs.pop('ecolor', None)
capsize = kwargs.pop('capsize', 3)
error_kw.setdefault('ecolor', ecolor)
error_kw.setdefault('capsize', capsize)
align = kwargs.pop('align', 'edge')
orientation = kwargs.pop('orientation', 'vertical')
log = kwargs.pop('log', False)
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log', nonposy='clip')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
adjust_ylim = True
else:
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log', nonposx='clip')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
adjust_xlim = True
else:
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
raise ValueError('invalid orientation: %s' % orientation)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) == 0: # until to_rgba_array is changed
color = [[0,0,0,0]]
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) == 0: # until to_rgba_array is changed
edgecolor = [[0,0,0,0]]
if len(edgecolor) < nbars:
edgecolor *= nbars
# FIXME: convert the following to proper input validation
# raising ValueError; don't use assert for this.
assert len(left)==nbars, "incompatible sizes: argument 'left' must be length %d or scalar" % nbars
assert len(height)==nbars, ("incompatible sizes: argument 'height' must be length %d or scalar" %
nbars)
assert len(width)==nbars, ("incompatible sizes: argument 'width' must be length %d or scalar" %
nbars)
assert len(bottom)==nbars, ("incompatible sizes: argument 'bottom' must be length %d or scalar" %
nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
left = self.convert_xunits( left )
width = self.convert_xunits( width )
if xerr is not None:
xerr = self.convert_xunits( xerr )
if self.yaxis is not None:
bottom = self.convert_yunits( bottom )
height = self.convert_yunits( height )
if yerr is not None:
yerr = self.convert_yunits( yerr )
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i]/2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]
else:
raise ValueError('invalid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h<0:
b += h
h = abs(h)
if w<0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label='_nolegend_'
)
r.update(kwargs)
r.get_path()._interpolation_steps = 100
#print r.get_label(), label, 'label' in kwargs
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l+0.5*w for l, w in zip(left, width)]
y = [b+h for b,h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l+w for l,w in zip(left, width)]
y = [b+0.5*h for b,h in zip(bottom, height)]
if "label" not in error_kw:
error_kw["label"] = '_nolegend_'
errorbar = self.errorbar(x, y,
yerr=yerr, xerr=xerr,
fmt=None, **error_kw)
else:
errorbar = None
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin([w for w in width if w > 0])
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin([h for h in height if h > 0])
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
bar_container = BarContainer(patches, errorbar, label=label)
self.add_container(bar_container)
return bar_container
@docstring.dedent_interpd
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
Make a horizontal bar plot.
Call signature::
barh(bottom, width, height=0.8, left=0, **kwargs)
Make a horizontal bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*bottom*, *width*, *height*, and *left* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ======================================================
Argument Description
======== ======================================================
*bottom* the vertical positions of the bottom edges of the bars
*width* the lengths of the bars
======== ======================================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*height* the heights (thicknesses) of the bars
*left* the x coordinates of the left edges of the
bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*log* [False|True] False (default) leaves the
horizontal axis as-is; True sets it to log
scale
=============== ==========================================
Setting *align* = 'edge' aligns bars by their bottom edges in
bottom, while *align* = 'center' interprets these values as
the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
barh as the basis for stacked bar charts, or candlestick
plots.
other optional kwargs:
%(Rectangle)s
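A minimal usage sketch (values are illustrative only)::
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.barh([0, 1, 2], [3, 5, 2], height=0.6, color='m', align='center')
plt.show()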
"""
patches = self.bar(left=left, height=height, width=width, bottom=bottom,
orientation='horizontal', **kwargs)
return patches
@docstring.dedent_interpd
def broken_barh(self, xranges, yrange, **kwargs):
"""
Plot horizontal bars.
Call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
These can either be a single argument, i.e.::
facecolors = 'black'
or a sequence of arguments for the various bars, i.e.::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
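A minimal usage sketch (the ranges and colors are arbitrary)::
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.broken_barh([(10, 50), (100, 20), (130, 10)], (20, 9),
facecolors=('blue', 'red', 'green'))
plt.show()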
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-',
bottom=None, label=None):
"""
Create a stem plot.
Call signature::
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
using *markerfmt*. A horizontal line at 0 is plotted using
*basefmt*.
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
This `document <http://www.mathworks.com/help/techdoc/ref/stem.html>`_
for details.
**Example:**
.. plot:: mpl_examples/pylab_examples/stem_plot.py
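A minimal usage sketch (data and format strings are illustrative only)::
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0.1, 2 * np.pi, 10)
fig, ax = plt.subplots()
markerline, stemlines, baseline = ax.stem(x, np.cos(x), linefmt='b-',
markerfmt='bo', basefmt='r-')
plt.show()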
"""
remember_hold=self._hold
if not self._hold: self.cla()
self.hold(True)
markerline, = self.plot(x, y, markerfmt, label="_nolegend_")
if bottom is None:
bottom = 0
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx,thisx], [bottom, thisy], linefmt,
label="_nolegend_")
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [bottom,bottom],
basefmt, label="_nolegend_")
self.hold(remember_hold)
stem_container = StemContainer((markerline, stemlines, baseline),
label=label)
self.add_container(stem_container)
return stem_container
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1, startangle=None, radius=None):
r"""
Plot a pie chart.
Call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1, startangle=None, radius=None)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized. The wedges are plotted counterclockwise,
by default starting from the x-axis.
Keyword arguments:
*explode*: [ *None* | len(x) sequence ]
If not *None*, is a ``len(x)`` array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ *None* | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ *None* | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ *None* | format string | format function ]
If not *None*, is a string or function used to label the
wedges with their numeric value. The label will be placed inside
the wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
*pctdistance*: scalar
The ratio between the center of each pie slice and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ *False* | *True* ]
Draw a shadow beneath the pie.
*startangle*: [ *None* | Offset angle ]
If not *None*, rotates the start of the pie chart by *angle*
degrees counterclockwise from the x-axis.
*radius*: [ *None* | scalar ]
The radius of the pie, if *radius* is *None* it will be set to 1.
The pie chart will probably look best if the figure and axes are
square, e.g.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
Return value:
If *autopct* is *None*, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
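A minimal usage sketch (fractions, labels and the *autopct* format are
illustrative only)::
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.pie([15, 30, 45, 10], explode=(0, 0.05, 0, 0),
labels=('A', 'B', 'C', 'D'), autopct='%1.1f%%', shadow=True)
plt.show()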
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx>1: x = np.divide(x,sx)
if labels is None: labels = ['']*len(x)
if explode is None: explode = [0]*len(x)
assert(len(x)==len(labels))
assert(len(x)==len(explode))
if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0,0
if radius is None:
radius = 1
# Starting theta1 is the start fraction of the circle
if startangle is None:
theta1 = 0
else:
theta1 = startangle / 360.0
texts = []
slices = []
autotexts = []
i = 0
for frac, label, expl in cbook.safezip(x,labels, explode):
x, y = center
theta2 = theta1 + frac
thetam = 2*math.pi*0.5*(theta1+theta2)
x += expl*math.cos(thetam)
y += expl*math.sin(thetam)
w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,
facecolor=colors[i%len(colors)])
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder(0.9*w.get_zorder())
shad.set_label('_nolegend_')
self.add_patch(shad)
xt = x + labeldistance*radius*math.cos(thetam)
yt = y + labeldistance*radius*math.sin(thetam)
label_alignment = xt > 0 and 'left' or 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center')
texts.append(t)
if autopct is not None:
xt = x + pctdistance*radius*math.cos(thetam)
yt = y + pctdistance*radius*math.sin(thetam)
if is_string_like(autopct):
s = autopct%(100.*frac)
elif callable(autopct):
s = autopct(100.*frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center')
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None:
return slices, texts
else:
return slices, texts, autotexts
@docstring.dedent_interpd
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1, capthick=None,
**kwargs):
"""
Plot an errorbar graph.
Call signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1,
capthick=None)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
*xerr*/*yerr*: [ scalar | N, Nx1, or 2xN array-like ]
If a scalar number, len(N) array-like object, or an Nx1
array-like object, errorbars are drawn at +/-value relative
to the data.
If a sequence of shape 2xN, errorbars are drawn at -row1
and +row2 relative to the data.
*fmt*: '-'
The plot format symbol. If *fmt* is *None*, only the
errorbars are plotted. This is used for adding
errorbars to a bar plot, for example.
*ecolor*: [ *None* | mpl color ]
A matplotlib color arg which gives the color the errorbar lines;
if *None*, use the marker color.
*elinewidth*: scalar
The linewidth of the errorbar lines. If *None*, use the linewidth.
*capsize*: scalar
The length of the error bar caps in points
*capthick*: scalar
An alias kwarg to *markeredgewidth* (a.k.a. - *mew*). This
setting is a more sensible name for the property that
controls the thickness of the error bar cap in points. For
backwards compatibility, if *mew* or *markeredgewidth* are given,
then they will over-ride *capthick*. This may change in future
releases.
*barsabove*: [ *True* | *False* ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims* / *uplims* / *xlolims* / *xuplims*: [ *False* | *True* ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*.
*errorevery*: positive integer
subsamples the errorbars. E.g., if errorevery=5, errorbars for
every 5th datapoint will be plotted. The data plot itself still
shows all data points.
All other keyword arguments are passed on to the plot command for the
markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Returns (*plotline*, *caplines*, *barlinecols*):
*plotline*: :class:`~matplotlib.lines.Line2D` instance
*x*, *y* plot markers and/or line
*caplines*: list of error bar cap
:class:`~matplotlib.lines.Line2D` instances
*barlinecols*: list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/pylab_examples/errorbar_demo.py
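A minimal usage sketch (the data and error values are illustrative; a
scalar *yerr* is applied symmetrically to every point)::
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(5)
y = x ** 2
fig, ax = plt.subplots()
ax.errorbar(x, y, yerr=0.5, xerr=[0.1, 0.2, 0.1, 0.2, 0.1],
fmt='o', ecolor='g', capsize=4)
plt.show()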
"""
if errorevery < 1:
raise ValueError('errorevery has to be a strictly positive integer')
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold: self.cla()
holdstate = self._hold
self._hold = True
label = kwargs.pop("label", None)
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*len(y)
l0 = None
if barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,label="_nolegend_", **kwargs)
barcols = []
caplines = []
lines_kw = {'label':'_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
if 'linewidth' in kwargs:
lines_kw['linewidth']=kwargs['linewidth']
if 'lw' in kwargs:
lines_kw['lw']=kwargs['lw']
if 'transform' in kwargs:
lines_kw['transform'] = kwargs['transform']
if 'alpha' in kwargs:
lines_kw['alpha'] = kwargs['alpha']
if 'zorder' in kwargs:
lines_kw['zorder'] = kwargs['zorder']
# arrays fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = np.asarray([lolims]*len(x), bool)
else: lolims = np.asarray(lolims, bool)
if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)
else: uplims = np.asarray(uplims, bool)
if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)
else: xlolims = np.asarray(xlolims, bool)
if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)
else: xuplims = np.asarray(xuplims, bool)
everymask = np.arange(len(x)) % errorevery == 0
def xywhere(xs, ys, mask):
"""
return xs[mask], ys[mask] where mask is True but xs and
ys are not arrays
"""
assert len(xs)==len(ys)
assert len(xs)==len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
if capsize > 0:
plot_kw = {
'ms':2*capsize,
'label':'_nolegend_'}
if capthick is not None:
# 'mew' has higher priority, I believe,
# if both 'mew' and 'markeredgewidth' exists.
# So, save capthick to markeredgewidth so that
# explicitly setting mew or markeredgewidth will
# over-write capthick.
plot_kw['markeredgewidth'] = capthick
# For backwards-compat, allow explicit setting of
# 'mew' or 'markeredgewidth' to over-ride capthick.
if 'markeredgewidth' in kwargs:
plot_kw['markeredgewidth']=kwargs['markeredgewidth']
if 'mew' in kwargs:
plot_kw['mew']=kwargs['mew']
if 'transform' in kwargs:
plot_kw['transform'] = kwargs['transform']
if 'alpha' in kwargs:
plot_kw['alpha'] = kwargs['alpha']
if 'zorder' in kwargs:
plot_kw['zorder'] = kwargs['zorder']
if xerr is not None:
if (iterable(xerr) and len(xerr)==2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[0])]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[1])]
else:
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
yo, _ = xywhere(y, right, everymask)
lo, ro= xywhere(left, right, everymask)
barcols.append( self.hlines(yo, lo, ro, **lines_kw ) )
if capsize > 0:
if xlolims.any():
# can't use numpy logical indexing since left and
# y are lists
leftlo, ylo = xywhere(left, y, xlolims & everymask)
caplines.extend(
self.plot(leftlo, ylo, ls='None',
marker=mlines.CARETLEFT, **plot_kw) )
xlolims = ~xlolims
leftlo, ylo = xywhere(left, y, xlolims & everymask)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
else:
leftlo, ylo = xywhere(left, y, everymask)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
if xuplims.any():
rightup, yup = xywhere(right, y, xuplims & everymask)
caplines.extend(
self.plot(rightup, yup, ls='None',
marker=mlines.CARETRIGHT, **plot_kw) )
xuplims = ~xuplims
rightup, yup = xywhere(right, y, xuplims & everymask)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
else:
rightup, yup = xywhere(right, y, everymask)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
if yerr is not None:
if (iterable(yerr) and len(yerr)==2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[0])]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[1])]
else:
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
xo, _ = xywhere(x, lower, everymask)
lo, uo= xywhere(lower, upper, everymask)
barcols.append( self.vlines(xo, lo, uo, **lines_kw) )
if capsize > 0:
if lolims.any():
xlo, lowerlo = xywhere(x, lower, lolims & everymask)
caplines.extend(
self.plot(xlo, lowerlo, ls='None',
marker=mlines.CARETDOWN, **plot_kw) )
lolims = ~lolims
xlo, lowerlo = xywhere(x, lower, lolims & everymask)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
else:
xlo, lowerlo = xywhere(x, lower, everymask)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
if uplims.any():
xup, upperup = xywhere(x, upper, uplims & everymask)
caplines.extend(
self.plot(xup, upperup, ls='None',
marker=mlines.CARETUP, **plot_kw) )
uplims = ~uplims
xup, upperup = xywhere(x, upper, uplims & everymask)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
else:
xup, upperup = xywhere(x, upper, everymask)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
if not barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
if ecolor is None:
if l0 is None:
ecolor = self._get_lines.color_cycle.next()
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
self._hold = holdstate
errorbar_container = ErrorbarContainer((l0, tuple(caplines), tuple(barcols)),
has_xerr=(xerr is not None),
has_yerr=(yerr is not None),
label=label)
self.containers.append(errorbar_container)
return errorbar_container # (l0, caplines, barcols)
def boxplot(self, x, notch=False, sym='b+', vert=True, whis=1.5,
positions=None, widths=None, patch_artist=False,
bootstrap=None, usermedians=None, conf_intervals=None):
"""
Make a box and whisker plot.
Call signature::
boxplot(x, notch=False, sym='+', vert=True, whis=1.5,
positions=None, widths=None, patch_artist=False,
bootstrap=None, usermedians=None, conf_intervals=None)
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
Function Arguments:
*x* :
Array or a sequence of vectors.
*notch* : [ False (default) | True ]
If False (default), produces a rectangular box plot.
If True, will produce a notched box plot
*sym* : [ default 'b+' ]
The default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
*vert* : [ False | True (default) ]
If True (default), makes the boxes vertical.
If False, makes horizontal boxes.
*whis* : [ default 1.5 ]
Defines the length of the whiskers as a function of the inner
quartile range. They extend to the most extreme data point
within the ``whis*(75%-25%)`` data range.
*bootstrap* : [ *None* (default) | integer ]
Specifies whether to bootstrap the confidence intervals
around the median for notched boxplots. If bootstrap==None,
no bootstrapping is performed, and notches are calculated
using a Gaussian-based asymptotic approximation (see McGill, R.,
Tukey, J.W., and Larsen, W.A., 1978, and Kendall and Stuart,
1967). Otherwise, bootstrap specifies the number of times to
bootstrap the median to determine its 95% confidence intervals.
Values between 1000 and 10000 are recommended.
*usermedians* : [ default None ]
An array or sequence whose first dimension (or length) is
compatible with *x*. This overrides the medians computed by
matplotlib for each element of *usermedians* that is not None.
When an element of *usermedians* == None, the median will be
computed directly as normal.
*conf_intervals* : [ default None ]
Array or sequence whose first dimension (or length) is compatible
with *x* and whose second dimension is 2. When the current element
of *conf_intervals* is not None, the notch locations computed by
matplotlib are overridden (assuming notch is True). When an element of
*conf_intervals* is None, boxplot computes the notches using the
method specified by the other kwargs (e.g. *bootstrap*).
*positions* : [ default 1,2,...,n ]
Sets the horizontal positions of the boxes. The ticks and limits
are automatically set to match the positions.
*widths* : [ default 0.5 ]
Either a scalar or a vector and sets the width of each box. The
default is 0.5, or ``0.15*(distance between extreme positions)``
if that is smaller.
*patch_artist* : [ False (default) | True ]
If False produces boxes with the Line2D artist
If True produces boxes with the Patch artist
Returns a dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created. That dictionary has the following keys
(assuming vertical boxplots):
- boxes: the main body of the boxplot showing the quartiles
and the median's confidence intervals if enabled.
- medians: horizontal lines at the median of each box.
- whiskers: the vertical lines extending to the most extreme,
non-outlier data points.
- caps: the horizontal lines at the ends of the whiskers.
- fliers: points representing data that extend beyond the
whiskers (outliers).
**Example:**
.. plot:: pyplots/boxplot_demo.py
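A minimal usage sketch (random data; the *notch*, *sym* and *whis*
choices are illustrative only)::
import numpy as np
import matplotlib.pyplot as plt
data = [np.random.normal(0, std, 100) for std in (1, 2, 3)]
fig, ax = plt.subplots()
result = ax.boxplot(data, notch=True, sym='g+', vert=True, whis=1.5)
plt.show()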
"""
def bootstrapMedian(data, N=5000):
# determine 95% confidence intervals of the median
M = len(data)
percentile = [2.5,97.5]
estimate = np.zeros(N)
for n in range(N):
bsIndex = np.random.random_integers(0,M-1,M)
bsData = data[bsIndex]
estimate[n] = mlab.prctile(bsData, 50)
CI = mlab.prctile(estimate, percentile)
return CI
def computeConfInterval(data, med, iq, bootstrap):
if bootstrap is not None:
# Do a bootstrap estimate of notch locations.
# get conf. intervals around median
CI = bootstrapMedian(data, N=bootstrap)
notch_min = CI[0]
notch_max = CI[1]
else:
# Estimate notch locations using Gaussian-based
# asymptotic approximation.
#
# For discussion: McGill, R., Tukey, J.W.,
# and Larsen, W.A. (1978) "Variations of
# Boxplots", The American Statistician, 32:12-16.
N = len(data)
notch_min = med - 1.57*iq/np.sqrt(N)
notch_max = med + 1.57*iq/np.sqrt(N)
return notch_min, notch_max
if not self._hold: self.cla()
holdStatus = self._hold
whiskers, caps, boxes, medians, fliers = [], [], [], [], []
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.ravel()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
raise ValueError("input x can have no more than 2 dimensions")
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# sanitize user-input medians
msg1 = "usermedians must either be a list/tuple or a 1d array"
msg2 = "usermedians' length must be compatible with x"
if usermedians is not None:
if hasattr(usermedians, 'shape'):
if len(usermedians.shape) != 1:
raise ValueError(msg1)
elif usermedians.shape[0] != col:
raise ValueError(msg2)
elif len(usermedians) != col:
raise ValueError(msg2)
#sanitize user-input confidence intervals
msg1 = "conf_intervals must either be a list of tuples or a 2d array"
msg2 = "conf_intervals' length must be compatible with x"
msg3 = "each conf_interval, if specificied, must have two values"
if conf_intervals is not None:
if hasattr(conf_intervals, 'shape'):
if len(conf_intervals.shape) != 2:
raise ValueError(msg1)
elif conf_intervals.shape[0] != col:
raise ValueError(msg2)
elif conf_intervals.shape[1] != 2:  # each CI needs exactly two values
raise ValueError(msg3)
else:
if len(conf_intervals) != col:
raise ValueError(msg2)
for ci in conf_intervals:
if ci is not None and len(ci) != 2:
raise ValueError(msg3)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
distance = max(positions) - min(positions)
widths = min(0.15*max(distance,1.0), 0.5)
if isinstance(widths, float) or isinstance(widths, int):
widths = np.ones((col,), float) * widths
# loop through columns, adding each to plot
self.hold(True)
for i, pos in enumerate(positions):
d = np.ravel(x[i])
row = len(d)
if row==0:
# no data, skip this position
continue
# get median and quartiles
q1, med, q3 = mlab.prctile(d,[25,50,75])
# replace with input medians if available
if usermedians is not None:
if usermedians[i] is not None:
med = usermedians[i]
# get high extreme
iq = q3 - q1
hi_val = q3 + whis*iq
wisk_hi = np.compress( d <= hi_val , d )
if len(wisk_hi) == 0:
wisk_hi = q3
else:
wisk_hi = max(wisk_hi)
# get low extreme
lo_val = q1 - whis*iq
wisk_lo = np.compress( d >= lo_val, d )
if len(wisk_lo) == 0:
wisk_lo = q1
else:
wisk_lo = min(wisk_lo)
# get fliers - if we are showing them
flier_hi = []
flier_lo = []
flier_hi_x = []
flier_lo_x = []
if len(sym) != 0:
flier_hi = np.compress( d > wisk_hi, d )
flier_lo = np.compress( d < wisk_lo, d )
flier_hi_x = np.ones(flier_hi.shape[0]) * pos
flier_lo_x = np.ones(flier_lo.shape[0]) * pos
# get x locations for fliers, whisker, whisker cap and box sides
box_x_min = pos - widths[i] * 0.5
box_x_max = pos + widths[i] * 0.5
wisk_x = np.ones(2) * pos
cap_x_min = pos - widths[i] * 0.25
cap_x_max = pos + widths[i] * 0.25
cap_x = [cap_x_min, cap_x_max]
# get y location for median
med_y = [med, med]
# calculate 'notch' plot
if notch:
# conf. intervals from user, if available
if conf_intervals is not None and conf_intervals[i] is not None:
notch_max = np.max(conf_intervals[i])
notch_min = np.min(conf_intervals[i])
else:
notch_min, notch_max = computeConfInterval(d, med, iq,
bootstrap)
# make our notched box vectors
box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,
box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,
box_x_min ]
box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,
med, notch_min, q1]
# make our median line vectors
med_x = [cap_x_min, cap_x_max]
med_y = [med, med]
# calculate 'regular' plot
else:
# make our box vectors
box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]
box_y = [q1, q1, q3, q3, q1 ]
# make our median line vectors
med_x = [box_x_min, box_x_max]
def to_vc(xs,ys):
# convert arguments to verts and codes
verts = []
#codes = []
for xi,yi in zip(xs,ys):
verts.append( (xi,yi) )
verts.append( (0,0) ) # ignored
codes = [mpath.Path.MOVETO] + \
[mpath.Path.LINETO]*(len(verts)-2) + \
[mpath.Path.CLOSEPOLY]
return verts,codes
def patch_list(xs,ys):
verts,codes = to_vc(xs,ys)
path = mpath.Path( verts, codes )
patch = mpatches.PathPatch(path)
self.add_artist(patch)
return [patch]
# vertical or horizontal plot?
if vert:
def doplot(*args):
return self.plot(*args)
def dopatch(xs,ys):
return patch_list(xs,ys)
else:
def doplot(*args):
shuffled = []
for i in xrange(0, len(args), 3):
shuffled.extend([args[i+1], args[i], args[i+2]])
return self.plot(*shuffled)
def dopatch(xs,ys):
xs,ys = ys,xs # flip X, Y
return patch_list(xs,ys)
if patch_artist:
median_color = 'k'
else:
median_color = 'r'
whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
wisk_x, [q3, wisk_hi], 'b--'))
caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
cap_x, [wisk_lo, wisk_lo], 'k-'))
if patch_artist:
boxes.extend(dopatch(box_x, box_y))
else:
boxes.extend(doplot(box_x, box_y, 'b-'))
medians.extend(doplot(med_x, med_y, median_color+'-'))
fliers.extend(doplot(flier_hi_x, flier_hi, sym,
flier_lo_x, flier_lo, sym))
# fix our axes/ticks up a little
if vert:
setticks, setlim = self.set_xticks, self.set_xlim
else:
setticks, setlim = self.set_yticks, self.set_ylim
newlimits = min(positions)-0.5, max(positions)+0.5
setlim(newlimits)
setticks(positions)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers)
@docstring.dedent_interpd
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=None, linewidths=None,
faceted=True, verts=None,
**kwargs):
"""
Make a scatter plot.
Call signatures::
scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=None, linewidths=None,
verts=None, **kwargs)
Make a scatter plot of *x* versus *y*, where *x*, *y* are
converted to 1-D sequences which must be of the same length, *N*.
Keyword arguments:
*s*:
size in points^2. It is a scalar or an array of the same
length as *x* and *y*.
*c*:
a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however.
*marker*:
can be one of:
%(MarkerTable)s
Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Other keyword arguments: the color mapping and normalization
arguments will be used only if *c* is an array of floats.
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance or registered
name. If *None*, defaults to rc ``image.cmap``. *cmap* is
only used if *c* is an array of floats.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0, 1. If *None*, use the default
:func:`normalize`. *norm* is only used if *c* is an array
of floats.
*vmin*/*vmax*:
*vmin* and *vmax* are used in conjunction with norm to
normalize luminance data. If either is *None*, the min and
max of the color array *C* are used. Note if you pass a
*norm* instance, your settings for *vmin* and *vmax* will
be ignored.
*alpha*: ``0 <= scalar <= 1`` or *None*
The alpha value for the patches
*linewidths*: [ *None* | scalar | sequence ]
If *None*, defaults to (lines.linewidth,). Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Optional kwargs control the
:class:`~matplotlib.collections.Collection` properties; in
particular:
*edgecolors*:
The string 'none' to plot faces with no outlines
*facecolors*:
The string 'none' to plot unfilled outlines
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
A :class:`~matplotlib.collections.Collection` instance is
returned.
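A minimal usage sketch (random data; the size, colormap and alpha
values are illustrative only)::
import numpy as np
import matplotlib.pyplot as plt
x, y = np.random.rand(2, 50)
fig, ax = plt.subplots()
sc = ax.scatter(x, y, s=80, c=y, cmap='jet', alpha=0.75,
edgecolors='none')
fig.colorbar(sc)
plt.show()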
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x = self.convert_xunits(x)
y = self.convert_yunits(y)
# np.ma.ravel yields an ndarray, not a masked array,
# unless its argument is a masked array.
x = np.ma.ravel(x)
y = np.ma.ravel(y)
if x.size != y.size:
raise ValueError("x and y must be the same size")
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
c_is_stringy = is_string_like(c) or is_sequence_of_strings(c)
if not c_is_stringy:
c = np.asanyarray(c)
if c.size == x.size:
c = np.ma.ravel(c)
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
scales = s # Renamed for readability below.
if c_is_stringy:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
else:
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if c.size == x.size:
colors = None # use cmap, norm after collection is created
else:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
if faceted:
edgecolors = None
else:
edgecolors = 'none'
warnings.warn(
'''replace "faceted=False" with "edgecolors='none'"''',
mplDeprecation) # 2008/04/18
sym = None
symstyle = 0
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
marker_obj = mmarkers.MarkerStyle(marker)
path = marker_obj.get_path().transformed(
marker_obj.get_transform())
if not marker_obj.is_filled():
edgecolors = 'face'
collection = mcoll.PathCollection(
(path,), scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = kwargs.pop('transform', self.transData),
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
# The margin adjustment is a hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important.
# Also, only bother with this padding if there is anything to draw.
if self._xmargin < 0.05 and x.size > 0 :
self.set_xmargin(0.05)
if self._ymargin < 0.05 and x.size > 0 :
self.set_ymargin(0.05)
self.add_collection(collection)
self.autoscale_view()
return collection
@docstring.dedent_interpd
def hexbin(self, x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear', extent = None,
cmap=None, norm=None, vmin=None, vmax=None,
alpha=None, linewidths=None, edgecolors='none',
reduce_C_function = np.mean, mincnt=None, marginals=False,
**kwargs):
"""
Make a hexagonal binning plot.
Call signature::
hexbin(x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=None, linewidths=None, edgecolors='none',
reduce_C_function = np.mean, mincnt=None, marginals=False,
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is *None*
(the default), this is a histogram of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to numpy's mean function (np.mean). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked arrays, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ *None* | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Internally, :math:`log_{10}(i+1)` is used to
determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
*yscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the vertical axis.
*mincnt*: [ *None* | a positive integer ]
If not *None*, only display cells with more than *mincnt*
number of points in the cell
*marginals*: [ *True* | *False* ]
if marginals is *True*, plot the marginal density as
colormapped rectangles along the bottom of the x-axis and
left of the y-axis
*extent*: [ *None* | scalars (left, right, bottom, top) ]
The limits of the bins. The default assigns the limits
based on gridsize, x, y, xscale and yscale.
Other keyword arguments controlling color mapping and normalization
arguments:
*cmap*: [ *None* | Colormap ]
a :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
*norm*: [ *None* | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
*vmin* / *vmax*: scalar
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If either is *None*, the min and max of the color
array *C* are used. Note if you pass a norm instance, your settings
for *vmin* and *vmax* will be ignored.
*alpha*: scalar between 0 and 1, or *None*
the alpha value for the patches
*linewidths*: [ *None* | scalar ]
If *None*, defaults to rc lines.linewidth. Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ *None* | ``'none'`` | mpl color | color sequence ]
If ``'none'``, draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
:meth:`~matplotlib.collections.PolyCollection.get_array` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon. If *marginals* is *True*, horizontal
bar and vertical bar (both PolyCollections) will be attached
to the return collection as attributes *hbar* and *vbar*.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
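A minimal usage sketch (random data; *gridsize*, *bins* and *mincnt*
are illustrative only)::
import numpy as np
import matplotlib.pyplot as plt
x = np.random.standard_normal(10000)
y = np.random.standard_normal(10000)
fig, ax = plt.subplots()
hb = ax.hexbin(x, y, gridsize=50, bins='log', mincnt=1)
fig.colorbar(hb)
plt.show()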
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx/math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale=='log':
if np.any(x <= 0.0):
raise ValueError("x contains non-positive values, so can not"
" be log-scaled")
x = np.log10(x)
if yscale=='log':
if np.any(y <= 0.0):
raise ValueError("y contains non-positive values, so can not"
" be log-scaled")
y = np.log10(y)
if extent is not None:
xmin, xmax, ymin, ymax = extent
else:
xmin = np.amin(x)
xmax = np.amax(x)
ymin = np.amin(y)
ymax = np.amax(y)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax-xmin) / nx
sy = (ymax-ymin) / ny
if marginals:
xorig = x.copy()
yorig = y.copy()
x = (x-xmin)/sx
y = (y-ymin)/sy
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1*ny1+nx2*ny2
d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2
d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2
bdist = (d1<d2)
if C is None:
accum = np.zeros(n)
# Create appropriate views into "accum" array.
lattice1 = accum[:nx1*ny1]
lattice2 = accum[nx1*ny1:]
lattice1.shape = (nx1,ny1)
lattice2.shape = (nx2,ny2)
for i in xrange(len(x)):
if bdist[i]:
if ((ix1[i] >= 0) and (ix1[i] < nx1) and
(iy1[i] >= 0) and (iy1[i] < ny1)):
lattice1[ix1[i], iy1[i]]+=1
else:
if ((ix2[i] >= 0) and (ix2[i] < nx2) and
(iy2[i] >= 0) and (iy2[i] < ny2)):
lattice2[ix2[i], iy2[i]]+=1
# threshold
if mincnt is not None:
for i in xrange(nx1):
for j in xrange(ny1):
if lattice1[i,j]<mincnt:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
if lattice2[i,j]<mincnt:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
else:
if mincnt is None:
mincnt = 0
# create accumulation arrays
lattice1 = np.empty((nx1,ny1),dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i,j] = []
lattice2 = np.empty((nx2,ny2),dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i,j] = []
for i in xrange(len(x)):
if bdist[i]:
if ((ix1[i] >= 0) and (ix1[i] < nx1) and
(iy1[i] >= 0) and (iy1[i] < ny1)):
lattice1[ix1[i], iy1[i]].append( C[i] )
else:
if ((ix2[i] >= 0) and (ix2[i] < nx2) and
(iy2[i] >= 0) and (iy2[i] < ny2)):
lattice2[ix2[i], iy2[i]].append( C[i] )
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i,j]
if len(vals)>mincnt:
lattice1[i,j] = reduce_C_function( vals )
else:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i,j]
if len(vals)>mincnt:
lattice2[i,j] = reduce_C_function( vals )
else:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
offsets = np.zeros((n, 2), float)
offsets[:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1)
offsets[:nx1*ny1,1] = np.tile(np.arange(ny1), nx1)
offsets[nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2)
offsets[nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5
offsets[:,0] *= sx
offsets[:,1] *= sy
offsets[:,0] += xmin
offsets[:,1] += ymin
# remove accumulation bins with no data
offsets = offsets[good_idxs,:]
accum = accum[good_idxs]
polygon = np.zeros((6, 2), float)
polygon[:,0] = sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
polygon[:,1] = sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
if edgecolors=='none':
edgecolors = 'face'
if xscale == 'log' or yscale == 'log':
polygons = np.expand_dims(polygon, 0) + np.expand_dims(offsets, 1)
if xscale == 'log':
polygons[:, :, 0] = 10.0 ** polygons[:, :, 0]
xmin = 10.0 ** xmin
xmax = 10.0 ** xmax
self.set_xscale(xscale)
if yscale == 'log':
polygons[:, :, 1] = 10.0 ** polygons[:, :, 1]
ymin = 10.0 ** ymin
ymax = 10.0 ** ymax
self.set_yscale(yscale)
collection = mcoll.PolyCollection(
polygons,
edgecolors=edgecolors,
linewidths=linewidths,
)
else:
collection = mcoll.PolyCollection(
[polygon],
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
transOffset=mtransforms.IdentityTransform(),
offset_position="data"
)
if isinstance(norm, mcolors.LogNorm):
if (accum==0).any():
# make sure we have no zeros
accum += 1
# autoscale the norm with current accum values if it hasn't
# been set
if norm is not None:
if norm.vmin is None and norm.vmax is None:
norm.autoscale(accum)
# Transform accum if needed
if bins=='log':
accum = np.log10(accum+1)
elif bins is not None:
if not iterable(bins):
minimum, maximum = min(accum), max(accum)
bins-=1 # one less edge than bins
bins = minimum + (maximum-minimum)*np.arange(bins)/bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim( corners)
self.autoscale_view(tight=True)
# add the collection last
self.add_collection(collection)
if not marginals:
return collection
if C is None:
C = np.ones(len(x))
def coarse_bin(x, y, coarse):
ind = coarse.searchsorted(x).clip(0, len(coarse)-1)
mus = np.zeros(len(coarse))
for i in range(len(coarse)):
mu = reduce_C_function(y[ind==i])
mus[i] = mu
return mus
coarse = np.linspace(xmin, xmax, gridsize)
xcoarse = coarse_bin(xorig, C, coarse)
valid = ~np.isnan(xcoarse)
verts, values = [], []
for i,val in enumerate(xcoarse):
thismin = coarse[i]
if i<len(coarse)-1:
thismax = coarse[i+1]
else:
thismax = thismin + np.diff(coarse)[-1]
if not valid[i]: continue
verts.append([(thismin, 0), (thismin, 0.05), (thismax, 0.05), (thismax, 0)])
values.append(val)
values = np.array(values)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
hbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
hbar.set_array(values)
hbar.set_cmap(cmap)
hbar.set_norm(norm)
hbar.set_alpha(alpha)
hbar.update(kwargs)
self.add_collection(hbar)
coarse = np.linspace(ymin, ymax, gridsize)
ycoarse = coarse_bin(yorig, C, coarse)
valid = ~np.isnan(ycoarse)
verts, values = [], []
for i,val in enumerate(ycoarse):
thismin = coarse[i]
if i<len(coarse)-1:
thismax = coarse[i+1]
else:
thismax = thismin + np.diff(coarse)[-1]
if not valid[i]: continue
verts.append([(0, thismin), (0.0, thismax), (0.05, thismax), (0.05, thismin)])
values.append(val)
values = np.array(values)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
vbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
vbar.set_array(values)
vbar.set_cmap(cmap)
vbar.set_norm(norm)
vbar.set_alpha(alpha)
vbar.update(kwargs)
self.add_collection(vbar)
collection.hbar = hbar
collection.vbar = vbar
def on_changed(collection):
hbar.set_cmap(collection.get_cmap())
hbar.set_clim(collection.get_clim())
vbar.set_cmap(collection.get_cmap())
vbar.set_clim(collection.get_clim())
collection.callbacksSM.connect('changed', on_changed)
return collection
@docstring.dedent_interpd
def arrow(self, x, y, dx, dy, **kwargs):
"""
Add an arrow to the axes.
Call signature::
arrow(x, y, dx, dy, **kwargs)
Draws an arrow on the axes from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*). Uses a FancyArrow patch to construct the arrow.
Optional kwargs control the arrow construction and properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
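For illustration, a minimal sketch (hypothetical coordinates in data
units; ``ax`` denotes this axes)::
ax.arrow(0.2, 0.2, 0.4, 0.3, head_width=0.03, length_includes_head=True)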
"""
# Strip away units for the underlying patch since units
# do not make sense to most patch-like code
x = self.convert_xunits(x)
y = self.convert_yunits(y)
dx = self.convert_xunits(dx)
dy = self.convert_yunits(dy)
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold: self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, False)
self.update_datalim(q.XY)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
def stackplot(self, x, *args, **kwargs):
return mstack.stackplot(self, x, *args, **kwargs)
stackplot.__doc__ = mstack.stackplot.__doc__
def streamplot(self, x, y, u, v, density=1, linewidth=None, color=None,
cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
minlength=0.1, transform=None):
if not self._hold: self.cla()
stream_container = mstream.streamplot(self, x, y, u, v,
density=density,
linewidth=linewidth,
color=color,
cmap=cmap,
norm=norm,
arrowsize=arrowsize,
arrowstyle=arrowstyle,
minlength=minlength,
transform=transform)
return stream_container
streamplot.__doc__ = mstream.streamplot.__doc__
@docstring.dedent_interpd
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
"""
if not self._hold: self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b)
self.update_datalim(b.get_offsets())
self.autoscale_view()
return b
@docstring.dedent_interpd
def fill(self, *args, **kwargs):
"""
Plot filled polygons.
Call signature::
fill(*args, **kwargs)
*args* is a variable length argument, allowing for multiple
*x*, *y* pairs with an optional color format string; see
:func:`~matplotlib.pyplot.plot` for details on the argument
parsing. For example, to plot a polygon with vertices at *x*,
*y* in blue::
ax.fill(x, y, 'b')
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were added.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, e.g. shade a region
between 0 and *y* along *x*, use :meth:`fill_between`
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/fill_demo.py
"""
if not self._hold: self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_patch( poly )
patches.append( poly )
self.autoscale_view()
return patches
@docstring.dedent_interpd
def fill_between(self, x, y1, y2=0, where=None, interpolate=False,
**kwargs):
"""
Make filled polygons between two curves.
Call signature::
fill_between(x, y1, y2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* where
``where==True``
*x* :
An N-length array of the x data
*y1* :
An N-length array (or scalar) of the y data
*y2* :
An N-length array (or scalar) of the y data
*where* :
If *None*, default to fill between everywhere. If not *None*,
it is an N-length numpy boolean array and the fill will
only happen over the regions where ``where==True``.
*interpolate* :
If *True*, interpolate between the two lines to find the
precise point of intersection. Otherwise, the start and
end points of the filled region will only occur on explicit
values in the *x* array.
*kwargs* :
Keyword args passed on to the
:class:`~matplotlib.collections.PolyCollection`.
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between_demo.py
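For illustration, a minimal sketch with synthetic data (variable names
are arbitrary; ``ax`` denotes this axes)::
import numpy as np
x = np.linspace(0, 2 * np.pi, 200)
y = np.sin(x)
# shade only where the curve lies above zero
ax.fill_between(x, y, 0, where=y >= 0, facecolor='green', alpha=0.5)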
.. seealso::
:meth:`fill_betweenx`
for filling between two sets of x-values
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = ma.masked_invalid(self.convert_xunits(x))
y1 = ma.masked_invalid(self.convert_yunits(y1))
y2 = ma.masked_invalid(self.convert_yunits(y2))
if y1.ndim == 0:
y1 = np.ones_like(x)*y1
if y2.ndim == 0:
y2 = np.ones_like(x)*y2
if where is None:
where = np.ones(len(x), np.bool)
else:
where = np.asarray(where, np.bool)
if not (x.shape == y1.shape == y2.shape == where.shape):
raise ValueError("Argument dimensions are incompatible")
mask = reduce(ma.mask_or, [ma.getmask(a) for a in (x, y1, y2)])
if mask is not ma.nomask:
where &= ~mask
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2*N+2, 2), np.float)
if interpolate:
def get_interp_point(ind):
im1 = max(ind-1, 0)
x_values = x[im1:ind+1]
diff_values = y1[im1:ind+1] - y2[im1:ind+1]
y1_values = y1[im1:ind+1]
if len(diff_values) == 2:
if np.ma.is_masked(diff_values[1]):
return x[im1], y1[im1]
elif np.ma.is_masked(diff_values[0]):
return x[ind], y1[ind]
diff_order = diff_values.argsort()
diff_root_x = np.interp(
0, diff_values[diff_order], x_values[diff_order])
diff_root_y = np.interp(diff_root_x, x_values, y1_values)
return diff_root_x, diff_root_y
start = get_interp_point(ind0)
end = get_interp_point(ind1)
else:
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
start = xslice[0], y2slice[0]
end = xslice[-1], y2slice[-1]
X[0] = start
X[N+1] = end
X[1:N+1,0] = xslice
X[1:N+1,1] = y1slice
X[N+2:,0] = xslice[::-1]
X[N+2:,1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
@docstring.dedent_interpd
def fill_betweenx(self, y, x1, x2=0, where=None, **kwargs):
"""
Make filled polygons between two horizontal curves.
Call signature::
fill_betweenx(y, x1, x2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *x1* and *x2* where
``where==True``
*y* :
An N-length array of the y data
*x1* :
An N-length array (or scalar) of the x data
*x2* :
An N-length array (or scalar) of the x data
*where* :
If *None*, default to fill between everywhere. If not *None*,
it is an N-length numpy boolean array and the fill will
only happen over the regions where ``where==True``
*kwargs* :
keyword args passed on to the
:class:`~matplotlib.collections.PolyCollection`
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_betweenx_demo.py
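For illustration, a minimal sketch with synthetic data::
import numpy as np
y = np.linspace(0, 2 * np.pi, 200)
x1 = np.sin(y)
# shade between the curve and x = 0 wherever the curve is positive
ax.fill_betweenx(y, x1, 0, where=x1 >= 0, facecolor='blue', alpha=0.5)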
.. seealso::
:meth:`fill_between`
for filling between two sets of y-values
"""
# Handle united data, such as dates
self._process_unit_info(ydata=y, xdata=x1, kwargs=kwargs)
self._process_unit_info(xdata=x2)
# Convert the arrays so we can work with them
y = ma.masked_invalid(self.convert_yunits(y))
x1 = ma.masked_invalid(self.convert_xunits(x1))
x2 = ma.masked_invalid(self.convert_xunits(x2))
if x1.ndim == 0:
x1 = np.ones_like(y)*x1
if x2.ndim == 0:
x2 = np.ones_like(y)*x2
if where is None:
where = np.ones(len(y), np.bool)
else:
where = np.asarray(where, np.bool)
if not (y.shape == x1.shape == x2.shape == where.shape):
raise ValueError("Argument dimensions are incompatible")
mask = reduce(ma.mask_or, [ma.getmask(a) for a in (y, x1, x2)])
if mask is not ma.nomask:
where &= ~mask
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
yslice = y[ind0:ind1]
x1slice = x1[ind0:ind1]
x2slice = x2[ind0:ind1]
if not len(yslice):
continue
N = len(yslice)
Y = np.zeros((2*N+2, 2), np.float)
# the purpose of the next two lines is for when x2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the x1 sample points do
Y[0] = x2slice[0], yslice[0]
Y[N+1] = x2slice[-1], yslice[-1]
Y[1:N+1,0] = x1slice
Y[1:N+1,1] = yslice
Y[N+2:,0] = x2slice[::-1]
Y[N+2:,1] = yslice[::-1]
polys.append(Y)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
X1Y = np.array([x1[where], y[where]]).T
X2Y = np.array([x2[where], y[where]]).T
self.dataLim.update_from_data_xy(X1Y, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(X2Y, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
#### plotting z(x,y): imshow, pcolor and relatives, contour
@docstring.dedent_interpd
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=None, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
Display an image on the axes.
Call signature::
imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,
alpha=None, vmin=None, vmax=None, origin=None, extent=None,
**kwargs)
Display the image in *X* to current axes. *X* may be a float
array, a uint8 array or a PIL image. If *X* is an array, *X*
can have the following shapes:
* MxN -- luminance (grayscale, float array only)
* MxNx3 -- RGB (float or uint8 array)
* MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays should be
in the range 0.0 to 1.0; MxN float arrays may be normalised.
An :class:`matplotlib.image.AxesImage` instance is returned.
Keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance, e.g. ``cm.jet``.
If *None*, default to rc ``image.cmap`` value.
*cmap* is ignored when *X* has RGB(A) information
*aspect*: [ *None* | 'auto' | 'equal' | scalar ]
If 'auto', changes the image aspect ratio to match that of the axes
If 'equal', and *extent* is *None*, changes the axes
aspect ratio to match that of the image. If *extent* is
not *None*, the axes aspect ratio is changed to match that
of the extent.
If *None*, default to rc ``image.aspect`` value.
*interpolation*:
Acceptable values are *None*, 'none', 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos'
If *interpolation* is *None*, default to rc
``image.interpolation``. See also the *filternorm* and
*filterrad* parameters
If *interpolation* is ``'none'``, then no interpolation is
performed on the Agg, ps and pdf backends. Other backends
will fall back to 'nearest'.
*norm*: [ *None* | Normalize ]
An :class:`matplotlib.colors.Normalize` instance; if
*None*, a default linear :class:`~matplotlib.colors.Normalize` is used. This scales
luminance -> 0-1
*norm* is only used for an MxN float array.
*vmin*/*vmax*: [ *None* | scalar ]
Used to scale a luminance image to 0-1. If either is
*None*, the min and max of the luminance values will be
used. Note if *norm* is not *None*, the settings for
*vmin* and *vmax* will be ignored.
*alpha*: scalar
The alpha blending value, between 0 (transparent) and 1 (opaque)
or *None*
*origin*: [ *None* | 'upper' | 'lower' ]
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. If *None*, default to rc ``image.origin``.
*extent*: [ *None* | scalars (left, right, bottom, top) ]
Data limits for the axes. The default assigns zero-based row,
column indices to the *x*, *y* centers of the pixels.
*shape*: [ *None* | scalars (columns, rows) ]
For raw buffer images
*filternorm*:
A parameter for the antigrain image resize filter. From the
antigrain documentation, if *filternorm* = 1, the filter normalizes
integer values and corrects the rounding errors. It doesn't do
anything with the source floating point values, it corrects only
integers according to the rule of 1.0 which means that any sum of
pixel weights must be equal to 1.0. So, the filter function must
produce a graph of the proper shape.
*filterrad*:
The filter radius for filters that have a radius
parameter, i.e. when interpolation is one of: 'sinc',
'lanczos' or 'blackman'
Additional kwargs are :class:`~matplotlib.artist.Artist` properties.
%(Artist)s
**Example:**
.. plot:: mpl_examples/pylab_examples/image_demo.py
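For illustration, a minimal sketch with synthetic data (``cm`` is
:mod:`matplotlib.cm`)::
import numpy as np
import matplotlib.cm as cm
data = np.random.rand(10, 10)
im = ax.imshow(data, cmap=cm.hot, interpolation='nearest',
vmin=0.0, vmax=1.0, origin='lower')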
"""
if not self._hold: self.cla()
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if aspect is None: aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
self._set_artist_props(im)
if im.get_clip_path() is None:
# image does not already have clipping set, clip to axes patch
im.set_clip_path(self.patch)
#if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
# update ax.dataLim, and, if autoscaling, set viewLim
# to tightly fit the image, regardless of dataLim.
im.set_extent(im.get_extent())
self.images.append(im)
im._remove_method = lambda h: self.images.remove(h)
return im
def _pcolorargs(self, funcname, *args):
if len(args)==1:
C = args[0]
numRows, numCols = C.shape
X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) )
elif len(args)==3:
X, Y, C = args
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
if len(X.shape) != 2 or X.shape[0] == 1:
x = X.reshape(1,Nx)
X = x.repeat(Ny, axis=0)
if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to %s; see help(%s)' % (
funcname, funcname))
return X, Y, C
@docstring.dedent_interpd
def pcolor(self, *args, **kwargs):
"""
Create a pseudocolor plot of a 2-D array.
Note: pcolor can be very slow for large arrays; consider
using the similar but much faster
:func:`~matplotlib.pyplot.pcolormesh` instead.
Call signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
*C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Ideally the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D arrays or column vectors,
they will be expanded as needed into the appropriate 2-D arrays,
making a rectangular grid.
*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*, use
rc settings.
*norm*: [ *None* | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luminance data to 0,1. If *None*, a default
linear :class:`~matplotlib.colors.Normalize` instance is used.
*vmin*/*vmax*: [ *None* | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either is *None*, it
is autoscaled to the respective min or max
of the color array *C*. If not *None*, *vmin* or
*vmax* passed in here override any pre-existing values
supplied in the *norm* instance.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
MATLAB.
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='none'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ *None* | ``'none'`` | color | color sequence]
If *None*, the rc setting is used by default.
If ``'none'``, edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
Return value is a :class:`matplotlib.collections.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the MATLAB convention: an
array *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the array would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`meshgrid`::
x = np.arange(5)
y = np.arange(3)
X, Y = meshgrid(x,y)
is equivalent to::
X = array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand( len(x), len(y))
then you need::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
MATLAB :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
:class:`~matplotlib.collections.PolyCollection` properties:
%(PolyCollection)s
Note: the default *antialiaseds* is False if the default
*edgecolors*="none" is used. This eliminates artificial lines
at patch boundaries, and works regardless of the value of
alpha. If *edgecolors* is not "none", then the default
*antialiaseds* is taken from
rcParams['patch.antialiased'], which defaults to *True*.
Stroking the edges may be preferred if *alpha* is 1, but
will cause artifacts otherwise.
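For illustration, a minimal sketch with synthetic data, giving *X* and
*Y* one more row and column than *C*::
import numpy as np
X, Y = np.meshgrid(np.arange(6), np.arange(4))
C = np.random.rand(3, 5)
coll = ax.pcolor(X, Y, C, edgecolors='k', linewidths=0.5)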
.. seealso::
:func:`~matplotlib.pyplot.pcolormesh`
For an explanation of the differences between
pcolor and pcolormesh.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args)
Ny, Nx = X.shape
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X)+ma.getmaskarray(Y)
xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask
newaxis = np.newaxis
compress = np.compress
ravelmask = (mask==0).ravel()
X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel())
Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel())
X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel())
Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel())
X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel())
Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel())
X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel())
Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel())
npoly = len(X1)
xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],
X2[:,newaxis], Y2[:,newaxis],
X3[:,newaxis], Y3[:,newaxis],
X4[:,newaxis], Y4[:,newaxis],
X1[:,newaxis], Y1[:,newaxis]),
axis=1)
verts = xy.reshape((npoly, 5, 2))
C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel())
linewidths = (0.25,)
if 'linewidth' in kwargs:
kwargs['linewidths'] = kwargs.pop('linewidth')
kwargs.setdefault('linewidths', linewidths)
if shading == 'faceted':
edgecolors = 'k',
else:
edgecolors = 'none'
if 'edgecolor' in kwargs:
kwargs['edgecolors'] = kwargs.pop('edgecolor')
ec = kwargs.setdefault('edgecolors', edgecolors)
# aa setting will default via collections to patch.antialiased
# unless the boundary is not stroked, in which case the
# default will be False; with unstroked boundaries, aa
# makes artifacts that are often disturbing.
if 'antialiased' in kwargs:
kwargs['antialiaseds'] = kwargs.pop('antialiased')
if 'antialiaseds' not in kwargs and (is_string_like(ec) and
ec.lower() == "none"):
kwargs['antialiaseds'] = False
collection = mcoll.PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_clim(vmin, vmax)
collection.autoscale_None()
self.grid(False)
x = X.compressed()
y = Y.compressed()
# Transform from native to data coordinates?
t = collection._transform
if (not isinstance(t, mtransforms.Transform)
and hasattr(t, '_as_mpl_transform')):
t = t._as_mpl_transform(self.axes)
if t and any(t.contains_branch_seperately(self.transData)):
trans_to_data = t - self.transData
pts = np.vstack([x, y]).T.astype(np.float)
transformed_pts = trans_to_data.transform(pts)
x = transformed_pts[..., 0]
y = transformed_pts[..., 1]
minx = np.amin(x)
maxx = np.amax(x)
miny = np.amin(y)
maxy = np.amax(y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
@docstring.dedent_interpd
def pcolormesh(self, *args, **kwargs):
"""
Plot a quadrilateral mesh.
Call signatures::
pcolormesh(C)
pcolormesh(X, Y, C)
pcolormesh(C, **kwargs)
Create a pseudocolor plot of a 2-D array.
pcolormesh is similar to :func:`~matplotlib.pyplot.pcolor`,
but uses a different mechanism and returns a different
object; pcolor returns a
:class:`~matplotlib.collections.PolyCollection` but pcolormesh
returns a
:class:`~matplotlib.collections.QuadMesh`. It is much faster,
so it is almost always preferred for large arrays.
*C* may be a masked array, but *X* and *Y* may not. Masked
array support is implemented via *cmap* and *norm*; in
contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
draw quadrilaterals with masked colors or vertices.
Keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*, use
rc settings.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1. If *None*, a default linear
:class:`~matplotlib.colors.Normalize` instance is used.
*vmin*/*vmax*: [ *None* | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either is *None*, it
is autoscaled to the respective min or max
of the color array *C*. If not *None*, *vmin* or
*vmax* passed in here override any pre-existing values
supplied in the *norm* instance.
*shading*: [ 'flat' | 'gouraud' ]
'flat' indicates a solid color for each quad. When
'gouraud' is used, each quad will be Gouraud shaded. With
Gouraud shading, *edgecolors* is ignored.
*edgecolors*: [ *None* | ``'None'`` | ``'face'`` | color | color sequence]
If *None*, the rc setting is used by default.
If ``'None'``, edges will not be visible.
If ``'face'``, edges will have the same color as the faces.
An mpl color or sequence of colors will set the edge color
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
Return value is a :class:`matplotlib.collections.QuadMesh`
object.
kwargs can be used to control the
:class:`matplotlib.collections.QuadMesh` properties:
%(QuadMesh)s
.. seealso::
:func:`~matplotlib.pyplot.pcolor`
For an explanation of the grid orientation and the
expansion of 1-D *X* and/or *Y* to 2-D arrays.
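For illustration, a minimal sketch with synthetic data (with the
default 'flat' shading the last row and column of *C* are ignored)::
import numpy as np
X, Y = np.meshgrid(np.linspace(0, 1, 101), np.linspace(0, 1, 101))
C = np.sin(10 * X) * np.cos(10 * Y)
mesh = ax.pcolormesh(X, Y, C)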
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat').lower()
antialiased = kwargs.pop('antialiased', False)
kwargs.setdefault('edgecolors', 'None')
X, Y, C = self._pcolorargs('pcolormesh', *args)
Ny, Nx = X.shape
# convert to one dimensional arrays
if shading != 'gouraud':
C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at
# lower left corner
else:
C = C.ravel()
X = X.ravel()
Y = Y.ravel()
coords = np.zeros(((Nx * Ny), 2), dtype=float)
coords[:, 0] = X
coords[:, 1] = Y
collection = mcoll.QuadMesh(
Nx - 1, Ny - 1, coords,
antialiased=antialiased, shading=shading, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_clim(vmin, vmax)
collection.autoscale_None()
self.grid(False)
# Transform from native to data coordinates?
t = collection._transform
if (not isinstance(t, mtransforms.Transform)
and hasattr(t, '_as_mpl_transform')):
t = t._as_mpl_transform(self.axes)
if t and any(t.contains_branch_seperately(self.transData)):
trans_to_data = t - self.transData
pts = np.vstack([X, Y]).T.astype(np.float)
transformed_pts = trans_to_data.transform(pts)
X = transformed_pts[..., 0]
Y = transformed_pts[..., 1]
minx = np.amin(X)
maxx = np.amax(X)
miny = np.amin(Y)
maxy = np.amax(Y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
@docstring.dedent_interpd
def pcolorfast(self, *args, **kwargs):
"""
pseudocolor plot of a 2-D array
Experimental; this is a pcolor-type method that
provides the fastest possible rendering with the Agg
backend, and that can handle any quadrilateral grid.
It supports only flat shading (no outlines), it lacks
support for log scaling of the axes, and it does not
have a pyplot wrapper.
Call signatures::
ax.pcolorfast(C, **kwargs)
ax.pcolorfast(xr, yr, C, **kwargs)
ax.pcolorfast(x, y, C, **kwargs)
ax.pcolorfast(X, Y, C, **kwargs)
C is the 2D array of color values corresponding to quadrilateral
cells. Let (nr, nc) be its shape. C may be a masked array.
``ax.pcolorfast(C, **kwargs)`` is equivalent to
``ax.pcolorfast([0,nc], [0,nr], C, **kwargs)``
*xr*, *yr* specify the ranges of *x* and *y* corresponding to the
rectangular region bounding *C*. If::
xr = [x0, x1]
and::
yr = [y0,y1]
then *x* goes from *x0* to *x1* as the second index of *C* goes
from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of
cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
(*nr*-1, *nc*-1). All cells are rectangles of the same size.
This is the fastest version.
*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
giving the x and y boundaries of the cells. Hence the cells are
rectangular but the grid may be nonuniform. The speed is
intermediate. (The grid is checked, and if found to be
uniform the fast version is used.)
*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
the (x,y) coordinates of the corners of the colored
quadrilaterals; the quadrilateral for C[i,j] has corners at
(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
Note that the column index corresponds to the x-coordinate,
and the row index corresponds to y; for details, see
the "Grid Orientation" section below.
Optional keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance from cm. If *None*,
use rc settings.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to scale
luminance data to 0,1. If *None*, a default linear normalization is used.
*vmin*/*vmax*: [ *None* | scalar ]
*vmin* and *vmax* are used in conjunction with norm to normalize
luminance data. If either are *None*, the min and max
of the color array *C* is used. If you pass a norm instance,
*vmin* and *vmax* will be *None*.
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
Return value is an image if a regular or rectangular grid
is specified, and a :class:`~matplotlib.collections.QuadMesh`
collection in the general quadrilateral case.
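For illustration, a minimal sketch using the rectangular-region form
with synthetic data::
import numpy as np
C = np.random.rand(20, 30)
ret = ax.pcolorfast([0.0, 3.0], [0.0, 2.0], C)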
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
C = args[-1]
nr, nc = C.shape
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and
np.ptp(dy) < 0.01*np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# convert to one dimensional arrays
# This should also be moved to the QuadMesh class
C = ma.ravel(C) # data point in each cell is value
# at lower left corner
X = x.ravel()
Y = y.ravel()
Nx = nc+1
Ny = nr+1
# The following needs to be cleaned up; the renderer
# requires separate contiguous arrays for X and Y,
# but the QuadMesh class requires the 2D array.
coords = np.empty(((Nx * Ny), 2), np.float64)
coords[:, 0] = X
coords[:, 1] = Y
# The QuadMesh class can also be changed to
# handle relevant superclass kwargs; the initializer
# should do much more than it does now.
collection = mcoll.QuadMesh(nc, nr, coords, 0, edgecolors="None")
collection.set_alpha(alpha)
collection.set_array(C)
collection.set_cmap(cmap)
collection.set_norm(norm)
self.add_collection(collection)
xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
ret = collection
else:
# One of the image styles:
xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(self, cmap, norm,
interpolation='nearest',
origin='lower',
extent=(xl, xr, yb, yt),
**kwargs)
im.set_data(C)
im.set_alpha(alpha)
self.images.append(im)
ret = im
if style == "pcolorimage":
im = mimage.PcolorImage(self, x, y, C,
cmap=cmap,
norm=norm,
alpha=alpha,
**kwargs)
self.images.append(im)
ret = im
self._set_artist_props(ret)
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
def contour(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = False
return mcontour.QuadContourSet(self, *args, **kwargs)
contour.__doc__ = mcontour.QuadContourSet.contour_doc
def contourf(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = True
return mcontour.QuadContourSet(self, *args, **kwargs)
contourf.__doc__ = mcontour.QuadContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
@docstring.dedent_interpd
def table(self, **kwargs):
"""
Add a table to the current axes.
Call signature::
table(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
Returns a :class:`matplotlib.table.Table` instance. For finer
grained control over tables, use the
:class:`~matplotlib.table.Table` class and add it to the axes
with :meth:`~matplotlib.axes.Axes.add_table`.
Thanks to John Gill for providing the class and table.
kwargs control the :class:`~matplotlib.table.Table`
properties:
%(Table)s
"""
return mtable.table(self, **kwargs)
def _make_twin_axes(self, *kl, **kwargs):
"""
make a twinx axes of self. This is used for twinx and twiny.
"""
ax2 = self.figure.add_axes(self.get_position(True), *kl, **kwargs)
return ax2
def twinx(self):
"""
Call signature::
ax = twinx()
create a twin of Axes for generating a plot with a shared
x-axis but independent y-axis. The y-axis of self will have
ticks on left and the returned axes will have ticks on the
right.
.. note::
For those who are 'picking' artists while using twinx, pick
events are only called for the artists in the top-most axes.
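For illustration, a minimal sketch with synthetic data::
import numpy as np
t = np.arange(0.0, 10.0, 0.1)
ax.plot(t, np.sin(t), 'b-')
ax2 = ax.twinx()
ax2.plot(t, np.exp(t / 5.0), 'r-')
ax2.set_ylabel('right-hand scale', color='r')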
"""
ax2 = self._make_twin_axes(sharex=self, frameon=False)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
ax2.yaxis.set_offset_position('right')
self.yaxis.tick_left()
ax2.xaxis.set_visible(False)
return ax2
def twiny(self):
"""
Call signature::
ax = twiny()
create a twin of Axes for generating a plot with a shared
y-axis but independent x axis. The x-axis of self will have
ticks on bottom and the returned axes will have ticks on the
top.
.. note::
For those who are 'picking' artists while using twiny, pick
events are only called for the artists in the top-most axes.
"""
ax2 = self._make_twin_axes(sharey=self, frameon=False)
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
self.xaxis.tick_bottom()
ax2.yaxis.set_visible(False)
return ax2
def get_shared_x_axes(self):
'Return a copy of the shared axes Grouper object for x axes'
return self._shared_x_axes
def get_shared_y_axes(self):
'Return a copy of the shared axes Grouper object for y axes'
return self._shared_y_axes
#### Data analysis
@docstring.dedent_interpd
def hist(self, x, bins=10, range=None, normed=False, weights=None,
cumulative=False, bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False,
color=None, label=None, stacked=False,
**kwargs):
"""
Plot a histogram.
Call signature::
hist(x, bins=10, range=None, normed=False, weights=None,
cumulative=False, bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False,
color=None, label=None, stacked=False,
**kwargs)
Compute and draw the histogram of *x*. The return value is a
tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
[*patches0*, *patches1*,...]) if the input contains multiple
data.
Multiple data can be provided via *x* as a list of datasets
of potentially different length ([*x0*, *x1*, ...]), or as
a 2-D ndarray in which each column is a dataset. Note that
the ndarray form is transposed relative to the list form.
Masked arrays are not supported at present.
Keyword arguments:
*bins*:
Either an integer number of bins or a sequence giving the
bins. If *bins* is an integer, *bins* + 1 bin edges
will be returned, consistent with :func:`numpy.histogram`
for numpy version >= 1.3, and with the *new* = True argument
in earlier versions.
Unequally spaced bins are supported if *bins* is a sequence.
*range*:
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is (x.min(), x.max()).
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling
is based on the specified bin range instead of the
range of x.
*normed*:
If *True*, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``. In a probability density, the integral of
the histogram should be 1; you can verify that with a
trapezoidal integration of the probability density function::
pdf, bins, patches = ax.hist(...)
print np.sum(pdf * np.diff(bins))
.. note::
Until numpy release 1.5, the underlying numpy
histogram function was incorrect with *normed*=*True*
if bin sizes were unequal. MPL inherited that
error. It is now corrected within MPL when using
earlier numpy versions
*weights*:
An array of weights, of the same shape as *x*. Each value in
*x* only contributes its associated weight towards the bin
count (instead of 1). If *normed* is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
*cumulative*:
If *True*, then a histogram is computed where each bin
gives the counts in that bin plus all bins for smaller values.
The last bin gives the total number of datapoints. If *normed*
is also *True* then the histogram is normalized such that the
last bin equals 1. If *cumulative* evaluates to less than 0
(e.g. -1), the direction of accumulation is reversed. In this
case, if *normed* is also *True*, then the histogram is normalized
such that the first bin equals 1.
*histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given, the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
*align*: ['left' | 'mid' | 'right' ]
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
*orientation*: [ 'horizontal' | 'vertical' ]
If 'horizontal', :func:`~matplotlib.pyplot.barh` will be
used for bar-type histograms and the *bottom* kwarg will be
the left edges.
*rwidth*:
The relative width of the bars as a fraction of the bin
width. If *None*, automatically compute the width. Ignored
if *histtype* = 'step' or 'stepfilled'.
*log*:
If *True*, the histogram axis will be set to a log scale.
If *log* is *True* and *x* is a 1D array, empty bins will
be filtered out and only the non-empty (*n*, *bins*,
*patches*) will be returned.
*color*:
Color spec or sequence of color specs, one per
dataset. Default (*None*) uses the standard line
color sequence.
*label*:
String, or sequence of strings to match multiple
datasets. Bar charts yield multiple patches per
dataset, but only the first gets the label, so
that the legend command will work as expected::
ax.hist(10+2*np.random.randn(1000), label='men')
ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)
ax.legend()
*stacked*:
If *True*, multiple data are stacked on top of each other.
If *False*, multiple data are arranged side by side if
histtype is 'bar', or on top of each other if histtype is
'step'.
kwargs are used to update the properties of the
:class:`~matplotlib.patches.Patch` instances returned by *hist*:
%(Patch)s
**Example:**
.. plot:: mpl_examples/pylab_examples/histogram_demo.py
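For illustration, a minimal sketch with synthetic data::
import numpy as np
data = np.random.randn(1000)
n, bins, patches = ax.hist(data, bins=30, normed=True,
facecolor='green', alpha=0.75)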
"""
if not self._hold: self.cla()
# xrange becomes range after 2to3
bin_range = range
range = __builtins__["range"]
# NOTE: the range keyword overwrites the built-in func range !!!
# needs to be fixed in numpy !!!
# Validate string inputs here so we don't have to clutter
# subsequent code.
if histtype not in ['bar', 'barstacked', 'step', 'stepfilled']:
raise ValueError("histtype %s is not recognized" % histtype)
if align not in ['left', 'mid', 'right']:
raise ValueError("align kwarg %s is not recognized" % align)
if orientation not in [ 'horizontal', 'vertical']:
raise ValueError(
"orientation kwarg %s is not recognized" % orientation)
if kwargs.get('width') is not None:
raise mplDeprecation(
'hist now uses the rwidth to give relative width '
'and not absolute width')
if histtype == 'barstacked' and not stacked:
stacked=True
# Massage 'x' for processing.
# NOTE: Be sure any changes here is also done below to 'weights'
if isinstance(x, np.ndarray) or not iterable(x[0]):
# TODO: support masked arrays;
x = np.asarray(x)
if x.ndim == 2:
x = x.T # 2-D input with columns as datasets; switch to rows
elif x.ndim == 1:
x = x.reshape(1, x.shape[0]) # new view, single row
else:
raise ValueError("x must be 1D or 2D")
if x.shape[1] < x.shape[0]:
warnings.warn('2D hist input should be nsamples x nvariables;\n '
'this looks transposed (shape is %d x %d)' % x.shape[::-1])
else:
# multiple hist with data of different length
x = [np.asarray(xi) for xi in x]
nx = len(x) # number of datasets
if color is None:
color = [self._get_lines.color_cycle.next()
for i in xrange(nx)]
else:
color = mcolors.colorConverter.to_rgba_array(color)
if len(color) != nx:
raise ValueError("color kwarg must have one color per dataset")
# We need to do to 'weights' what was done to 'x'
if weights is not None:
if isinstance(weights, np.ndarray) or not iterable(weights[0]) :
w = np.array(weights)
if w.ndim == 2:
w = w.T
elif w.ndim == 1:
w.shape = (1, w.shape[0])
else:
raise ValueError("weights must be 1D or 2D")
else:
w = [np.asarray(wi) for wi in weights]
if len(w) != nx:
raise ValueError('weights should have the same shape as x')
for i in xrange(nx):
if len(w[i]) != len(x[i]):
raise ValueError(
'weights should have the same shape as x')
else:
w = [None]*nx
# Save autoscale state for later restoration; turn autoscaling
# off so we can do it all a single time at the end, instead
# of having it done by bar or fill and then having to be redone.
_saved_autoscalex = self.get_autoscalex_on()
_saved_autoscaley = self.get_autoscaley_on()
self.set_autoscalex_on(False)
self.set_autoscaley_on(False)
# Save the datalimits for the same reason:
_saved_bounds = self.dataLim.bounds
# Check whether bins or range are given explicitly. In that
# case use those values for autoscaling.
binsgiven = (cbook.iterable(bins) or bin_range is not None)
# If bins are not specified either explicitly or via range,
# we need to figure out the range required for all datasets,
# and supply that to np.histogram.
if not binsgiven:
xmin = np.inf
xmax = -np.inf
for xi in x:
xmin = min(xmin, xi.min())
xmax = max(xmax, xi.max())
bin_range = (xmin, xmax)
#hist_kwargs = dict(range=range, normed=bool(normed))
# We will handle the normed kwarg within mpl until we
# get to the point of requiring numpy >= 1.5.
hist_kwargs = dict(range=bin_range)
if np.__version__ < "1.3": # version 1.1 and 1.2
hist_kwargs['new'] = True
n = []
mlast = bottom
for i in xrange(nx):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, weights=w[i], **hist_kwargs)
if mlast is None:
mlast = np.zeros(len(bins)-1, m.dtype)
if normed:
db = np.diff(bins)
m = (m.astype(float) / db) / m.sum()
if stacked:
m += mlast
mlast[:] = m
n.append(m)
if cumulative:
slc = slice(None)
if cbook.is_numlike(cumulative) and cumulative < 0:
slc = slice(None,None,-1)
if normed:
n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
else:
n = [m[slc].cumsum()[slc] for m in n]
patches = []
if histtype.startswith('bar'):
totwidth = np.diff(bins)
if rwidth is not None:
dr = min(1.0, max(0.0, rwidth))
elif len(n)>1:
dr = 0.8
else:
dr = 1.0
if histtype=='bar' and not stacked:
width = dr*totwidth/nx
dw = width
if nx > 1:
boffset = -0.5*dr*totwidth*(1.0-1.0/nx)
else:
boffset = 0.0
stacked = False
elif histtype=='barstacked' or stacked:
width = dr*totwidth
boffset, dw = 0.0, 0.0
if align == 'mid' or align == 'edge':
boffset += 0.5*totwidth
elif align == 'right':
boffset += totwidth
if orientation == 'horizontal':
_barfunc = self.barh
else: # orientation == 'vertical'
_barfunc = self.bar
for m, c in zip(n, color):
if bottom is None:
bottom = np.zeros(len(m), np.float)
if stacked:
height = m - bottom
else :
height = m
patch = _barfunc(bins[:-1]+boffset, height, width,
align='center', log=log,
color=c, bottom=bottom)
patches.append(patch)
if stacked:
bottom[:] = m
boffset += dw
elif histtype.startswith('step'):
# these define the perimeter of the polygon
x = np.zeros( 4*len(bins)-3, np.float )
y = np.zeros( 4*len(bins)-3, np.float )
x[0:2*len(bins)-1:2], x[1:2*len(bins)-1:2] = bins, bins[:-1]
x[2*len(bins)-1:] = x[1:2*len(bins)-1][::-1]
if log:
if orientation == 'horizontal':
self.set_xscale('log', nonposx = 'clip')
logbase = self.xaxis._scale.base
else: # orientation == 'vertical'
self.set_yscale('log', nonposy = 'clip')
logbase = self.yaxis._scale.base
# Setting a minimum of 0 results in problems for log plots
if normed:
# For normed data, set to log base * minimum data value
# (gives 1 full tick-label unit for the lowest filled bin)
ndata = np.array(n)
minimum = (np.min(ndata[ndata>0])) / logbase
else:
# For non-normed data, set the min to log base, again so that
# there is 1 full tick-label unit for the lowest bin
minimum = 1.0 / logbase
y[0], y[-1] = minimum, minimum
else:
minimum = np.min(bins)
if align == 'left' or align == 'center':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
# If fill kwarg is set, it will be passed to the patch collection,
# overriding this
fill = (histtype == 'stepfilled')
xvals, yvals = [], []
for m in n:
# starting point for drawing polygon
y[0] = y[-1]
# top of the previous polygon becomes the bottom
y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]
# set the top of this polygon
y[1:2*len(bins)-1:2], y[2:2*len(bins):2] = m, m
if log:
y[y<minimum]=minimum
if orientation == 'horizontal':
x,y = y,x
xvals.append(x.copy())
yvals.append(y.copy())
# add patches in reverse order so that when stacking,
# items lower in the stack are plotted on top of
# items higher in the stack
for x, y, c in reversed(zip(xvals, yvals, color)):
if fill:
patches.append( self.fill(x, y,
closed=False,
facecolor=c) )
else:
patches.append( self.fill(x, y,
closed=False, edgecolor=c,
fill=False) )
# we return patches, so put it back in the expected order
patches.reverse()
# adopted from adjust_x/ylim part of the bar method
if orientation == 'horizontal':
xmin0 = max(_saved_bounds[0]*0.9, minimum)
xmax = self.dataLim.intervalx[1]
for m in n:
xmin = np.amin(m[m!=0]) # filter out the 0 height bins
xmin = max(xmin*0.9, minimum)
xmin = min(xmin0, xmin)
self.dataLim.intervalx = (xmin, xmax)
elif orientation == 'vertical':
ymin0 = max(_saved_bounds[1]*0.9, minimum)
ymax = self.dataLim.intervaly[1]
for m in n:
ymin = np.amin(m[m!=0]) # filter out the 0 height bins
ymin = max(ymin*0.9, minimum)
ymin = min(ymin0, ymin)
self.dataLim.intervaly = (ymin, ymax)
if label is None:
labels = [None]
elif is_string_like(label):
labels = [label]
elif is_sequence_of_strings(label):
labels = list(label)
else:
raise ValueError('invalid label: must be string or sequence of strings')
if len(labels) < nx:
labels += [None] * (nx - len(labels))
for (patch, lbl) in zip(patches, labels):
if patch:
p = patch[0]
p.update(kwargs)
if lbl is not None: p.set_label(lbl)
p.set_snap(False)
for p in patch[1:]:
p.update(kwargs)
p.set_label('_nolegend_')
if binsgiven:
if orientation == 'vertical':
self.update_datalim([(bins[0],0), (bins[-1],0)], updatey=False)
else:
self.update_datalim([(0,bins[0]), (0,bins[-1])], updatex=False)
self.set_autoscalex_on(_saved_autoscalex)
self.set_autoscaley_on(_saved_autoscaley)
self.autoscale_view()
if nx == 1:
return n[0], bins, cbook.silent_list('Patch', patches[0])
else:
return n, bins, cbook.silent_list('Lists of Patches', patches)
@docstring.dedent_interpd
def hist2d(self, x, y, bins = 10, range=None, normed=False, weights=None,
cmin=None, cmax=None, **kwargs):
"""
Make a 2D histogram plot.
Call signature::
hist2d(x, y, bins=10, range=None, normed=False, weights=None, cmin=None, cmax=None, **kwargs)
Make a 2d histogram plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length.
The return value is ``(counts, xedges, yedges, Image)``.
Optional keyword arguments:
*bins*: [None | int | [int, int] | array_like | [array, array]]
The bin specification:
- If int, the number of bins for the two dimensions
(nx=ny=bins).
- If [int, int], the number of bins in each dimension
(nx, ny = bins).
- If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
- If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
The default value is 10.
*range*: [*None* | array_like shape(2,2)]
The leftmost and rightmost edges of the bins along each
dimension (if not specified explicitly in the bins
parameters): [[xmin, xmax], [ymin, ymax]]. All values
outside of this range will be considered outliers and not
tallied in the histogram.
*normed*: [ True | False ]
Normalize the histogram.
The default value is *False*.
*weights*: [*None* | array]
An array of values w_i weighing each sample (x_i, y_i).
*cmin*: [ *None* | scalar ]
All bins that have a count less than *cmin* will not be
displayed, and these count values in the returned count
histogram will also be set to *nan*.
*cmax*: [ *None* | scalar ]
All bins that have a count more than *cmax* will not be
displayed (set to *None* before passing to imshow), and
these count values in the returned count histogram will
also be set to *nan*.
Remaining keyword arguments are passed directly to :meth:`pcolorfast`.
Rendering the histogram with a logarithmic color scale is
accomplished by passing a :class:`colors.LogNorm` instance to
the *norm* keyword argument.
**Example:**
.. plot:: mpl_examples/pylab_examples/hist2d_demo.py
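For illustration, a minimal sketch with synthetic data::
import numpy as np
x = np.random.randn(10000)
y = np.random.randn(10000) + 1.0
counts, xedges, yedges, image = ax.hist2d(x, y, bins=40)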
"""
# xrange becomes range after 2to3
bin_range = range
range = __builtins__["range"]
h,xedges,yedges = np.histogram2d(x, y, bins=bins, range=bin_range,
normed=normed, weights=weights)
if cmin is not None: h[h<cmin]=None
if cmax is not None: h[h>cmax]=None
pc = self.pcolorfast(xedges,yedges,h.T,**kwargs)
self.set_xlim(xedges[0],xedges[-1])
self.set_ylim(yedges[0],yedges[-1])
return h,xedges,yedges,pc
@docstring.dedent_interpd
def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
Plot the power spectral density.
Call signature::
psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The power spectral density by Welch's average periodogram
method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute *Pxx*, with a
scaling to correct for power loss due to windowing. *Fs* is the
sampling frequency.
%(PSD)s
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*Pxx*, *freqs*).
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/psd_demo.py
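For illustration, a minimal sketch with a synthetic noisy sine wave::
import numpy as np
dt = 0.01
t = np.arange(0, 10, dt)
sig = np.sin(2 * np.pi * 5 * t) + 0.5 * np.random.randn(len(t))
Pxx, freqs = ax.psd(sig, NFFT=256, Fs=1.0 / dt)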
"""
if not self._hold: self.cla()
pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to,
sides, scale_by_freq)
pxx.shape = len(freqs),
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
self.plot(freqs, 10*np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density (%s)' % psd_units)
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
#print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxx, freqs
@docstring.dedent_interpd
def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
Plot cross-spectral density.
Call signature::
csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
Returns the tuple (*Pxy*, *freqs*). *P* is the cross spectrum
(complex valued), and :math:`10\log_{10}|P_{xy}|` is
plotted.
%(PSD)s
*noverlap*: integer
The number of points of overlap between blocks. The
default value is 0 (no overlap).
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the Line2D properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/csd_demo.py
.. seealso:
:meth:`psd`
For a description of the optional parameters.
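For illustration, a minimal sketch with two synthetic signals that
share a common 5 Hz component::
import numpy as np
t = np.arange(0, 10, 0.01)
common = np.sin(2 * np.pi * 5 * t)
s1 = common + np.random.randn(len(t))
s2 = common + np.random.randn(len(t))
Pxy, freqs = ax.csd(s1, s2, NFFT=256, Fs=100)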
"""
if not self._hold: self.cla()
pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
pxy.shape = len(freqs),
# pxy is complex
freqs += Fc
self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
step = 10*int(np.log10(intv))
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxy, freqs
@docstring.dedent_interpd
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
Plot the coherence between *x* and *y*.
Call signature::
cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
window = mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
Plot the coherence between *x* and *y*. Coherence is the
normalized cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
%(PSD)s
*noverlap*: integer
The number of points of overlap between blocks. The
default value is 0 (no overlap).
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
The return value is a tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector.
kwargs are applied to the lines.
References:
* Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D`
properties of the coherence plot:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/cohere_demo.py
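For illustration, a minimal sketch with two synthetic signals that
share a common component::
import numpy as np
t = np.arange(0, 10, 0.01)
common = np.sin(2 * np.pi * 10 * t)
s1 = common + 0.5 * np.random.randn(len(t))
s2 = common + 0.5 * np.random.randn(len(t))
Cxy, f = ax.cohere(s1, s2, NFFT=256, Fs=100)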
"""
if not self._hold: self.cla()
cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,
scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
@docstring.dedent_interpd
def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None, **kwargs):
"""
Plot a spectrogram.
Call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None, **kwargs)
Compute a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the PSD of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
%(PSD)s
*noverlap*: integer
The number of points of overlap between blocks. The
default value is 128.
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the y extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.colors.Colormap` instance; if *None*, use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`~matplotlib.mlab.specgram`
*kwargs*:
Additional kwargs are passed on to imshow which makes the
specgram image
Return value is (*Pxx*, *freqs*, *bins*, *im*):
- *bins* are the time points the spectrogram is calculated over
- *freqs* is an array of frequencies
          - *Pxx* is an array of shape `(len(freqs), len(bins))` of power
- *im* is a :class:`~matplotlib.image.AxesImage` instance
Note: If *x* is real (i.e. non-complex), only the positive
spectrum is shown. If *x* is complex, both positive and
negative parts of the spectrum are shown. This can be
overridden using the *sides* keyword argument.
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
"""
if not self._hold: self.cla()
Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,
window, noverlap, pad_to, sides, scale_by_freq)
Z = 10. * np.log10(Pxx)
Z = np.flipud(Z)
if xextent is None: xextent = 0, np.amax(bins)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent, **kwargs)
self.axis('auto')
return Pxx, freqs, bins, im
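    # A minimal usage sketch (illustrative only): a tone whose frequency rises
    # over time shows up as a rising ridge in the spectrogram.
    #
    #   import numpy as np
    #   import matplotlib.pyplot as plt
    #   t = np.arange(0, 20, 1e-3)
    #   x = np.sin(2 * np.pi * (50 + 5 * t) * t)
    #   fig, ax = plt.subplots()
    #   Pxx, freqs, bins, im = ax.specgram(x, NFFT=256, Fs=1000, noverlap=128)
    #   fig.colorbar(im, ax=ax)
    #
    # Pxx has one row per frequency bin and one column per time segment, i.e.
    # shape (len(freqs), len(bins)).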
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs):
"""
        Plot the sparsity pattern of a 2-D array.
Call signature::
spy(Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs)
``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
If *precision* is 0, any non-zero value will be plotted;
else, values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, there is a
special case: if *precision* is 'present', any value present in
the array will be plotted, even if it is identically zero.
The array will be plotted as it would be printed, with
the first index (row) increasing down and the second
index (column) increasing to the right.
By default aspect is 'equal', so that each array element
occupies a square space; set the aspect kwarg to 'auto'
to allow the plot to fill the plot box, or to any scalar
number to specify the aspect ratio of an array element
directly.
Two plotting styles are available: image or marker. Both
are available for full arrays, but only the marker style
works for :class:`scipy.sparse.spmatrix` instances.
If *marker* and *markersize* are *None*, an image will be
returned and any remaining kwargs are passed to
:func:`~matplotlib.pyplot.imshow`; else, a
:class:`~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to the
:meth:`~matplotlib.axes.Axes.plot` method.
If *marker* and *markersize* are *None*, useful kwargs include:
* *cmap*
* *alpha*
.. seealso::
:func:`~matplotlib.pyplot.imshow`
For image options.
For controlling colors, e.g. cyan background and red marks,
use::
cmap = mcolors.ListedColormap(['c','r'])
If *marker* or *markersize* is not *None*, useful kwargs include:
* *marker*
* *markersize*
* *color*
Useful values for *marker* include:
* 's' square (default)
* 'o' circle
* '.' point
* ',' pixel
.. seealso::
:func:`~matplotlib.pyplot.plot`
For plotting options
"""
if precision is None:
precision = 0
warnings.warn("Use precision=0 instead of None", mplDeprecation)
# 2008/10/03
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.absolute(Z)>precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
extent=extent, origin='upper', **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.absolute(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.absolute(Z)>precision
y, x = np.nonzero(nonzero)
if marker is None: marker = 's'
if markersize is None: markersize = 10
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(xmin=-0.5, xmax=nc-0.5)
self.set_ylim(ymin=nr-0.5, ymax=-0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
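    # A minimal usage sketch (illustrative only) contrasting the two styles:
    #
    #   import numpy as np
    #   import matplotlib.pyplot as plt
    #   Z = np.diag(np.ones(20)) + np.diag(np.ones(19), k=1)
    #   fig, (ax1, ax2) = plt.subplots(1, 2)
    #   ax1.spy(Z)                            # image style, square cells
    #   ax2.spy(Z, marker='.', markersize=5)  # Line2D marker style
    #
    # Only the marker style works for scipy.sparse matrices, since they cannot
    # be handed to imshow directly.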
def matshow(self, Z, **kwargs):
"""
Plot a matrix or array as an image.
The matrix will be shown the way it would be printed,
with the first row at the top. Row and column numbering
is zero-based.
Argument:
*Z* anything that can be interpreted as a 2-D array
kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`.
:meth:`matshow` sets defaults for *origin*,
*interpolation*, and *aspect*; if you want row zero to
be at the bottom instead of the top, you can set the *origin*
kwarg to "lower".
Returns: an :class:`matplotlib.image.AxesImage` instance.
"""
Z = np.asanyarray(Z)
nr, nc = Z.shape
kw = {'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'} # (already the imshow default)
kw.update(kwargs)
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
def get_default_bbox_extra_artists(self):
bbox_extra_artists = [t for t in self.texts if t.get_visible()]
if self.legend_:
bbox_extra_artists.append(self.legend_)
return bbox_extra_artists
def get_tightbbox(self, renderer, call_axes_locator=True):
"""
Return the tight bounding box of the axes.
        The dimensions of the Bbox are in canvas coordinates.
        If *call_axes_locator* is *False*, it does not call the
        _axes_locator attribute, which is necessary to get the correct
        bounding box. ``call_axes_locator==False`` can be used if the
        caller is only interested in the relative size of the tightbbox
        compared to the axes bbox.
"""
artists = []
bb = []
if not self.get_visible():
return None
locator = self.get_axes_locator()
if locator and call_axes_locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
bb.append(self.get_window_extent(renderer))
if self.title.get_visible():
bb.append(self.title.get_window_extent(renderer))
bb_xaxis = self.xaxis.get_tightbbox(renderer)
if bb_xaxis: bb.append(bb_xaxis)
bb_yaxis = self.yaxis.get_tightbbox(renderer)
if bb_yaxis: bb.append(bb_yaxis)
_bbox = mtransforms.Bbox.union([b for b in bb if b.width!=0 or b.height!=0])
return _bbox
def minorticks_on(self):
'Add autoscaling minor ticks to the axes.'
for ax in (self.xaxis, self.yaxis):
if ax.get_scale() == 'log':
s = ax._scale
ax.set_minor_locator(mticker.LogLocator(s.base, s.subs))
else:
ax.set_minor_locator(mticker.AutoMinorLocator())
def minorticks_off(self):
"""Remove minor ticks from the axes."""
self.xaxis.set_minor_locator(mticker.NullLocator())
self.yaxis.set_minor_locator(mticker.NullLocator())
def tricontour(self, *args, **kwargs):
return mtri.tricontour(self, *args, **kwargs)
tricontour.__doc__ = mtri.TriContourSet.tricontour_doc
def tricontourf(self, *args, **kwargs):
return mtri.tricontourf(self, *args, **kwargs)
tricontourf.__doc__ = mtri.TriContourSet.tricontour_doc
def tripcolor(self, *args, **kwargs):
return mtri.tripcolor(self, *args, **kwargs)
tripcolor.__doc__ = mtri.tripcolor.__doc__
def triplot(self, *args, **kwargs):
mtri.triplot(self, *args, **kwargs)
triplot.__doc__ = mtri.triplot.__doc__
from matplotlib.gridspec import GridSpec, SubplotSpec
class SubplotBase:
"""
Base class for subplots, which are :class:`Axes` instances with
additional methods to facilitate generating and manipulating a set
of :class:`Axes` within a figure.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
        If *numRows*, *numCols* and *plotNum* are all single-digit integers,
        *args* can be the decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
"""
self.figure = fig
if len(args) == 1:
if isinstance(args[0], SubplotSpec):
self._subplotspec = args[0]
else:
try:
s = str(int(args[0]))
rows, cols, num = map(int, s)
except ValueError:
raise ValueError(
'Single argument to subplot must be a 3-digit integer')
self._subplotspec = GridSpec(rows, cols)[num-1]
# num - 1 for converting from MATLAB to python indexing
elif len(args)==3:
rows, cols, num = args
rows = int(rows)
cols = int(cols)
if isinstance(num, tuple) and len(num) == 2:
num = [int(n) for n in num]
self._subplotspec = GridSpec(rows, cols)[num[0]-1:num[1]]
else:
self._subplotspec = GridSpec(rows, cols)[int(num)-1]
# num - 1 for converting from MATLAB to python indexing
else:
raise ValueError('Illegal argument(s) to subplot: %s' % (args,))
self.update_params()
# _axes_class is set in the subplot_class_factory
self._axes_class.__init__(self, fig, self.figbox, **kwargs)
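    # The single-integer shorthand packs (numRows, numCols, plotNum) into one
    # three-digit number, so the following calls are equivalent (sketch,
    # assuming an existing Figure instance ``fig``):
    #
    #   fig.add_subplot(2, 2, 1)
    #   fig.add_subplot(221)
    #
    # Both resolve to GridSpec(2, 2)[0], the upper-left cell of a 2x2 grid.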
def __reduce__(self):
# get the first axes class which does not inherit from a subplotbase
not_subplotbase = lambda c: issubclass(c, Axes) and \
not issubclass(c, SubplotBase)
axes_class = [c for c in self.__class__.mro() if not_subplotbase(c)][0]
r = [_PicklableSubplotClassConstructor(),
(axes_class,),
self.__getstate__()]
return tuple(r)
def get_geometry(self):
"""get the subplot geometry, eg 2,2,3"""
rows, cols, num1, num2 = self.get_subplotspec().get_geometry()
return rows, cols, num1+1 # for compatibility
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
"""change subplot geometry, eg. from 1,1,1 to 2,2,3"""
self._subplotspec = GridSpec(numrows, numcols)[num-1]
self.update_params()
self.set_position(self.figbox)
def get_subplotspec(self):
"""get the SubplotSpec instance associated with the subplot"""
return self._subplotspec
def set_subplotspec(self, subplotspec):
"""set the SubplotSpec instance associated with the subplot"""
self._subplotspec = subplotspec
def update_params(self):
"""update the subplot position from fig.subplotpars"""
self.figbox, self.rowNum, self.colNum, self.numRows, self.numCols = \
self.get_subplotspec().get_position(self.figure,
return_all=True)
def is_first_col(self):
return self.colNum==0
def is_first_row(self):
return self.rowNum==0
def is_last_row(self):
return self.rowNum==self.numRows-1
def is_last_col(self):
return self.colNum==self.numCols-1
# COVERAGE NOTE: Never used internally or from examples
def label_outer(self):
"""
set the visible property on ticklabels so xticklabels are
visible only if the subplot is in the last row and yticklabels
are visible only if the subplot is in the first column
"""
lastrow = self.is_last_row()
firstcol = self.is_first_col()
for label in self.get_xticklabels():
label.set_visible(lastrow)
for label in self.get_yticklabels():
label.set_visible(firstcol)
def _make_twin_axes(self, *kl, **kwargs):
"""
make a twinx axes of self. This is used for twinx and twiny.
"""
ax2 = self.figure.add_subplot(self.get_subplotspec(), *kl, **kwargs)
return ax2
_subplot_classes = {}
def subplot_class_factory(axes_class=None):
# This makes a new class that inherits from SubplotBase and the
# given axes_class (which is assumed to be a subclass of Axes).
# This is perhaps a little bit roundabout to make a new class on
# the fly like this, but it means that a new Subplot class does
# not have to be created for every type of Axes.
if axes_class is None:
axes_class = Axes
new_class = _subplot_classes.get(axes_class)
if new_class is None:
new_class = type("%sSubplot" % (axes_class.__name__),
(SubplotBase, axes_class),
{'_axes_class': axes_class})
_subplot_classes[axes_class] = new_class
return new_class
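# Sketch of what the factory builds: for the plain Axes class it creates (and
# caches) a type equivalent to
#
#   class AxesSubplot(SubplotBase, Axes):
#       _axes_class = Axes
#
# so repeated calls with the same axes_class return the identical class object:
#
#   subplot_class_factory(Axes) is subplot_class_factory(Axes)   # True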
# This is provided for backward compatibility
Subplot = subplot_class_factory()
class _PicklableSubplotClassConstructor(object):
"""
This stub class exists to return the appropriate subplot
class when __call__-ed with an axes class. This is purely to
allow Pickling of Axes and Subplots.
"""
def __call__(self, axes_class):
# create a dummy object instance
subplot_instance = _PicklableSubplotClassConstructor()
subplot_class = subplot_class_factory(axes_class)
# update the class to the desired subplot class
subplot_instance.__class__ = subplot_class
return subplot_instance
docstring.interpd.update(Axes=martist.kwdoc(Axes))
docstring.interpd.update(Subplot=martist.kwdoc(Axes))
"""
# this is some discarded code I was using to find the minimum positive
# data point for some log scaling fixes. I realized there was a
# cleaner way to do it, but am keeping this around as an example for
# how to get the data out of the axes. Might want to make something
# like this a method one day, or better yet make get_verts an Artist
# method
minx, maxx = self.get_xlim()
if minx<=0 or maxx<=0:
# find the min pos value in the data
xs = []
for line in self.lines:
xs.extend(line.get_xdata(orig=False))
for patch in self.patches:
xs.extend([x for x,y in patch.get_verts()])
for collection in self.collections:
xs.extend([x for x,y in collection.get_verts()])
posx = [x for x in xs if x>0]
if len(posx):
minx = min(posx)
maxx = max(posx)
# warning, probably breaks inverted axis
self.set_xlim((0.1*minx, maxx))
"""
| mit |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/matplotlib/backends/backend_wxagg.py | 10 | 5840 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib
from matplotlib.figure import Figure
from .backend_agg import FigureCanvasAgg
from . import wx_compat as wxc
from . import backend_wx
from .backend_wx import (FigureManagerWx, FigureCanvasWx,
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, Toolbar)
import wx
show = backend_wx.Show()
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2WxAgg(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
DEBUG_MSG("draw()", 1, self)
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC, origin='WXAgg')
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
filetypes = FigureCanvasAgg.filetypes
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
        # Restore the current view; this is needed because the
        # artist's methods rely on particular attributes of the
        # rendered figure for determining things like bounding boxes.
if self._isDrawn:
self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
def get_canvas(self, frame, fig):
return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
backend_wx._create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, fig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
frame = FigureFrameWxAgg(num, figure)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
figure.canvas.draw_idle()
return figmgr
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image
image = wxc.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
return image
else:
# agg => rgba buffer -> bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wxc.BitmapFromBuffer(int(agg.width), int(agg.height),
agg.buffer_rgba())
else:
# agg => rgba buffer -> bitmap => clipped bitmap
return _WX28_clipped_agg_as_bitmap(agg, bbox)
def _WX28_clipped_agg_as_bitmap(agg, bbox):
"""
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
Note: agg must be a backend_agg.RendererAgg instance.
"""
l, b, width, height = bbox.bounds
r = l + width
t = b + height
srcBmp = wxc.BitmapFromBuffer(int(agg.width), int(agg.height),
agg.buffer_rgba())
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wxc.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
x = int(l)
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
FigureCanvas = FigureCanvasWxAgg
FigureManager = FigureManagerWx
| bsd-2-clause |
hdmetor/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
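# Rough guide to the main tuning knob (a sketch, not part of the original
# example): ``quantile`` in estimate_bandwidth controls which pairwise-distance
# quantile becomes the kernel bandwidth, e.g.
#
#   estimate_bandwidth(X, quantile=0.1)  # smaller bandwidth, more clusters
#   estimate_bandwidth(X, quantile=0.3)  # larger bandwidth, fewer clusters
#
# With the three blobs generated above and quantile=0.2, the estimated number
# of clusters is typically 3.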
| bsd-3-clause |
bioconda/bioconda-utils | test/conftest.py | 1 | 5084 | import contextlib
import datetime
import os
import os.path as op
import shutil
import tempfile
from copy import deepcopy
from ruamel_yaml import YAML
import pandas as pd
import pytest
import py
from bioconda_utils import utils
yaml = YAML(typ="rt") # pylint: disable=invalid-name
# common settings
TEST_RECIPES_FOLDER = 'recipes'
TEST_CONFIG_YAML_FNAME = 'config.yaml'
TEST_CONFIG_YAML = {
'blacklists': [],
'channels': []
}
def pytest_runtest_makereport(item, call):
if "successive" in item.keywords:
# if we failed, mark parent with callspec id (name from test args)
if call.excinfo is not None:
item.parent.failedcallspec = item.callspec.id
def pytest_runtest_setup(item):
if "successive" in item.keywords:
if getattr(item.parent, "failedcallspec", None) == item.callspec.id:
pytest.xfail("preceding test failed")
@pytest.fixture
def mock_repodata(repodata, case):
    """Prepares RepoData singleton to contain mock data
Expects function to be parametrized with ``case`` and ``repodata``,
where ``case`` may contain a ``repodata`` key to be added to
base ``repodata`` contents.
``repodata`` should be of this form::
<channel>:
<package_name>:
- <key>: value
<key>: value
E.g.::
bioconda:
package_one:
- version: 0.1
build_number: 0
"""
if 'repodata' in case:
data = deepcopy(repodata)
dict_merge(data, case['repodata'])
else:
data = repodata
dataframe = pd.DataFrame(columns=utils.RepoData.columns)
for channel, packages in data.items():
for name, versions in packages.items():
for item in versions:
pkg = {
'channel': channel,
'name': name,
'build': '',
'build_number': 0,
'version': 0,
'depends': [],
'subdir': '',
'platform': 'noarch',
}
pkg.update(item)
dataframe = dataframe.append(pkg, ignore_index=True)
backup = utils.RepoData()._df, utils.RepoData()._df_ts
utils.RepoData()._df = dataframe
utils.RepoData()._df_ts = datetime.datetime.now()
yield
utils.RepoData()._df, utils.RepoData()._df_ts = backup
@pytest.fixture
def recipes_folder(tmpdir: py.path.local):
"""Prepares a temp dir with '/recipes' folder as configured"""
orig_cwd = tmpdir.chdir()
yield tmpdir.mkdir(TEST_RECIPES_FOLDER)
orig_cwd.chdir()
def dict_merge(base, add):
for key, value in add.items():
if isinstance(value, dict):
base[key] = dict_merge(base.get(key, {}), value)
elif isinstance(base, list):
for num in range(len(base)):
base[num][key] = dict_merge(base[num].get(key, {}), add)
else:
base[key] = value
return base
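def _dict_merge_example():
    """Illustrative sketch (never called): nested dicts from ``add`` are merged
    into ``base`` recursively, while scalar values simply overwrite."""
    base = {'a': {'b': 1}}
    add = {'a': {'c': 2}, 'd': 3}
    assert dict_merge(base, add) == {'a': {'b': 1, 'c': 2}, 'd': 3}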
@pytest.fixture
def config_file(tmpdir: py.path.local, case):
"""Prepares Bioconda config.yaml"""
if 'add_root_files' in case:
for fname, data in case['add_root_files'].items():
with tmpdir.join(fname).open('w') as fdes:
fdes.write(data)
data = deepcopy(TEST_CONFIG_YAML)
if 'config' in case:
dict_merge(data, case['config'])
config_fname = tmpdir.join(TEST_CONFIG_YAML_FNAME)
with config_fname.open('w') as fdes:
yaml.dump(data, fdes)
yield config_fname
@pytest.fixture
def recipe_dir(recipes_folder: py.path.local, tmpdir: py.path.local,
case, recipe_data):
"""Prepares a recipe from recipe_data in recipes_folder"""
recipe = deepcopy(recipe_data['meta.yaml'])
if 'remove' in case:
for remove in utils.ensure_list(case['remove']):
path = remove.split('/')
cont = recipe
for p in path[:-1]:
cont = cont[p]
if isinstance(cont, list):
for n in range(len(cont)):
del cont[n][path[-1]]
else:
del cont[path[-1]]
if 'add' in case:
dict_merge(recipe, case['add'])
recipe_dir = recipes_folder.mkdir(recipe_data['folder'])
with recipe_dir.join('meta.yaml').open('w') as fdes:
yaml.dump(recipe, fdes,
transform=lambda l: l.replace('#{%', '{%').replace("#{{", "{{"))
if 'add_files' in case:
for fname, data in case['add_files'].items():
with recipe_dir.join(fname).open('w') as fdes:
fdes.write(data)
if 'move_files' in case:
for src, dest in case['move_files'].items():
src_path = recipe_dir.join(src)
if not dest:
if os.path.isdir(src_path):
shutil.rmtree(src_path)
else:
os.remove(src_path)
else:
dest_path = recipe_dir.join(dest)
shutil.move(src_path, dest_path)
yield recipe_dir
| mit |
marcocaccin/scikit-learn | sklearn/covariance/tests/test_covariance.py | 34 | 11120 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
assert_greater(np.amin(mahal_dist), 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
johannfaouzi/pyts | pyts/metrics/lower_bounds.py | 1 | 12840 | """Code for Lower Bounds of Dynamic Time Warping."""
# Author: Johann Faouzi <[email protected]>
# License: BSD-3-Clause
import numpy as np
from math import sqrt
from numba import njit, prange
from sklearn.utils import check_array
def _check_consistent_lengths(X, Y):
n_timestamps_X, n_timestamps_Y = X.shape[-1], Y.shape[-1]
if not n_timestamps_X == n_timestamps_Y:
raise ValueError(
"Found input variables with inconsistent numbers of "
"timestamps: [{0}, {1}]".format(n_timestamps_X, n_timestamps_Y)
)
@njit()
def _lower_bound_yi_x_y(x, x_min, x_max, y, y_min, y_max):
if x_max >= y_max:
if x_min < y_min:
sum1 = np.sum(np.square(x[x > y_max] - y_max))
sum2 = np.sum(np.square(x[x < y_min] - y_min))
return sqrt(sum1 + sum2)
elif x_min > y_max:
return sqrt(max(np.sum(np.square(x - y_max)),
np.sum(np.square(y - x_min))))
else:
sum1 = np.sum(np.square(x[x > y_max] - y_max))
sum2 = np.sum(np.square(y[y < x_min] - x_min))
return sqrt(sum1 + sum2)
else:
if y_min < x_min:
sum1 = np.sum(np.square(y[y > x_max] - x_max))
sum2 = np.sum(np.square(y[y < x_min] - x_min))
return sqrt(sum1 + sum2)
elif y_min > x_max:
return sqrt(max(np.sum(np.square(y - x_max)),
np.sum(np.square(x - y_min))))
else:
sum1 = np.sum(np.square(y[y > x_max] - x_max))
sum2 = np.sum(np.square(x[x < y_min] - y_min))
return sqrt(sum1 + sum2)
@njit()
def _lower_bound_yi_X_Y(X, X_min, X_max, Y, Y_min, Y_max):
n_samples_X, _ = X.shape
n_samples_Y, _ = Y.shape
X_yi = np.empty((n_samples_X, n_samples_Y))
for i in prange(n_samples_X):
for j in prange(n_samples_Y):
X_yi[i, j] = _lower_bound_yi_x_y(
X[i], X_min[i], X_max[i], Y[j], Y_min[j], Y_max[j]
)
return X_yi
def lower_bound_yi(X_train, X_test):
"""Compute the "LB_Yi" lower bounds between two datasets.
Parameters
----------
X_train : array-like, shape = (n_samples_train, n_timestamps)
Training set.
    X_test : array-like, shape = (n_samples_test, n_timestamps)
Test set.
Returns
-------
lower_bounds : array, shape = (n_samples_test, n_samples_train)
"LB_Yi" lower bounds.
References
----------
.. [1] B. K. Yi et al, "Efficient Retrieval of Similar Time Sequences
Under Time Warping". International Conference on Data Engineering,
201-208 (1998).
Examples
--------
>>> X_train = [[5, 4, 3, 2, 1], [1, 8, 4, 3, 2], [6, 3, 5, 4, 7]]
>>> X_test = [[2, 1, 8, 4, 5]]
>>> lower_bound_yi(X_train, X_test)
array([[3. , 0. , 2.44...]])
"""
X_train = check_array(X_train)
X_test = check_array(X_test)
_check_consistent_lengths(X_train, X_test)
X_train_min = np.min(X_train, axis=1)
X_train_max = np.max(X_train, axis=1)
X_test_min = np.min(X_test, axis=1)
X_test_max = np.max(X_test, axis=1)
lb_yi = _lower_bound_yi_X_Y(X_test, X_test_min, X_test_max,
X_train, X_train_min, X_train_max)
return lb_yi
def lower_bound_kim(X_train, X_test):
"""Compute the "LB_Kim" lower bounds between two datasets.
Parameters
----------
X_train : array-like, shape = (n_samples_train, n_timestamps)
Training set.
    X_test : array-like, shape = (n_samples_test, n_timestamps)
Test set.
Returns
-------
lower_bounds : array, shape = (n_samples_test, n_samples_train)
"LB_Kim" lower bounds.
References
----------
.. [1] S. W. Kim et al, "An Index-Based Approach for Similarity Search
Supporting Time Warping in Large Sequence Databases". International
Conference on Data Engineering, 607-614 (2001).
Examples
--------
>>> X_train = [[2, 1, 8, 4, 5], [1, 2, 3, 4, 5]]
>>> X_test = [[5, 4, 3, 2, 1], [1, 8, 4, 3, 2], [6, 3, 5, 4, 7]]
>>> lower_bound_kim(X_train, X_test)
array([[4, 4],
[3, 3],
[4, 5]])
"""
X_train = check_array(X_train)
X_test = check_array(X_test)
_check_consistent_lengths(X_train, X_test)
first = np.abs(X_test[:, 0, None] - X_train[None, :, 0])
last = np.abs(X_test[:, -1, None] - X_train[None, :, -1])
max_ = np.abs(np.max(X_test, axis=1)[:, None]
- np.max(X_train, axis=1)[None, :])
min_ = np.abs(np.min(X_test, axis=1)[:, None]
- np.min(X_train, axis=1)[None, :])
lb_kim = np.max(np.asarray([first, last, max_, min_]), axis=0)
return lb_kim
@njit()
def _warping_envelope_2d(X, n_samples, n_timestamps, region):
lower = np.empty((n_samples, n_timestamps))
upper = np.empty((n_samples, n_timestamps))
for i in prange(n_samples):
for j in prange(n_timestamps):
sub_series = X[i, region[0, j]:region[1, j]]
lower[i, j] = np.min(sub_series)
upper[i, j] = np.max(sub_series)
return lower, upper
@njit()
def _warping_envelope_3d(X, n_samples_X, n_samples_Y, n_timestamps, region):
lower = np.empty((n_samples_X, n_samples_Y, n_timestamps))
upper = np.empty((n_samples_X, n_samples_Y, n_timestamps))
for i in prange(n_samples_X):
for j in prange(n_samples_Y):
for k in prange(n_timestamps):
sub_series = X[i, j, region[0, k]:region[1, k]]
lower[i, j, k] = np.min(sub_series)
upper[i, j, k] = np.max(sub_series)
return lower, upper
def _warping_envelope(X, region):
"""Compute the warping envelope.
Parameters
----------
X : array
Input data. It must be two- or three-dimensional.
region : array, shape = (2, n_timestamps)
Constraint region. The first row consists of the starting indices
(included) and the second row consists of the ending indices (excluded)
of the valid rows for each column.
Returns
-------
lower : array
The lower envelope.
upper : array
The upper envelope.
"""
X = check_array(X, ensure_2d=False, allow_nd=True)
region = check_array(region, ensure_min_samples=2)
n_dims = X.ndim
if n_dims not in (2, 3):
raise ValueError("X must be a two- or three-dimensional.")
if n_dims == 2:
n_samples, n_timestamps = X.shape
lower, upper = _warping_envelope_2d(
X, n_samples, n_timestamps, region
)
else:
n_samples_X, n_samples_Y, n_timestamps = X.shape
lower, upper = _warping_envelope_3d(
X, n_samples_X, n_samples_Y, n_timestamps, region
)
return lower, upper
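# Worked sketch with illustrative values: for X = [[1, 5, 2, 3]] and
# region = [[0, 0, 1, 2], [2, 3, 4, 4]], column j of the envelope is the
# min/max of X over rows region[0, j]:region[1, j], giving
#
#   lower = [[1., 1., 2., 2.]]
#   upper = [[5., 5., 5., 3.]]
#
# A wider constraint region therefore yields a looser envelope and weaker
# lower bounds downstream.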
@njit()
def _clip_2d(X, X_min, X_max, n_samples_X, n_samples_clip, n_timestamps):
X_clipped = np.empty((n_samples_X, n_samples_clip, n_timestamps))
for i in prange(n_samples_X):
X_clipped[i] = np.minimum(np.maximum(X[i], X_min), X_max)
return X_clipped
@njit()
def _clip_3d(X, X_min, X_max, n_samples_X, n_samples_clip, n_timestamps):
X_clipped = np.empty((n_samples_X, n_samples_clip, n_timestamps))
for i in prange(n_samples_X):
X_clipped[i] = np.minimum(np.maximum(X[i], X_min[:, i]), X_max[:, i])
return X_clipped
def _clip(X, lower, upper):
"""Clip an array.
Parameters
----------
X : array, shape = (n_samples, n_timestamps)
Array to clip.
lower : array
Minimum values in the clipped array. It must be
two- or three-dimensional.
upper : array
Maximum values in the clipped array. It must be
two- or three-dimensional, and have the same shape
as ``lower``.
Returns
-------
X_clipped : array
Clipped array.
"""
X = check_array(X)
lower = check_array(lower, ensure_2d=False, allow_nd=True)
upper = check_array(upper, ensure_2d=False, allow_nd=True)
n_dims = lower.ndim
if n_dims not in (2, 3):
raise ValueError("'lower' must be two- or three-dimensional.")
if not lower.shape == upper.shape:
raise ValueError(
"'lower' and 'upper' must have the same shape "
"({0} != {1})".format(lower.shape, upper.shape)
)
if n_dims == 2:
n_samples_X, n_timestamps = X.shape
n_samples_clip, _ = lower.shape
X_clipped = _clip_2d(
X, lower, upper, n_samples_X, n_samples_clip, n_timestamps
)
else:
n_samples_X, n_samples_Y, n_timestamps = lower.shape
X_clipped = _clip_3d(
X, lower, upper, n_samples_Y, n_samples_X, n_timestamps
)
return X_clipped
def lower_bound_keogh(X_train, X_test, region):
r"""Compute the "LB_Keogh" lower bounds between two datasets.
Parameters
----------
X_train : array-like, shape = (n_samples_train, n_timestamps)
Training set. The warping envelopes are computed
on this set.
    X_test : array-like, shape = (n_samples_test, n_timestamps)
Test set.
region : array, shape = (2, n_timestamps)
Constraint region. The first row consists of the starting indices
(included) and the second row consists of the ending indices (excluded)
of the valid rows for each column.
Returns
-------
lower_bounds : array, shape = (n_samples_test, n_samples_train)
"LB_Keogh" lower bounds.
Notes
-----
The "LB_Keogh" lower bounds are computed as
.. math:: LB_Keogh(X, Y) = \Vert X - H(X, Y) \Vert_{2}
where :math:`X` is the test set (``X_test``), :math:`Y` is the
training set (``X_train``), and :math:`H(X, Y)` is the projection
of :math:`X` on :math:`Y`.
References
----------
.. [1] E. Keogh and C. A. Ratanamahatana, "Exact indexing of dynamic
time warping". Knowledge and Information Systems, 7(3),
358-386 (2005).
Examples
--------
>>> X_train = [[0, 1, 2, 3], [1, 2, 3, 4]]
>>> X_test = [[0, 2.5, 3.5, 6]]
>>> region = [[0, 0, 1, 2], [2, 3, 4, 4]]
>>> lower_bound_keogh(X_train, X_test, region)
array([[3.08... , 2.23...]])
"""
X_train = check_array(X_train)
X_test = check_array(X_test)
_check_consistent_lengths(X_train, X_test)
lower, upper = _warping_envelope(X_train, region)
X_proj = _clip(X_test, lower, upper)
squared_lb_keogh = np.sum(
(X_test[:, None, :] - X_proj) ** 2, axis=-1
)
return np.sqrt(squared_lb_keogh)
def lower_bound_improved(X_train, X_test, region):
r"""Compute the "LB_Improved" lower bounds between two datasets.
Parameters
----------
X_train : array-like, shape = (n_samples_train, n_timestamps)
Training set.
    X_test : array-like, shape = (n_samples_test, n_timestamps)
Test set.
region : array, shape = (2, n_timestamps)
Constraint region. The first row consists of the starting indices
(included) and the second row consists of the ending indices (excluded)
of the valid rows for each column.
Returns
-------
lower_bounds : array, shape = (n_samples_test, n_samples_train)
"LB_Improved" lower bounds.
Notes
-----
The "LB_Improved" lower bounds are computed as
.. math::
        LB_Improved(X, Y) = \sqrt{\Vert X - H(X, Y) \Vert_{2}^2
        + \Vert Y - H(Y, H(X, Y)) \Vert_{2}^2}
where :math:`X` is the test set (``X_test``), :math:`Y` is the
training set (``X_train``), and :math:`H(X, Y)` is the projection
of :math:`X` on :math:`Y`.
References
----------
.. [1] D. Lemire, "Faster Retrieval with a Two-Pass Dynamic-Time-Warping
Lower Bound". Pattern Recognition, 42(9), 2169-2180 (2009).
Examples
--------
>>> X_train = [[0, 1, 2, 3], [1, 2, 3, 4]]
>>> X_test = [[0, 2.5, 3.5, 3.3]]
>>> region = [[0, 0, 1, 2], [2, 3, 4, 4]]
>>> lower_bound_improved(X_train, X_test, region)
array([[0.76..., 1.11...]])
"""
X_train = check_array(X_train)
X_test = check_array(X_test)
_check_consistent_lengths(X_train, X_test)
# LB Keogh lower bounds
lower_train, upper_train = _warping_envelope(X_train, region)
X_test_proj = _clip(X_test, lower_train, upper_train)
squared_lb_keogh = np.sum(
(X_test[:, None, :] - X_test_proj) ** 2, axis=-1
)
# LB Improved lower bounds
lower_test, upper_test = _warping_envelope(X_test_proj, region)
X_train_proj = _clip(X_train, lower_test, upper_test)
squared_lb_improved = np.sum(
(X_train[:, None, :] - X_train_proj) ** 2, axis=-1
)
return np.sqrt(squared_lb_keogh + squared_lb_improved.T)
| bsd-3-clause |
Nehoroshiy/multi_classifier | dataset/features.py | 10 | 4825 | import matplotlib
import numpy as np
from scipy.ndimage import uniform_filter
def extract_features(imgs, feature_fns, verbose=False):
"""
Given pixel data for images and several feature functions that can operate on
single images, apply all feature functions to all images, concatenating the
feature vectors for each image and storing the features for all images in
a single matrix.
Inputs:
  - imgs: N x H x W x C array of pixel data for N images.
- feature_fns: List of k feature functions. The ith feature function should
take as input an H x W x D array and return a (one-dimensional) array of
length F_i.
- verbose: Boolean; if true, print progress.
Returns:
An array of shape (F_1 + ... + F_k, N) where each column is the concatenation
of all features for a single image.
"""
num_images = imgs.shape[0]
if num_images == 0:
return np.array([])
# Use the first image to determine feature dimensions
feature_dims = []
first_image_features = []
for feature_fn in feature_fns:
feats = feature_fn(imgs[0].squeeze())
assert len(feats.shape) == 1, 'Feature functions must be one-dimensional'
feature_dims.append(feats.size)
first_image_features.append(feats)
# Now that we know the dimensions of the features, we can allocate a single
# big array to store all features as columns.
total_feature_dim = sum(feature_dims)
imgs_features = np.zeros((total_feature_dim, num_images))
imgs_features[:total_feature_dim, 0] = np.hstack(first_image_features)
# Extract features for the rest of the images.
for i in xrange(1, num_images):
idx = 0
for feature_fn, feature_dim in zip(feature_fns, feature_dims):
next_idx = idx + feature_dim
imgs_features[idx:next_idx, i] = feature_fn(imgs[i].squeeze())
idx = next_idx
if verbose and i % 1000 == 0:
print 'Done extracting features for %d / %d images' % (i, num_images)
return imgs_features
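def _example_feature_pipeline():
  """Illustrative sketch only (never called): stack HOG and hue-histogram
  features for a small random batch; the sizes are assumptions, not fixed
  by the API.
  """
  fake_imgs = np.random.rand(4, 32, 32, 3) * 255  # pretend 32x32 RGB images
  feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=10)]
  feats = extract_features(fake_imgs, feature_fns)
  # one column per image; rows = 4*4*9 HOG values + 10 histogram bins = 154
  return feats.shape  # (154, 4)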
def rgb2gray(rgb):
"""Convert RGB image to grayscale
Parameters:
rgb : RGB image
Returns:
gray : grayscale image
"""
  return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def hog_feature(im):
"""Compute Histogram of Gradient (HOG) feature for an image
Modified from skimage.feature.hog
http://pydoc.net/Python/scikits-image/0.4.2/skimage.feature.hog
Reference:
Histograms of Oriented Gradients for Human Detection
Navneet Dalal and Bill Triggs, CVPR 2005
Parameters:
im : an input grayscale or rgb image
Returns:
feat: Histogram of Gradient (HOG) feature
"""
# convert rgb to grayscale if needed
if im.ndim == 3:
image = rgb2gray(im)
else:
    image = np.atleast_2d(im)
sx, sy = image.shape # image size
orientations = 9 # number of gradient bins
cx, cy = (8, 8) # pixels per cell
gx = np.zeros(image.shape)
gy = np.zeros(image.shape)
gx[:, :-1] = np.diff(image, n=1, axis=1) # compute gradient on x-direction
gy[:-1, :] = np.diff(image, n=1, axis=0) # compute gradient on y-direction
grad_mag = np.sqrt(gx ** 2 + gy ** 2) # gradient magnitude
grad_ori = np.arctan2(gy, (gx + 1e-15)) * (180 / np.pi) + 90 # gradient orientation
n_cellsx = int(np.floor(sx / cx)) # number of cells in x
n_cellsy = int(np.floor(sy / cy)) # number of cells in y
# compute orientations integral images
orientation_histogram = np.zeros((n_cellsx, n_cellsy, orientations))
for i in range(orientations):
# create new integral image for this orientation
# isolate orientations in this range
temp_ori = np.where(grad_ori < 180 / orientations * (i + 1),
grad_ori, 0)
temp_ori = np.where(grad_ori >= 180 / orientations * i,
temp_ori, 0)
# select magnitudes for those orientations
cond2 = temp_ori > 0
temp_mag = np.where(cond2, grad_mag, 0)
orientation_histogram[:,:,i] = uniform_filter(temp_mag, size=(cx, cy))[cx/2::cx, cy/2::cy].T
return orientation_histogram.ravel()
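# Worked size check (illustrative): for a 32x32 input with 8x8 pixel cells and
# 9 orientation bins, the raveled descriptor above has
# floor(32/8) * floor(32/8) * 9 = 4 * 4 * 9 = 144 entries.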
def color_histogram_hsv(im, nbin=10, xmin=0, xmax=255, normalized=True):
"""
Compute color histogram for an image using hue.
Inputs:
- im: H x W x C array of pixel data for an RGB image.
- nbin: Number of histogram bins. (default: 10)
- xmin: Minimum pixel value (default: 0)
- xmax: Maximum pixel value (default: 255)
- normalized: Whether to normalize the histogram (default: True)
Returns:
1D vector of length nbin giving the color histogram over the hue of the
input image.
"""
ndim = im.ndim
bins = np.linspace(xmin, xmax, nbin+1)
hsv = matplotlib.colors.rgb_to_hsv(im/xmax) * xmax
imhist, bin_edges = np.histogram(hsv[:,:,0], bins=bins, density=normalized)
imhist = imhist * np.diff(bin_edges)
# return histogram
return imhist
pass
| mit |
hcmlab/nova | bin/cml/models/templates/discrete/feature/sklearn/sklearn_naivebayes.py | 1 | 6393 | import sys
import importlib
if not hasattr(sys, 'argv'):
sys.argv = ['']
import numpy as np
import random
from xml.dom import minidom
import os
import shutil
import site
import pprint
import pickle
from sklearn.naive_bayes import GaussianNB
#interface
def getModelType(types, opts, vars):
return types.REGRESSION if opts["is_regression"] else types.CLASSIFICATION
def getOptions(opts, vars):
try:
vars['x'] = None
vars['y'] = None
vars['session'] = None
vars['model'] = None
vars['model_id'] = "."
'''Setting the default options. All options can be overwritten by adding them to the conf-dictionary in the same file as the network'''
opts['network'] = ''
opts['experiment_id'] = ''
opts['is_regression'] = False
opts['n_timesteps'] = 1
opts['perma_drop'] = False
opts['n_fp'] = 1
opts['loss_function'] = 'mean_squared_error'
opts['optimizier'] = 'adam'
opts['metrics'] = []
opts['lr'] = 0.0001
opts['n_epoch'] = 1
opts['batch_size'] = 32
except Exception as e:
print_exception(e, 'getOptions')
exit()
def train(data,label_score, opts, vars):
try:
n_input = data[0].dim
if not opts['is_regression']:
#adding one output for the restclass
n_output = int(max(label_score)+1)
else:
n_output = 1
print ('load network architecture from ' + opts['network'] + '.py')
print('#input: {} \n#output: {}'.format(n_input, n_output))
nts = opts['n_timesteps']
if nts < 1:
raise ValueError('n_timesteps must be >= 1 but is {}'.format(nts))
elif nts == 1:
sample_shape = (n_input, )
else:
sample_shape = (int(n_input / nts), nts)
#(number of samples, number of features, number of timesteps)
sample_list_shape = ((len(data),) + sample_shape)
x = np.empty(sample_list_shape)
y = np.empty((len(label_score)))
print('Input data array should be of shape: {} \nLabel array should be of shape {} \nStart reshaping input accordingly...\n'.format(x.shape, y.shape))
#reshaping
for sample in range(len(data)):
#input
temp = np.reshape(data[sample], sample_shape)
x[sample] = temp
y[sample] = label_score[sample]
sanity_check(x)
#model = LinearSVC(random_state=1234, tol=1e-4)
model = GaussianNB()
print('train_x shape: {} | train_x[0] shape: {}'.format(x.shape, x[0].shape))
model.fit(x, y)
vars['n_input'] = n_input
vars['n_output'] = n_output
vars['model'] = model
except Exception as e:
print_exception(e, 'train')
exit()
def forward(data, probs_or_score, opts, vars):
try:
model = vars['model']
#reshaping the input
if model:
n_input = data.dim
n_output = len(probs_or_score)
n_fp = int(opts['n_fp'])
nts = opts['n_timesteps']
if nts < 1:
raise ValueError('n_timesteps must be >= 1 but is {}'.format(nts))
elif nts == 1:
sample_shape = (n_input,)
else:
sample_shape = (int(n_input / nts), nts)
x = np.asarray(data)
x = x.astype('float32')
            x = x.reshape(sample_shape)
n_output = len(probs_or_score)
results = np.zeros((n_fp, n_output), dtype=np.float32)
#TODO: Add logic for multiple time frames
for fp in range(n_fp):
pred = model.predict_proba(data)
# for i in range(len(pred[fp])):
# results[fp][i] = pred[fp][i]
# print(pred)
# if opts['is_regression']:
# results[0] = pred[0]
# else:
# results[fp] = pred[0]
#for i in range(len(mean)):
# probs_or_score = pred
# conf = 1
#mean = np.mean(results, axis=0)
#std = np.std(results, axis=0)
#conf = max(0,1-np.mean(std))
conf = max(pred[0])
for i in range(pred.size):
probs_or_score[i] = pred[0][i]
return conf
else:
print('Train model first')
return 1
except Exception as e:
print_exception(e, 'forward')
exit()
def save(path, opts, vars):
try:
with open(path, 'wb') as f:
pickle.dump(vars['model'], f)
# # copy scripts
src_dir = os.path.dirname(os.path.realpath(__file__))
dst_dir = os.path.dirname(path)
# print('copy scripts from \'' + src_dir + '\' to \'' + dst_dir + '\'')
srcFiles = os.listdir(src_dir)
for fileName in srcFiles:
full_file_name = os.path.join(src_dir, fileName)
if os.path.isfile(full_file_name) and (fileName.endswith('sklearn_naivebayes.py')):
shutil.copy(full_file_name, dst_dir)
except Exception as e:
print_exception(e, 'save')
sys.exit()
def load(path, opts, vars):
try:
print('\nLoading model\n')
with open(path, 'rb') as f:
model = pickle.load(f)
vars['model'] = model
except Exception as e:
print_exception(e, 'load')
sys.exit()
# helper functions
def print_exception(exception, function):
exc_type, exc_obj, exc_tb = sys.exc_info()
    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('Exception in {}: {} \nType: {} Fname: {} LN: {} '.format(function, exception, exc_type, fname, exc_tb.tb_lineno))
def set_opts_from_config(opts, conf):
print(conf)
for key, value in conf.items():
opts[key] = value
print('Options haven been set to:\n')
pprint.pprint(opts)
print('\n')
#checking the input for corrupted values
def sanity_check(x):
if np.any(np.isnan(x)):
print('At least one input is not a number!')
if np.any(np.isinf(x)):
print('At least one input is inf!')
| gpl-3.0 |
maxlikely/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 12 | 1962 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
pl.plot(enet.coef_, label='Elastic net coefficients')
pl.plot(lasso.coef_, label='Lasso coefficients')
pl.plot(coef, '--', label='original coefficients')
pl.legend(loc='best')
pl.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
pl.show()
| bsd-3-clause |
harisbal/pandas | pandas/tests/scalar/test_nat.py | 3 | 10319 | import pytest
from datetime import datetime, timedelta
import pytz
import numpy as np
from pandas import (NaT, Index, Timestamp, Timedelta, Period,
DatetimeIndex,
TimedeltaIndex, Series, isna)
from pandas.core.arrays import PeriodArray
from pandas.util import testing as tm
from pandas._libs.tslib import iNaT
from pandas.compat import callable
@pytest.mark.parametrize('nat, idx', [(Timestamp('NaT'), DatetimeIndex),
(Timedelta('NaT'), TimedeltaIndex),
(Period('NaT', freq='M'), PeriodArray)])
def test_nat_fields(nat, idx):
for field in idx._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(NaT, field)
assert np.isnan(result)
result = getattr(nat, field)
assert np.isnan(result)
for field in idx._bool_ops:
result = getattr(NaT, field)
assert result is False
result = getattr(nat, field)
assert result is False
def test_nat_vector_field_access():
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(idx, field)
expected = Index([getattr(x, field) for x in idx])
tm.assert_index_equal(result, expected)
s = Series(idx)
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(s.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
for field in DatetimeIndex._bool_ops:
result = getattr(s.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta, Period])
def test_identity(klass):
assert klass(None) is NaT
result = klass(np.nan)
assert result is NaT
result = klass(None)
assert result is NaT
result = klass(iNaT)
assert result is NaT
result = klass(np.nan)
assert result is NaT
result = klass(float('nan'))
assert result is NaT
result = klass(NaT)
assert result is NaT
result = klass('NaT')
assert result is NaT
assert isna(klass('nat'))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta, Period])
def test_equality(klass):
# nat
if klass is not Period:
klass('').value == iNaT
klass('nat').value == iNaT
klass('NAT').value == iNaT
klass(None).value == iNaT
klass(np.nan).value == iNaT
assert isna(klass('nat'))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta])
def test_round_nat(klass):
# GH14940
ts = klass('nat')
for method in ["round", "floor", "ceil"]:
round_method = getattr(ts, method)
for freq in ["s", "5s", "min", "5min", "h", "5h"]:
assert round_method(freq) is ts
def test_NaT_methods():
# GH 9513
# GH 17329 for `timestamp`
raise_methods = ['astimezone', 'combine', 'ctime', 'dst',
'fromordinal', 'fromtimestamp', 'isocalendar',
'strftime', 'strptime', 'time', 'timestamp',
'timetuple', 'timetz', 'toordinal', 'tzname',
'utcfromtimestamp', 'utcnow', 'utcoffset',
'utctimetuple', 'timestamp']
nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today',
'tz_convert', 'tz_localize']
nan_methods = ['weekday', 'isoweekday']
for method in raise_methods:
if hasattr(NaT, method):
with pytest.raises(ValueError):
getattr(NaT, method)()
for method in nan_methods:
if hasattr(NaT, method):
assert np.isnan(getattr(NaT, method)())
for method in nat_methods:
if hasattr(NaT, method):
# see gh-8254
exp_warning = None
if method == 'to_datetime':
exp_warning = FutureWarning
with tm.assert_produces_warning(
exp_warning, check_stacklevel=False):
assert getattr(NaT, method)() is NaT
# GH 12300
assert NaT.isoformat() == 'NaT'
def test_NaT_docstrings():
# GH#17327
nat_names = dir(NaT)
# NaT should have *most* of the Timestamp methods, with matching
# docstrings. The attributes that are not expected to be present in NaT
# are private methods plus `ts_expected` below.
ts_names = dir(Timestamp)
ts_missing = [x for x in ts_names if x not in nat_names and
not x.startswith('_')]
ts_missing.sort()
ts_expected = ['freqstr', 'normalize',
'to_julian_date',
'to_period', 'tz']
assert ts_missing == ts_expected
ts_overlap = [x for x in nat_names if x in ts_names and
not x.startswith('_') and
callable(getattr(Timestamp, x))]
for name in ts_overlap:
tsdoc = getattr(Timestamp, name).__doc__
natdoc = getattr(NaT, name).__doc__
assert tsdoc == natdoc
# NaT should have *most* of the Timedelta methods, with matching
# docstrings. The attributes that are not expected to be present in NaT
# are private methods plus `td_expected` below.
# For methods that are both Timestamp and Timedelta methods, the
# Timestamp docstring takes priority.
td_names = dir(Timedelta)
td_missing = [x for x in td_names if x not in nat_names and
not x.startswith('_')]
td_missing.sort()
td_expected = ['components', 'delta', 'is_populated',
'to_pytimedelta', 'to_timedelta64', 'view']
assert td_missing == td_expected
td_overlap = [x for x in nat_names if x in td_names and
x not in ts_names and # Timestamp __doc__ takes priority
not x.startswith('_') and
callable(getattr(Timedelta, x))]
assert td_overlap == ['total_seconds']
for name in td_overlap:
tddoc = getattr(Timedelta, name).__doc__
natdoc = getattr(NaT, name).__doc__
assert tddoc == natdoc
@pytest.mark.parametrize('klass', [Timestamp, Timedelta])
def test_isoformat(klass):
result = klass('NaT').isoformat()
expected = 'NaT'
assert result == expected
def test_nat_arithmetic():
# GH 6873
i = 2
f = 1.5
for (left, right) in [(NaT, i), (NaT, f), (NaT, np.nan)]:
assert left / right is NaT
assert left * right is NaT
assert right * left is NaT
with pytest.raises(TypeError):
right / left
# Timestamp / datetime
t = Timestamp('2014-01-01')
dt = datetime(2014, 1, 1)
for (left, right) in [(NaT, NaT), (NaT, t), (NaT, dt)]:
# NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
assert right + left is NaT
assert left + right is NaT
assert left - right is NaT
assert right - left is NaT
# timedelta-like
# offsets are tested in test_offsets.py
delta = timedelta(3600)
td = Timedelta('5s')
for (left, right) in [(NaT, delta), (NaT, td)]:
# NaT + timedelta-like returns NaT
assert right + left is NaT
assert left + right is NaT
assert right - left is NaT
assert left - right is NaT
assert np.isnan(left / right)
assert np.isnan(right / left)
# GH 11718
t_utc = Timestamp('2014-01-01', tz='UTC')
t_tz = Timestamp('2014-01-01', tz='US/Eastern')
dt_tz = pytz.timezone('Asia/Tokyo').localize(dt)
for (left, right) in [(NaT, t_utc), (NaT, t_tz),
(NaT, dt_tz)]:
# NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
assert right + left is NaT
assert left + right is NaT
assert left - right is NaT
assert right - left is NaT
# int addition / subtraction
for (left, right) in [(NaT, 2), (NaT, 0), (NaT, -3)]:
assert right + left is NaT
assert left + right is NaT
assert left - right is NaT
assert right - left is NaT
def test_nat_rfloordiv_timedelta():
# GH#18846
# See also test_timedelta.TestTimedeltaArithmetic.test_floordiv
td = Timedelta(hours=3, minutes=4)
assert td // np.nan is NaT
assert np.isnan(td // NaT)
assert np.isnan(td // np.timedelta64('NaT'))
def test_nat_arithmetic_index():
# GH 11718
dti = DatetimeIndex(['2011-01-01', '2011-01-02'], name='x')
exp = DatetimeIndex([NaT, NaT], name='x')
tm.assert_index_equal(dti + NaT, exp)
tm.assert_index_equal(NaT + dti, exp)
dti_tz = DatetimeIndex(['2011-01-01', '2011-01-02'],
tz='US/Eastern', name='x')
exp = DatetimeIndex([NaT, NaT], name='x', tz='US/Eastern')
tm.assert_index_equal(dti_tz + NaT, exp)
tm.assert_index_equal(NaT + dti_tz, exp)
exp = TimedeltaIndex([NaT, NaT], name='x')
for (left, right) in [(NaT, dti), (NaT, dti_tz)]:
tm.assert_index_equal(left - right, exp)
tm.assert_index_equal(right - left, exp)
# timedelta # GH#19124
tdi = TimedeltaIndex(['1 day', '2 day'], name='x')
tdi_nat = TimedeltaIndex([NaT, NaT], name='x')
tm.assert_index_equal(tdi + NaT, tdi_nat)
tm.assert_index_equal(NaT + tdi, tdi_nat)
tm.assert_index_equal(tdi - NaT, tdi_nat)
tm.assert_index_equal(NaT - tdi, tdi_nat)
@pytest.mark.parametrize('box', [TimedeltaIndex, Series])
def test_nat_arithmetic_td64_vector(box):
# GH#19124
vec = box(['1 day', '2 day'], dtype='timedelta64[ns]')
box_nat = box([NaT, NaT], dtype='timedelta64[ns]')
tm.assert_equal(vec + NaT, box_nat)
tm.assert_equal(NaT + vec, box_nat)
tm.assert_equal(vec - NaT, box_nat)
tm.assert_equal(NaT - vec, box_nat)
def test_nat_pinned_docstrings():
# GH17327
assert NaT.ctime.__doc__ == datetime.ctime.__doc__
| bsd-3-clause |
SiLab-Bonn/pyBAR | examples/example_multi_chip_analysis/plot_occupancy.py | 1 | 1512 | 'Simple example of how to draw hit maps from a raw data file with data from multiple chips. No event building is done, thus no feedback about FE status!'
import os.path
from matplotlib.backends.backend_pdf import PdfPages
import tables as tb
import numpy as np
from pybar.daq import readout_utils
from pybar.analysis.plotting import plotting
def draw_hit_map_from_raw_data(raw_data_file, front_ends):
with PdfPages(os.path.splitext(raw_data_file)[0] + '.pdf') as output_pdf:
with tb.open_file(raw_data_file, 'r') as in_file_h5:
raw_data = in_file_h5.root.raw_data[:]
for front_end in range(front_ends):
print 'Create occupancy hist of front end %d' % front_end
occupancy_array, _, _ = np.histogram2d(*readout_utils.convert_data_array(raw_data,
filter_func=readout_utils.logical_and(readout_utils.is_data_record, readout_utils.is_data_from_channel(4 - front_end)),
converter_func=readout_utils.get_col_row_array_from_data_record_array), bins=(80, 336), range=[[1, 80], [1, 336]])
plotting.plot_three_way(hist=occupancy_array.T, title="Occupancy of chip %d" % front_end, x_axis_title="Occupancy", filename=output_pdf)
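                # Note: the `4 - front_end` channel selection above assumes a 4-chip module
                # whose readout channel numbers run opposite to the front-end index
                # (front end 0 -> channel 4); this mapping is specific to the example setup
                # and may differ for other hardware configurations.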
if __name__ == "__main__":
draw_hit_map_from_raw_data('/home/davidlp/Downloads/digital_analog/21_module_test_analog_scan.h5', 4)
| bsd-3-clause |
youdar/work | work/Clashes/collecting_overlap_data.py | 1 | 11675 | from __future__ import division
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import cPickle as pickle
import numpy as np
import sys
import os
class Plot_data(object):
def __init__(self):
"""
When using multiple lines of similar data, collect the plot info using
this object
count (y value): count of files for each year
percent (y value): 100* count/total_structure
years (x value): year when PDB structure was published
n_ignored (int): the number of structures not included
    nbo (int): files included are those with NBO >= nbo
"""
self.count = None
self.percent = None
self.years = None
self.n_ignored = None
self.nbo = None
class Collecting_overlaps_data(object):
def __init__(self,start_year=1980,end_year=2014):
"""
if "test_clean_data" and "test_clean_dict" exist, plotting is done on
existing data
if "test_clean_data" and "test_clean_dict" do not exist, collect info from the
folder containing the tests, from the queue and create them.
the file "test_data.txt" is a comma separated file, containing all data.
the file "test_clean_data" is a pickled list containing only files that
could be processed (without error of any kind).
the file "test_clean_dict" is a pickled dictionary containing all data.
"test_data.txt" is a comma separated file with the header:
PDB ID,Macro molecule overlaps,Symmetry overlaps,All overlaps,
Macro molecule overlaps per 1000 atoms,Symmetry overlaps per 1000 atoms,
All overlaps per 1000 atoms,year model deposited in PDB,experiment type
Args:
start_year, end_year (int): start and end years to show on plots
"""
self.data_file_name = 'test_data.txt'
self.clean_dict_file_name = 'test_clean_dict'
self.clean_data_file_name = 'test_clean_data'
#
self.data = []
self.clean_data = []
self.clean_data_dict = {}
self.queue_data_path = "/net/cci/youval/work/work/Clashes/queue_clash_compare"
# filtered data
self.plot_data_list = []
# total data
self.n_total = None
self.n_total_dict = {}
self.years = None
self.start_year = start_year
self.end_year = end_year
# collecting years for structures with x[i] > min_nbo
self.nbo_per_1000_atoms = []
self.sym = True
def set_working_path(self):
""" Set working and data folders """
p = r'C:\Users\Youval\Google Drive\Documents\LBNL\phenix\publications'
p += r'\news letter\clash score\related code'
p += r'\pdb_overlap_scan'
osType = sys.platform
if osType.startswith('win'):
assert os.path.isdir(p)
self.working_path = p
print 'data path: ',p
else:
path = '/net/cci/youval/work/work/Clashes'
assert os.path.isdir(path)
self.working_path = path
print 'data path: ',path
os.chdir(self.working_path)
def get_test_data(self):
""" collect data from pdb scan or existing data files """
have_data = os.path.isfile(self.data_file_name)
have_data &= os.path.isfile(self.clean_dict_file_name)
have_data &= os.path.isfile(self.clean_data_file_name)
if have_data:
print 'using existing data files'
self.data = self.read_csv_data()
self.clean_data = pickle.load(open(self.clean_data_file_name,'r'))
self.clean_data = [x for x in self.clean_data if x[1] >= 0 ]
self.data_dict = pickle.load(open(self.clean_dict_file_name,'r'))
print "Number of good files: ",len(self.clean_data)
print "Total number files processed: ",len(self.data)
else:
print 'getting new data from {}'.format(self.queue_data_path)
# check if data folder exist
if os.path.isdir(self.queue_data_path):
# Read files in directory_path
files = os.listdir(self.queue_data_path)
# collect only the files that starts with log_
files = [x for x in files if x.startswith('log_')]
print "Number of log files: ",len(files)
for fn in files:
d = open(os.path.join(self.queue_data_path, fn), "r").readlines()
if d:
data = format_data_types(d[0])
else:
data = []
if not ((len(d)==1) and (len(data) == 9)):
# Some issue with results
pdb_id = fn[-4:]
data = [pdb_id] + ([-9] * 8)
self.data.append(data)
# clean data, collect good data
print 'Total number data records: {}'.format(len(self.data))
f = open(self.data_file_name,'w')
out_str = ['{}'] * 9
out_str = ','.join(out_str) + '\n'
for d in self.data:
pdb_id = d[0]
f.write(out_str.format(*d))
if d[1] >= 0:
self.clean_data.append(d)
self.clean_data_dict[pdb_id] = d
f.close()
print "Number of good records: ",len(self.clean_data)
pickle.dump(self.clean_data, open(self.clean_data_file_name,'w'))
pickle.dump(self.clean_data_dict, open(self.clean_dict_file_name,'w'))
def read_csv_data(self):
""" read the data from csv text file """
data = open(self.data_file_name,'r').read().splitlines()
data_list = []
for l in data:
if (not l.startswith('#')) and (not l.startswith('PDB ID')):
d = format_data_types(l)
data_list.append(d)
return data_list
def prepare_data_for_plotting(self,sym=True):
"""
Process data for plotting.
nonbonded overlaps (NBO) > min_nbo vs.
year pdb structure was submitted, starting at 1980 till 2014
Build self.plot_data_list
Args:
sym (bool): when True plot symmetry NBO when False use all NBO
"""
self.sym = sym
plt.close('all')
if not self.clean_data:
raise IOError('No Data to plot')
# get data year: x[7], sym NBO: x[2], all NBO: x[3]
# get data year: x[7], sym NBO per 1000 atoms: x[5], all NBO: x[3]
assert len(self.clean_data[0]) == 9
if sym:
# i = 2 # NBO
i = 5 # NBO per 1000 atoms
else:
# i = 3 # NBO
i = 6 # NBO per 1000 atoms
# collecting years for structures with x[i] > min_nbo
# nbo_list = [10,20,50,100]
nbo_per_1000_atoms = [0,3,6,9,15]
data = [x[7] for x in self.clean_data
if (x[7] >= self.start_year) and (x[7] <= self.end_year)]
n_bins = np.arange(min(data)-0.5,max(data)+1.5,1)
years = range(min(data),max(data)+1,1)
# get the total of structure for each year
n_total, all_years, _ = plt.hist(data, bins=n_bins)
n_total_dict = {y:n for y,n in zip(years,n_total)}
plt.clf()
n_all = len(self.clean_data)
# for min_nbo in nbo_list:
for min_nbo in nbo_per_1000_atoms:
pd = Plot_data()
pd.nbo = min_nbo
data = [x[7] for x in self.clean_data if x[i] > min_nbo]
pd.n_ignored = len(self.clean_data)-len(data)
# use histogram to get the data point for each nbo
pd.count, _, _ = plt.hist(data, bins=n_bins)
plt.clf()
pd.years = np.array(years)
assert len(pd.count) == len(years)
# collect the total number of structure deposited for present years
tmp = np.array([n_total_dict[y] for y in pd.years])
tmp = np.array([(x,y) for x,y in zip(pd.count,tmp) if y>0])
pd.percent = 100*tmp[:,0]/tmp[:,1]
pd.count = tmp[:,0]
assert len(pd.years) == len(pd.percent)
# filtered data
self.plot_data_list.append(pd)
# total data
self.n_total = n_total
self.years = years
self.nbo_per_1000_atoms = nbo_per_1000_atoms
self.n_total_dict = n_total_dict
def nbo_vs_year(self):
"""
Plot percent of structures, with different NBO per 1000 atoms levels,
from "good" pdb structures (all PDB files with a single model, no unknown
atom types and good CRYST1 records) VS. year
Second sub plot: the total of "good" structures deposited VS. year
"""
plt.close('all')
# figure parameters
# plt.ion() # enables interactive mode
max_y = 105
fontsize = 20
fig = plt.figure(figsize=(8,10))
gs = GridSpec(2,1,height_ratios=[2,1])
# first subplot
ax1 = plt.subplot(gs[0,0])
ax2 = plt.subplot(gs[1,0])
lines = []
line_type = ['.:','.-','.--']
n = len(line_type)
for i,pd in enumerate(self.plot_data_list):
lt = line_type[i%n]
l, = ax1.plot(pd.years,pd.percent,lt)
lines.append(l)
ax1.set_ylabel('Percent of PDB structures',fontsize=fontsize)
ax1.text(min(self.years)+0.5,max_y-4,'a.',fontsize=fontsize)
ax1.tick_params(axis='both',labelsize=fontsize - 2)
ax1.axes.get_xaxis().set_visible(False)
ax1.set_yticks([5,10,40,70,100])
ax1.set_ylim([0,max_y])
ax1.set_xlim([self.start_year,self.end_year])
# legend
labels = ['NBO per 1000 atom > {}']*len(self.nbo_per_1000_atoms)
labels = [x.format(y) for x,y in zip(labels,self.nbo_per_1000_atoms)]
if self.sym:
legend_pos = [0.96,0.70]
else:
legend_pos = [0.54,0.30]
ax1.legend(
lines,labels,
bbox_to_anchor=legend_pos,
loc=1,borderaxespad=0.0)
# Second subplot
ax2.plot(self.years,self.n_total,'.:g')
ax2.set_xlim([self.start_year,self.end_year])
ax2.set_xlabel('Year',fontsize=fontsize)
ax2.set_ylabel('Number of structures',fontsize=fontsize)
ax2.text(min(self.years)+0.5,max(self.n_total)-5,'b.',fontsize=fontsize)
ax2.tick_params(axis='both',labelsize=fontsize - 2)
ax2.set_xticks([self.start_year,1990,2000,self.end_year])
ax2.set_yscale('log')
ax2.set_yticks([10,100,1000])
#
gs.tight_layout(fig)
gs.update(hspace=0)
s = 'all'*(not self.sym) + 'sym'*self.sym
fig_name = 'nbo_vs_year_{}.png'.format(s)
plt.savefig(fig_name)
fig.show()
def show_values(self,year=2014):
""" Show the plot numbers for a particular year """
print '--------------------------'
print 'Showing info for {}'.format(year)
print '--------------------------'
msg = 'total number of structure PDB files with a single model, \n'
msg += 'no unknown atom types and good CRYST1 records: {}\n'
print msg.format(int(self.n_total_dict[year]))
# find and collect number of structure. for each NBO limit,
# for the desired year
data = self.plot_data_list[0]
i = list(data.years).index(year)
n = [x.count[i] for x in self.plot_data_list]
p = [x.percent[i] for x in self.plot_data_list]
#
    s = ['NBO per 1000 atoms > {:3}, # of structures: {:5}, percent: {:5.1f}']
s = s*len(self.nbo_per_1000_atoms)
s = [x.format(y,int(z),k) for x,y,z,k in zip(s,self.nbo_per_1000_atoms,n,p)]
for l in s:
print l
print
def format_data_types(s):
"""
apply the correct data type to each value in the list created from a comma
separated sting "s"
x1: PDB ID (string)
x2: Macro molecule overlaps (int)
x3: Symmetry overlaps (int)
x4: All overlaps (int)
x5: Macro molecule overlaps per 1000 atoms (float)
x6: Symmetry overlaps per 1000 atoms (float)
x7: All overlaps per 1000 atoms (float)
x8: year model deposited in PDB (int)
x9: experiment type (string)
"""
d = [x.strip() for x in s.split(',')]
if len(d) == 9:
# integer values
for i in [1,2,3,7]:
d[i] = int(d[i])
# float values
for i in [4,5,6]:
d[i] = round(float(d[i]),1)
return d
else:
return None
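# Illustrative record for format_data_types (the values below are made up):
#   "1abc, 10, 2, 12, 1.5, 0.3, 1.8, 1999, X-RAY DIFFRACTION"
# parses to
#   ['1abc', 10, 2, 12, 1.5, 0.3, 1.8, 1999, 'X-RAY DIFFRACTION']
# i.e. fields 2-4 and 8 become int, fields 5-7 become floats rounded to one decimal,
# and the PDB ID / experiment type stay as strings.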
def run():
print 'Start'
test_results = Collecting_overlaps_data()
test_results.set_working_path()
test_results.get_test_data()
test_results.prepare_data_for_plotting(sym=False)
test_results.nbo_vs_year()
test_results.show_values(year=2014)
print 'Done...'
if __name__=='__main__':
run()
| mit |
hugobowne/scikit-learn | sklearn/datasets/tests/test_kddcup99.py | 59 | 1336 | """Test kddcup99 loader. Only 'percent10' mode is tested, as the full data
is too big to use in unit-testing.
The test is skipped if the data wasn't previously fetched and saved to
scikit-learn data folder.
"""
import errno
from sklearn.datasets import fetch_kddcup99
from sklearn.utils.testing import assert_equal, SkipTest
def test_percent10():
try:
data = fetch_kddcup99(download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("kddcup99 dataset can not be loaded.")
assert_equal(data.data.shape, (494021, 41))
assert_equal(data.target.shape, (494021,))
data_shuffled = fetch_kddcup99(shuffle=True, random_state=0)
assert_equal(data.data.shape, data_shuffled.data.shape)
assert_equal(data.target.shape, data_shuffled.target.shape)
data = fetch_kddcup99('SA')
assert_equal(data.data.shape, (100655, 41))
assert_equal(data.target.shape, (100655,))
data = fetch_kddcup99('SF')
assert_equal(data.data.shape, (73237, 4))
assert_equal(data.target.shape, (73237,))
data = fetch_kddcup99('http')
assert_equal(data.data.shape, (58725, 3))
assert_equal(data.target.shape, (58725,))
data = fetch_kddcup99('smtp')
assert_equal(data.data.shape, (9571, 3))
assert_equal(data.target.shape, (9571,))
| bsd-3-clause |
dsullivan7/scikit-learn | sklearn/neighbors/regression.py | 39 | 10464 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
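    # Note on the weighted branch above: for each output column j it computes the
    # inverse-distance weighted mean  y_hat[:, j] = sum_i(w_i * y_ij) / sum_i(w_i)
    # over the k neighbors; when `weights` is None (uniform) this reduces to a plain mean.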
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
hdmetor/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 233 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assess the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
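# The update inside centrality_scores above is the damped PageRank iteration
#   s <- alpha * (s @ P + dangling_term) + (1 - alpha) * sum(s) / n
# where P is the row-normalized adjacency matrix built above and the dangling term
# redistributes the score mass of pages without outgoing links.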
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
mrcslws/htmresearch | projects/sequence_learning/generate_plots.py | 6 | 2426 | import matplotlib.pyplot as plt
import multiprocessing
from optparse import OptionParser
import sequence_simulations
import sys
def fig6a(cliArgs, noises):
argsTpl = cliArgs + " --noise {}"
return [
sequence_simulations.parser.parse_args(argsTpl.format(noise).split(" "))[0]
for noise in noises
] + [
sequence_simulations.parser.parse_args((argsTpl + " --cells 1")
.format(noise).split(" "))[0]
for noise in noises
]
def fig6b(cliArgs, noises):
argsTpl = cliArgs + " --noise {}"
return [
sequence_simulations.parser.parse_args(argsTpl.format(noise).split(" "))[0]
for noise in noises
]
if __name__ == "__main__":
parser = OptionParser("python %prog noise [noise ...]")
parser.add_option("--figure",
help="Which figure to plot. Must be 'A' or 'B'.")
parser.add_option("--passthru",
help=("Pass options through to sequence_simulations.py. "
"See `python sequence_simulations.py --help` for "
"options"))
# Parse CLI arguments
options, args = parser.parse_args(sys.argv[1:])
if not args:
print "You must specify at least one 'noise' argument."
sys.exit(1)
if options.figure == "A":
figure = fig6a
elif options.figure == "B":
figure = fig6b
else:
print "You must specify one of '--figure A' or '--figure B'"
sys.exit(1)
# Convert list of str to list of float
noises = [float(noise) for noise in args]
# Run simulations in parallel
pool = multiprocessing.Pool()
results = pool.map(sequence_simulations.runExperiment1,
figure(options.passthru, noises))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel("Sequence Elements")
ax.set_ylabel("Accuracy")
# Plot results
for result in results:
ax.plot(result, linewidth=2.0)
# Legend
if options.figure == "A":
ax.legend(["HTM Layer", "First Order Model"], loc="lower right")
elif options.figure == "B":
ax.legend(["{}% cell death".format(int(noise * 100)) for noise in noises],
loc="lower right")
# Horizontal bar at 50%
ax.plot([0.5 for x in xrange(len(results[0]))], "--")
# Re-tick axes
plt.yticks((0.1, 0.2, 0.3, 0.4, 0.5, 0.6),
("10%", "20%", "30%", "40%", "50%", "60%"))
plt.xticks((2000, 4000, 6000, 8000))
# Show plot
plt.show() | agpl-3.0 |
vnpy/vnpy | examples/data_analysis/data_analysis.py | 3 | 10377 | from datetime import datetime
import warnings
import numpy as np
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
from statsmodels.stats.diagnostic import acorr_ljungbox
from statsmodels.tsa.stattools import adfuller as ADF
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import talib
from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.database import database_manager
warnings.filterwarnings("ignore")
class DataAnalysis:
def __init__(self):
""""""
self.symbol = ""
self.exchange = None
self.interval = None
self.start = None
self.end = None
self.rate = 0.0
self.window_volatility = 20
self.window_index = 20
self.orignal = pd.DataFrame()
self.index_1to1 = []
self.index_2to2 = []
self.index_3to1 = []
self.index_2to1 = []
self.index_4to1 = []
self.intervals = []
self.results = {}
def load_history(
self,
symbol: str,
exchange: Exchange,
interval: Interval,
start: datetime,
end: datetime,
rate: float = 0.0,
index_1to1: list = None,
index_2to2: list = None,
index_3to1: list = None,
index_2to1: list = None,
index_4to1: list = None,
window_index: int = 20,
window_volatility: int = 20,
):
""""""
output("开始加载历史数据")
self.window_volatility = window_volatility
self.window_index = window_index
self.rate = rate
self.index_1to1 = index_1to1
self.index_2to2 = index_2to2
self.index_3to1 = index_3to1
self.index_2to1 = index_2to1
self.index_4to1 = index_4to1
# Load history data from database
bars = database_manager.load_bar_data(
symbol=symbol,
exchange=exchange,
interval=interval,
start=start,
end=end,
)
output(f"历史数据加载完成,数据量:{len(bars)}")
# Generate history data in DataFrame
t = []
o = []
h = []
l = [] # noqa
c = []
v = []
for bar in bars:
time = bar.datetime
open_price = bar.open_price
high_price = bar.high_price
low_price = bar.low_price
close_price = bar.close_price
volume = bar.volume
t.append(time)
o.append(open_price)
h.append(high_price)
l.append(low_price)
c.append(close_price)
v.append(volume)
self.orignal["open"] = o
self.orignal["high"] = h
self.orignal["low"] = l
self.orignal["close"] = c
self.orignal["volume"] = v
self.orignal.index = t
def base_analysis(self, df: DataFrame = None):
""""""
if df is None:
df = self.orignal
if df is None:
output("数据为空,请输入数据")
close_price = df["close"]
output("第一步:画出行情图,检查数据断点")
close_price.plot(figsize=(20, 8), title="close_price")
plt.show()
random_test(close_price)
stability_test(close_price)
autocorrelation_test(close_price)
self.relative_volatility_analysis(df)
self.growth_analysis(df)
self.calculate_index(df)
return df
def relative_volatility_analysis(self, df: DataFrame = None):
"""
        Relative volatility
        """
        output("Step 5: relative volatility analysis")
df["volatility"] = talib.ATR(
np.array(df["high"]),
np.array(df["low"]),
np.array(df["close"]),
self.window_volatility
)
df["fixed_cost"] = df["close"] * self.rate
df["relative_vol"] = df["volatility"] - df["fixed_cost"]
df["relative_vol"].plot(figsize=(20, 6), title="relative volatility")
plt.show()
df["relative_vol"].hist(bins=200, figsize=(20, 6), grid=False)
plt.show()
statitstic_info(df["relative_vol"])
def growth_analysis(self, df: DataFrame = None):
"""
        Percentage change rate of the bars
        """
        output("Step 6: change rate analysis")
df["pre_close"] = df["close"].shift(1).fillna(0)
df["g%"] = 100 * (df["close"] - df["pre_close"]) / df["close"]
df["g%"].plot(figsize=(20, 6), title="growth", ylim=(-5, 5))
plt.show()
df["g%"].hist(bins=200, figsize=(20, 6), grid=False)
plt.show()
statitstic_info(df["g%"])
def calculate_index(self, df: DataFrame = None):
""""""
output("第七步:计算相关技术指标,返回DataFrame\n")
if self.index_1to1:
for i in self.index_1to1:
func = getattr(talib, i)
df[i] = func(
np.array(df["close"]),
self.window_index
)
if self.index_3to1:
for i in self.index_3to1:
func = getattr(talib, i)
df[i] = func(
np.array(df["high"]),
np.array(df["low"]),
np.array(df["close"]),
self.window_index
)
if self.index_2to2:
for i in self.index_2to2:
func = getattr(talib, i)
result_down, result_up = func(
np.array(df["high"]),
np.array(df["low"]),
self.window_index
)
up = i + "_UP"
down = i + "_DOWN"
df[up] = result_up
df[down] = result_down
if self.index_2to1:
for i in self.index_2to1:
func = getattr(talib, i)
df[i] = func(
np.array(df["high"]),
np.array(df["low"]),
self.window_index
)
if self.index_4to1:
for i in self.index_4to1:
func = getattr(talib, i)
df[i] = func(
np.array(df["open"]),
np.array(df["high"]),
np.array(df["low"]),
np.array(df["close"]),
)
return df
def multi_time_frame_analysis(self, intervals: list = None, df: DataFrame = None):
""""""
if not intervals:
output("请输入K线合成周期")
return
if df is None:
df = self.orignal
if df is None:
output("请先加载数据")
return
for interval in intervals:
output("------------------------------------------------")
output(f"合成{interval}周期K先并开始数据分析")
data = pd.DataFrame()
data["open"] = df["open"].resample(interval, how="first")
data["high"] = df["high"].resample(interval, how="max")
data["low"] = df["low"].resample(interval, how="min")
data["close"] = df["close"].resample(interval, how="last")
data["volume"] = df["volume"].resample(interval, how="sum")
result = self.base_analysis(data)
self.results[interval] = result
def show_chart(self, data, boll_wide):
""""""
data["boll_up"] = data["SMA"] + data["STDDEV"] * boll_wide
data["boll_down"] = data["SMA"] - data["STDDEV"] * boll_wide
up_signal = []
down_signal = []
len_data = len(data["close"])
for i in range(1, len_data):
if data.iloc[i]["close"] > data.iloc[i]["boll_up"]and data.iloc[i - 1]["close"] < data.iloc[i - 1]["boll_up"]:
up_signal.append(i)
elif data.iloc[i]["close"] < data.iloc[i]["boll_down"] and data.iloc[i - 1]["close"] > data.iloc[i - 1]["boll_down"]:
down_signal.append(i)
plt.figure(figsize=(20, 8))
close = data["close"]
plt.plot(close, lw=1)
plt.plot(close, '^', markersize=5, color='r',
label='UP signal', markevery=up_signal)
plt.plot(close, 'v', markersize=5, color='g',
label='DOWN signal', markevery=down_signal)
plt.plot(data["boll_up"], lw=0.5, color="r")
plt.plot(data["boll_down"], lw=0.5, color="g")
plt.legend()
plt.show()
data["ATR"].plot(figsize=(20, 3), title="ATR")
plt.show()
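        # The signal loop above flags an UP signal when the close crosses from below to
        # above the upper band and a DOWN signal on a cross below the lower band; the
        # bands are data["SMA"] +/- boll_wide * data["STDDEV"], built at the top of this method.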
def random_test(close_price):
""""""
acorr_result = acorr_ljungbox(close_price, lags=1)
p_value = acorr_result[1]
if p_value < 0.05:
output("第二步:随机性检验:非纯随机性")
else:
output("第二步:随机性检验:纯随机性")
output(f"白噪声检验结果:{acorr_result}\n")
def stability_test(close_price):
""""""
statitstic = ADF(close_price)
    t_s = statitstic[0]
t_c = statitstic[4]["10%"]
if t_s > t_c:
output("第三步:平稳性检验:存在单位根,时间序列不平稳")
else:
output("第三步:平稳性检验:不存在单位根,时间序列平稳")
output(f"ADF检验结果:{statitstic}\n")
def autocorrelation_test(close_price):
""""""
output("第四步:画出自相关性图,观察自相关特性")
plot_acf(close_price, lags=60)
plt.show()
plot_pacf(close_price, lags=60).show()
plt.show()
def statitstic_info(df):
""""""
mean = round(df.mean(), 4)
median = round(df.median(), 4)
output(f"样本平均数:{mean}, 中位数: {median}")
skew = round(df.skew(), 4)
kurt = round(df.kurt(), 4)
    if skew == 0:
        skew_attribute = "symmetric"
    elif skew > 0:
        skew_attribute = "positively skewed (long right tail)"
    else:
        skew_attribute = "negatively skewed (long left tail)"
    if kurt == 0:
        kurt_attribute = "mesokurtic (normal-like)"
    elif kurt > 0:
        kurt_attribute = "leptokurtic (peaked)"
    else:
        kurt_attribute = "platykurtic (flat)"
    output(f"Skewness: {skew} ({skew_attribute}); kurtosis: {kurt} ({kurt_attribute})\n")
def output(msg):
"""
Output message of backtesting engine.
"""
print(f"{datetime.now()}\t{msg}")
| mit |
ClimbsRocks/scikit-learn | examples/mixture/plot_gmm_selection.py | 95 | 3310 | """
================================
Gaussian Mixture Model Selection
================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
import numpy as np
import itertools
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
print(__doc__)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a Gaussian mixture with EM
gmm = mixture.GaussianMixture(n_components=n_components,
covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
'darkorange'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, cov, color) in enumerate(zip(clf.means_, clf.covariances_,
color_iter)):
v, w = linalg.eigh(cov)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
SylvainGuieu/smartplotlib | plotclasses.py | 1 | 26242 | from __future__ import division, absolute_import, print_function
from .recursive import alias, KWS
from .base import PlotFactory, rproperty
from . import plotfuncs as pfs
from .figaxes import subplot, plots, axes, figure, fa
import numpy as np
class _BaseFigAxes(object):
""" just a colection of usefull axes/figure
for other plots.
"""
subplot = subplot
axes = axes
plots = plots
figure = figure
aclear = pfs.aclear
fclear = pfs.fclear
cla = pfs.aclear
clf = pfs.fclear
def fa(self, **kwargs):
self.axes(**kwargs)
self.figure(**kwargs)
#axes = axes
#fset = fset
show = pfs.show
draw = pfs.draw
legend = pfs.legend
grid = pfs.grid
savefig = pfs.savefig
@rproperty
def grids(self):
""" axes.grids plot setter """
return self.axes.grids
@rproperty
def xlabel(self):
""" axes.x.label plot setter """
return self.axes.x.label
@rproperty
def ylabel(self):
""" axes.y.label plot setter """
return self.axes.y.label
@rproperty
def labels(self):
""" axes.labels plot setter """
return self.axes.labels
@rproperty
def xaxis(self):
""" axes.x plot setter """
return self.axes.x
@rproperty
def yaxis(self):
""" axes.y plot setter """
return self.axes.y
@rproperty
def ticks(self):
""" axes.ticks plot setter """
return self.ticks
@rproperty
def xticks(self):
""" axes.x.ticks plot setter """
return self.axes.x.ticks
@rproperty
def yticks(self):
""" axes.y.ticks plot setter """
return self.axes.y.ticks
def get_axes(self):
""" Return the matplotlib axes linked """
return pfs.get_axes(self.get("axes", None), self.get("figure", None))
def get_figure(self):
""" Return the matplotlib figure linked """
return pfs.get_figure(self.get("figure", None), self.get("axes", None))
@property
def a(self):
return self.get_axes()
@property
def f(self):
return self.get_figure()
"""
Define the main plot classes and their plotfuncs
All the other more sophisticated Plots are added later
"""
def _k2edge(plot,k):
"""convert array plot[k] to bar edge (remove the last)"""
return np.asarray(plot[k])[:-1]
def _k2width(plot,k):
"""convert array plot[k] to bar width np.diff()"""
return np.diff(plot[k])
def _make_array(value):
if not isinstance(value, np.ndarray):
return np.asarray(value)
return value
class _subxy(object):
def __init__(self, _xyplot=None):
self.xyplot = _xyplot
def __get__(self, xyplot, cl=None):
if xyplot is None:
return self
new = self.__class__(xyplot)
return new
def __getitem__(self, item):
xyplot = self.xyplot
try:
y = xyplot["y"]
except KeyError:
raise TypeError("missing y data, cannot extract sub-data")
y = _make_array(y)[item]
is_scalar = not hasattr(y, "__iter__")
if is_scalar:
new = scalarplot.derive(value=y)
else:
new = xyplot.derive(y=y)
try:
x = xyplot["x"]
except KeyError:
pass
else:
if x is not None:
if is_scalar:
new["index"] = _make_array(x)[item]
else:
new["x"] = _make_array(x)[item]
try:
xerr = xyplot["xerr"]
except KeyError:
pass
else:
if not is_scalar and (xerr is not None) and hasattr(xerr, "__iter__"):
new["xerr"] = _make_array(xerr)[item]
try:
yerr = xyplot["yerr"]
except KeyError:
pass
else:
if yerr is not None and hasattr(yerr, "__iter__"):
if is_scalar:
new["err"] = _make_array(yerr)[item]
else:
new["yerr"] = _make_array(yerr)[item]
elif yerr is not None:
new["yerr"] = _make_array(yerr)
return new
def __getslice__(self, start, end):
return self.__getitem__(slice(start, end))
class XYFit(PlotFactory, _BaseFigAxes):
""" a PlotColection of fit factory for x vs y type of data
PlotFactory Methods:
|------------|------------|-------------------------------------------------|
| method | Factory | comment |
|------------|------------|-------------------------------------------------|
| polynome | XYPlot | polynome fitting capability from x/y data |
| linear | XYPlot | polynome fit with dim=1 |
| inverse | XYPlot | fit y = (a*x +b) |
"""
pass
class XYPlot(PlotFactory, _BaseFigAxes):
""" PlotFactory. Return a new instance of xyplot ready to plot 2d x/y data
call signature:
xyplot(y)
xyplot(x,y)
xyplot(x,y, yerr, xerr, **kwargs)
    All arguments can inherit from a parent Plot object if any.
    All args can be keyword arguments or assigned parameter as obj['x'] = array(..)
    Use .info attribute to print a state of this current PlotFactory object
Args:
x (array-like) : the x absis data
y (array-like) : y-axis data
yerr (Optional[array-like]) : error on y-data, can be scalar or None.
xerr (Optional[array-like]) : error on x-data, can be scalar or None.
params (Optional[dict]) : A dictionary of any parameters for child plots
or child plots function (FuncPlot)
go (Optional[list of string]) : Action taken after the ready-instance created
see the .go method help
**kwargs : Any other parameters that will ramp-up the hierarchy of childs
Returns:
xyplot : New ready instance of xyplot.
Machined Parameters:
x (array-like) : if not present x = np.arange(len(y))
All other are set as they are
PlotFunc Methods:
The ones using "x" "y" like:
|---------------|--------------------------------------------------------------|
| method | action |
|---------------|--------------------------------------------------------------|
| plot | plot line point etc... |
| errorbar | errobar plotting |
| scatter | scattered points plot |
| step | plot line has step |
    | fill | polygon fill |
| xhist | histogram on x axis of x data |
| yhist | histogram on y axis of y data |
    | vlines | vertical lines where by default ymin=0 and ymax=alias("y") |
    | hlines | horizontal lines where by default xmin=0 and xmax=alias("x") |
| axvline | Use only the x data |
| | (axes ymin and ymax are 0 and 1 by default) |
| axhline | Use only the y data |
| | (axes xmin and xmax are 0 and 1 by default) |
    | bar2x | plot bars horizontally; edges are y[:-1], widths are diff(y) |
    | | and height are x. align="center" by default |
    | bar2y | plot bars vertically; edges are x[:-1], widths are diff(x) |
| | and height are y. align="center" by default |
| bar | is bar2y |
| annotates | annotate x/y points |
    | fill_betweeny | fill between y lines. Use 'y' by default for the first line |
    | fill_betweenx | fill between x lines. Use 'x' by default for the first line |
    | fill_between | is fill_betweeny |
    | fillstep | plot a polygon that envelops data if it was plotted with bars |
|---------------|--------------------------------------------------------------|
Not related to "x", "y" data, for conveniant use:
{conveniant}
PlotFactory Methods:
|------------|------------|-------------------------------------------------|
| method | Factory | comment |
|------------|------------|-------------------------------------------------|
| polyfit | XYPlot | polynome fitting capability from x/y data |
| linearfit | XYPlot | same as polyfit with ["dim"] = 1 |
| ybinedstat | XYPlot | make plot-ready binned statistic of y data. |
    | | | this is the same thing as .ydata.binedstat |
| ydata | DataPlot | a dataplot factory where data=alias("y") |
    | xdata | DataPlot | a dataplot factory where data=alias("x") |
| cohere | XYPlot | coherence plot factory on 'x' vs 'y' by default |
| csd | XYPlot | Cross Spectral Density plot factory on |
| | | 'x' vs 'y' by default |
| histogram2 | XYZPlot | histogram2d Factory from 'x' and 'y' |
| | | |
| | | |
| ystat | ScalarPlot | scalar statistic plot factory on y-data |
    | | | (i.e. mean, median, etc.). Same as ydata.stat |
| xstat | ScalarPlot | scalar statistic plot factory on x-data |
    | | | (i.e. mean, median, etc.). Same as xdata.stat |
|------------|------------|-------------------------------------------------|
| xmedian | ScalarPlot | xstat with fstat="median" |
| ymedian | ScalarPlot | ystat with fstat="median" |
| xmean | ScalarPlot | xstat with fstat="mean" |
| ymean | ScalarPlot | ystat with fstat="mean" |
| xmin | ScalarPlot | xstat with fstat="min" |
| ymin | ScalarPlot | ystat with fstat="min" |
| xmax | ScalarPlot | xstat with fstat="max" |
| ymax | ScalarPlot | ystat with fstat="max" |
| xstd | ScalarPlot | xstat with fstat="std" |
| ystd | ScalarPlot | ystat with fstat="std" |
|------------|------------|-------------------------------------------------|
    | subplot | SubPlot | return the subplot factory with all the defaults |
    | | | of this xyplot |
    | plots | Plots | return a plots factory (plots linked to figure) |
    | | | with all defaults taken from this xyplot |
    | fits | PlotCollec | contains fit plot factories |
|------------|------------|-------------------------------------------------|
    Other useful methods:
{useful}
And Attributes:
{usefulattr}
    See the doc of PlotFactory for other methods shared by PlotFactory objects
"""
sub = _subxy()
fits = XYFit()
plot = pfs.plot
step = pfs.step
errorbar = pfs.errorbar
scatter = pfs.scatter
## fit and statistic
xhist = pfs.hist.derive(orientation="vertical", data=alias("x"))
yhist = pfs.hist.derive(orientation="horizontal", data=alias("y"))
vlines = pfs.vlines.derive(ymin=0, ymax=alias("y"))
hlines = pfs.hlines.derive(xmin=0, xmax=alias("x"))
##
# put the default in axvline and axhline, they have
# nothing to do with data but axes
axvline = pfs.axvline.derive(ymin=0, ymax=1.0)
axhline = pfs.axhline.derive(xmin=0, xmax=1.0)
bar2y = pfs.bar.derive(edge= alias(lambda p:_k2edge(p,"x"), "-> x[:-1]"),
width=alias(lambda p:_k2width(p,"x"), "-> diff(x)"),
height=alias(lambda p:_k2edge(p,"y"), "-> y[:-1]"),
align="center"
)
bar2x = pfs.bar.derive(edge=alias(lambda p:_k2edge(p,"y"), "-> y[:-1]"),
width=alias(lambda p:_k2width(p,"y"), "-> diff(y)"),
height=alias(lambda p:_k2edge(p,"x"), "-> x[:-1]"),
align="center", direction="x"
)
bar = bar2y
fillstep = pfs.fillstep
fill = pfs.fill
fill_betweeny = pfs.fill_betweeny.derive(y1=alias("y"))
fill_betweenx = pfs.fill_betweenx.derive(x1=alias("x"))
fill_between = fill_betweeny
annotates = pfs.annotates
@staticmethod
def finit(plot, *args,**kwargs):
""" """
plot.update(kwargs.pop(KWS,{}), **kwargs)
x, y = plot.parseargs(args, "x","y",
x=None, y=None
)
if y is None: # with one arg, x is y
if x is not None:
y, x = x, None
plot.update(y=y)
#if x is None:
# x = alias(lambda p:np.arange(np.asarray(p["y"]).shape[0]), "-> arange(len(y))")
plot.update(x=x)
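    # --- Illustrative usage sketch for this factory ---
    # It follows the call signature and the .go() actions documented in the
    # class docstring above; the routing of "xlabel"/"ylabel" keywords to the
    # axes is an assumption about how extra parameters propagate.
    #
    #   x = np.arange(100)
    #   p = xyplot(x, x ** 2, yerr=5.0, xlabel="x", ylabel="x squared")
    #   p.plot()                        # PlotFunc: draw the x/y line
    #   p.errorbar()                    # PlotFunc: draw the error bars
    #   p.go("axes", "legend", "show")  # apply axes params, legend, show figure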
class _subdata(object):
def __init__(self, _dataplot=None):
self.dataplot = _dataplot
def __get__(self, dataplot, cl=None):
if dataplot is None:
return self
new = self.__class__(dataplot)
return new
def __getitem__(self, item):
dataplot = self.dataplot
try:
data = dataplot["data"]
except KeyError:
raise TypeError("missing 'data', cannot extract sub-data")
data = _make_array(data)[item]
is_scalar = not hasattr(data, "__iter__")
if is_scalar:
new = scalarplot.derive(value=data)
else:
new = dataplot.derive(data=data)
try:
x = dataplot["indexes"]
except KeyError:
pass
else:
if is_scalar:
new["index"] = _make_array(x)[item]
else:
new["indexes"] = _make_array(x)[item]
try:
weights = dataplot["weights"]
except KeyError:
pass
else:
if not is_scalar and (weights is not None) and hasattr(weights, "__iter__"):
new["weights"] = _make_array(weights)[item]
return new
def __getslice__(self, start, end):
return self.__getitem__(slice(start, end))
class DataPlot(PlotFactory, _BaseFigAxes):
""" PlotFactory for single array like data
Contain a colection of PlotFunc and PlotFactory to represend
the data and its statistic.
Use .info attribute to print a state of this curent PlotFactory object
Args:
data (array-like) : the (mostly 1-d) data
    indexes (Optional[array]) : optional array of data indexes, can be used for
binedstat or stat factories.
params (Optional[dict]) : A dictionary of any parameters for child plots
or child plots function (FuncPlot)
go (Optional[list of string]) : Action taken after the ready-instance created
see the .go method help
    **kwargs : Any other parameters that will be passed down the hierarchy of child plots
Returns:
dataplot factory : a new instance of self
Machined Parameters:
None, all are set as they are
PlotFunc Methods:
|--------|-------------------------|
| method | action |
|--------|-------------------------|
| hist | plot histogram directly |
|--------|-------------------------|
Not related to "x", "y" data, for conveniant use:
{conveniant}
PlotFactory Methods:
|--------------------|------------|-------------------------------------------------|
| method | Factory | comment |
|--------------------|------------|-------------------------------------------------|
| histogram | DataPlot | Histogram factory on *data* |
| binedstat | DataPlot | binned statistic (mean, max, etc) on *data* |
    | stat | ScalarPlot | A scalar factory that contains full data |
| | | statistics. |
| distribfit | XYPlot | Fit the data distribution (e.g. with |
    | | | 'normal'). Useful to plot on top of histograms |
    | mean, median, min | ScalarPlot | inherit from stat with the right fstat param |
| max, std | | |
| | | arange(len(data)) else |
| specgram | ImgPlot | a specgram factory (produce image) |
| psd | XYPlot | power spectral density factory |
| angle_spectrum | XYPlot | Angle spectrum factory from *data* see doc |
| magnitude_spectrum | XYPlot | Magnitude Spectrum factory .. |
| phase_spectrum | XYPlot | Phase Spectrum factory |
|--------------------|------------|-------------------------------------------------|
    | subplot | SubPlot | return the subplot factory with all the defaults |
    | | | of this dataplot |
    | plots | Plots | return a plots factory (plots linked to figure) |
    | | | with all defaults taken from this dataplot |
| | | |
|--------------------|------------|-------------------------------------------------|
    Other useful methods:
{useful}
And Attributes:
{usefulattr}
"""
sub = _subdata()
subplot = subplot
plots = plots
lines = pfs.lines
vlines = pfs.vlines.derive(x=alias("data"))
hlines = pfs.vlines.derive(y=alias("data"))
hist = pfs.hist
@staticmethod
def finit(plot, data, **kwargs):
plot.update(kwargs.pop(KWS,{}), **kwargs)
data, = plot.parseargs([data], "data")
plot["data"] = data
def colors_or_z(p,k):
return p[k] if k in p else p["z"]
class XYZPlot(PlotFactory, _BaseFigAxes):
""" plot for 3 dimentional data """
imshow = pfs.imshow.derive(img=alias("colors"), colors=alias("Z"))
pcolor = pfs.pcolor
pcolormesh = pfs.pcolormesh
pcolorfast = pfs.pcolorfast
contour = pfs.contour
contourf = pfs.contourf
colorbar = pfs.colorbar
class _subimg(object):
def __init__(self, _imgplot=None):
self.imgplot = _imgplot
def __get__(self, imgplot, cl=None):
if imgplot is None:
return self
new = self.__class__(imgplot)
return new
def __getitem__(self, item):
imgplot = self.imgplot
try:
img = imgplot["img"]
except KeyError:
raise TypeError("'img' is not defined, cannot extract sub-image")
img = img[item]
if not hasattr(img, "__iter__"): # this is a scalar
return scalarplot(img)
if len(img.shape)<2: # this is a vector
return xyplot( np.arange(len(img)), img )
if self.imgplot:
return self.imgplot.derive(img=img)
return imgplot(img=img)
def __getslice__(self, start, end):
return self.__getitem__(slice(start, end))
class ImgPlot(PlotFactory, _BaseFigAxes):
""" plot used for image """
imshow = pfs.imshow
hist = pfs.hist.derive(data=alias(lambda p: np.asarray(p["img"]).flatten(), "-> img.flatten()"))
colorbar = pfs.colorbar
sub = _subimg()
@staticmethod
def finit(plot, img, **kwargs):
plot.update(kwargs.pop(KWS,{}), **kwargs)
img, = plot.parseargs([img], "img")
plot["img"] = img
class ScalarPlot(PlotFactory, _BaseFigAxes):
axline = pfs.axline
##
    # here ymin and ymax have nothing to do with the data
# change it to 0,1 by default
axvline = pfs.axvline.derive(x=alias("value"),ymin=0,ymax=1)
axhline = pfs.axhline.derive(y=alias("value"),xmin=0,xmax=1)
vlines = pfs.vlines.derive(x=alias(lambda p:np.asarray(p["value"]), "-> array(value)"))
    hlines = pfs.hlines.derive(y=alias(lambda p:np.asarray(p["value"]), "-> array(value)"))
    lines = pfs.lines.derive(data=alias(lambda p:np.asarray(p["value"]), "-> array(value)"))
axspan = pfs.axspan.derive(value1=alias("previous"), value2=alias("value"))
axvspan = pfs.axvspan.derive(xmin=alias("previous"), xmax=alias("value"))
    axhspan = pfs.axhspan.derive(ymin=alias("previous"), ymax=alias("value"))
text = pfs.text.derive(text=alias(lambda p: str(p["value"]), "-> str(value)"))
annotate = pfs.annotate
@staticmethod
def finit(plot, *args, **kwargs):
plot.update(kwargs.pop(KWS,{}), **kwargs)
value = plot.parseargs(args, "value")
plot["value"] = value
conveniant = """
|--------|-------------------------------------------------|
| method | action |
|--------|-------------------------------------------------|
| axes | set all valid axes parameters to the axes plot. |
| | Like "xlabel", "ylabel", grid, title etc ... |
| figure | set all figure parameters |
| aclear | clear the axes |
| fclear | clear the figure |
| legend | plot the axes legend |
| show | figure.show() |
| draw | figure.canvas.draw() |
| grids | axes.grids setting |
|--------|-------------------------------------------------|
"""
useful = """
| method | return | comment |
|------------|----------------|-----------------------------------------------------|
| update | None | update parameters of the factory |
| derive | (Self class) | A new factory instance where defaults are |
| | | in self |
| go | dict (results) | execute a list actions from list of strings |
| | | e.g. .go("axes", "legend", "show") |
| get | any object | As for dict get a parameter value |
| pop | any object | As for dict pop a parameter |
| clear | None | clear the local parameters |
| setdefault | None | As for dict set a default parameter |
| iter | (self class) | Iter on iterables and return new instances of self |
| iteraxes | (self class) | Same has iter but buil *axes* params for each new |
| iterfigure | (self class) | Same has iter but buil *figure* params for each new |
| itercall | (self class) | Iter on iterables and return new() |
| get_axes | plt.Axes | return the matplotlib Axes instance obect |
| get_figure | plt.Figure | return the matplotlib Figure instance obect |
| | | |
"""
usefulattr = """
| attribute | return | comment |
|-----------|-------------|---------------------------------------------|
| info | None | Print a state of the Factory object |
| all | dict | return a dictionary with all parameters set |
| | | the locals and the inerited. |
| locals | dict | the local parameter dictionary |
| example | string/None | If any return an string example ready for |
| | | print or exec |
| axes | plt.Axes | the matplotlib Axes instance obect |
| figure | plt.Figure | the matplotlib Figure instance obect |
|-----------|-------------|---------------------------------------------|
"""
xyplot = XYPlot()
xyplot.finit.__doc__ = XYPlot.__doc__.format(conveniant=conveniant,
useful=useful, usefulattr=usefulattr
)
xyplot.__doc__ = xyplot.finit.__doc__
xyplot["_example_"] = ("xyplot", None)
dataplot = DataPlot()
dataplot["_example_"] = ("histogram", None)
xyzplot = XYZPlot()
imgplot = ImgPlot()
scalarplot = ScalarPlot()
| gpl-2.0 |
valexandersaulys/airbnb_kaggle_contest | prototype_alpha/xgboost_take2.py | 1 | 1916 | """
Take 2 using XGBoost, predicting for country_destination.
"""
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
training = pd.read_csv("protoAlpha_training.csv")
testing = pd.read_csv("protoAlpha_testing.csv")
X = training.iloc[:,1:-1].values
y = training['country_destination'].values
x_train,x_valid,y_train,y_valid = train_test_split(X,y,test_size=0.3,random_state=None)
# LabelEncoder
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit(y_train);
y_train = le.transform(y_train);
y_valid = le.transform(y_valid);
# Train classifier
import xgboost as xgb
xg_train = xgb.DMatrix(x_train,label=y_train);
xg_valid = xgb.DMatrix(x_valid,label=y_valid);
# setup parameters for xgboost
param = {}
# use softmax multi-class classification
param['objective'] = 'multi:softmax'
# scale weight of positive examples
param['eta'] = 0.7
param['max_depth'] = 100
param['silent'] = 1
param['nthread'] = 5
param['num_class'] = len(np.unique(y_train).tolist());
# Train & Get validation data
num_round = 1000
clf = xgb.train(param, xg_train, num_round);
# get prediction
y_preds = clf.predict( xg_valid );
# Run Predictions
from sklearn.metrics import confusion_matrix, accuracy_score
print( confusion_matrix(y_valid,y_preds) );
print( "Accuracy: %f" % (accuracy_score(y_valid,y_preds)) );
f = open('xgboost_take2.txt', 'w')
f.write( str(confusion_matrix(y_valid,y_preds)) );
f.write( "\nAccuracy: %f" % (accuracy_score(y_valid,y_preds)) );
f.write( "\nclf = xgboost{1000 rounds, 0.7 eta, 100 max_depth}" );
# Now on to final submission
xg_test = xgb.DMatrix(testing.iloc[:,1:].values);
y_final = le.inverse_transform( clf.predict(xg_test).reshape([62096,]).astype(int) );
y_final = pd.DataFrame(y_final);
numbahs = testing['id']
df = pd.concat([numbahs,y_final],axis=1)
df.columns = ['id','country']
df.to_csv("xgboost_take2.csv",index=False)
| gpl-2.0 |
GreenGear5/planet-wars | train-bot.py | 1 | 6405 | import argparse
import multiprocessing
import multiprocessing.pool
import sys
import random
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
from api import State, util
from bots.featurebot.featurebot import features
from bots.rand import rand
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
colors = {
'SUCCESS': GREEN,
'INFO': BLUE,
'WARN': YELLOW,
'FAIL': RED
}
# How many games to play
args = None
NOTIFY_AMOUNT = 50
def main():
pool = multiprocessing.Pool(processes=args.parallelism)
bots = []
for id, botname in enumerate(args.players):
bots.append(util.load_player(botname))
matches = len(bots) * args.matches * len(args.planets)
log("Training against {} Bots, {} Maps, {} Matches".format(len(bots), len(args.planets), matches))
data, target = [], []
try:
i = 0
for ret in pool.imap_unordered(execute, gen_rounds(bots)):
i += 1
(bid, mid), winner, state_vectors, (map_size, seed) = ret
if winner == 1:
result = 'won'
elif winner == 2:
result = 'lost'
else:
result = 'draw'
data += state_vectors
target += [result] * len(state_vectors)
log("({}:{} | {}:{}): {}".format(bid, mid, map_size, seed, result), lvl=1)
if i % NOTIFY_AMOUNT == 0:
log("Finished {}/{} matches ({:.2f})%.".format(i, matches, (float(i) / matches * 100)))
except KeyboardInterrupt:
log("Tournament interrupted by user", type="FAIL")
pool.terminate()
pool.join()
sys.exit(1)
pool.close()
pool.join()
log("All games finished", type="SUCCESS")
generate_model(data, target)
# If you wish to use a different model, this
# is where to edit
def generate_model(data, target):
log("Training logistic regression model", lvl=1)
learner = LogisticRegression()
model = learner.fit(data, target)
log("Checking class imbalance", lvl=1)
count = {}
for str in target:
if str not in count:
count[str] = 0
count[str] += 1
log("Instances per class: {}".format(count))
try:
joblib.dump(model, args.model, compress=True)
except IOError as e:
log("Failed to dump model: "+args.model, type="FAIL")
print e
return
log("Done. Model saved at: "+args.model, type="SUCCESS")
def gen_rounds(bots):
for bid, bot in enumerate(bots):
for map_id, map_size in enumerate(args.planets):
for i in range(args.matches):
mid = map_id * args.matches + i
seed = random.randint(0, 100000)
yield ((bid, mid), bot, (map_size, seed, args.max_turns, args.asym))
def execute(params):
ids, bot, (map_size, seed, max_turns, asym) = params
state, _ = State.generate(map_size, seed, symmetric=not asym)
state_vectors = []
i = 0
while not state.finished() and i <= max_turns:
state_vectors.append(features(state))
move = bot.get_move(state)
state = state.next(move)
i += 1
winner = state.winner()
return ids, winner, state_vectors, (map_size, seed)
# following from Python cookbook, #475186
def has_colours(stream):
if not hasattr(stream, "isatty"):
return False
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
# guess false in case of error
return False
def log(s, type='INFO', lvl=0):
color = WHITE
if type in colors:
color = colors[type]
if args.verbose >= lvl:
sys.stdout.write("[")
printout("%07s" % type, color)
sys.stdout.write("] %s\n" % s)
def printout(text, colour=WHITE):
if args.color:
seq = "\x1b[1;%dm" % (30 + colour) + text + "\x1b[0m"
sys.stdout.write(seq)
else:
sys.stdout.write(text)
def optparse():
global args
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', '--color', action='store_true', dest='color',
help="force color output")
parser.add_argument('-n', '--no-color', action='store_false', dest='color',
help="force disable color output")
parser.add_argument("-p", "--num-planets",
dest="planets",
help="List of map sizes to use",
type=int, nargs='*',
default=[6])
parser.add_argument("-m", "--num-matches",
dest="matches",
help="Amount of matches played per map size",
type=int, default=1000)
parser.add_argument("-t", "--max-time",
dest="max_time",
help="Maximum amount of time allowed per turn in seconds",
type=float, default=5)
parser.add_argument("-T", "--max-turns",
dest="max_turns",
help="Maximum amount of turns per game",
type=int, default=100)
parser.add_argument("model",
help="Output file for model",
type=str, default="./bots/featurebot/kr-model.pkl")
parser.add_argument("players",
metavar="player",
help="Players for the game",
type=str, nargs='+')
parser.add_argument("-P", "--pool-size",
dest="parallelism",
help="Pool size for parallelism. Do not use unless you know what you are doing",
type=int, default=multiprocessing.cpu_count())
parser.add_argument("-v", "--verbose",
action="count", default=0,
help="Show more output")
parser.add_argument("-a", "--asym", dest="asym",
help="Whether to start with an asymmetric state.",
action="store_true")
parser.set_defaults(color=has_colours(sys.stdout))
args = parser.parse_args()
if __name__ == "__main__":
optparse()
main()
| mit |
cvjena/libmaxdiv | experiments/gaussian_bias.py | 1 | 1754 | """ Create an example for the small-interval bias of the KL divergence. """
import sys
sys.path.append('..')
import numpy as np
import matplotlib.pylab as plt
from maxdiv.maxdiv import maxdiv
from maxdiv.eval import plotDetections
# Create synthetic time series with one/two huge flat region(s)
mean = 5.0
sd = 2.0
ts_len = 500
window_center1 = 350
window_center2 = 100
window_sd = 10
np.random.seed(0)
gauss_window1 = np.exp(-0.5 * ((np.arange(0.0, ts_len) - window_center1) ** 2) / (window_sd ** 2))
gauss_window2 = np.exp(-0.5 * ((np.arange(0.0, ts_len) - window_center2) ** 2) / (window_sd ** 2))
ts = mean + np.random.randn(ts_len) * sd
ts1 = gauss_window1 * ts + 0.1 * np.random.randn(ts_len)
ts2 = (gauss_window1 + gauss_window2) * ts + 0.1 * np.random.randn(ts_len)
gt = [(window_center1 - 3 * window_sd, window_center1 + 3 * window_sd + 1), (window_center2 - 3 * window_sd, window_center2 + 3 * window_sd + 1)]
# Apply MDI Gaussian on different scenarios
print('--- OMEGA_I on single extremum ---')
det = maxdiv(ts1.reshape((1, ts_len)), 'gaussian_cov', None, mode = 'OMEGA_I', extint_min_len = 10, extint_max_len = 8 * window_sd, preproc = 'td')
plotDetections(ts1.reshape((1, ts_len)), det, [gt[0]], silent = False)
print('--- I_OMEGA on single extremum ---')
det = maxdiv(ts1.reshape((1, ts_len)), 'gaussian_cov', None, mode = 'I_OMEGA', extint_min_len = 10, extint_max_len = 8 * window_sd, preproc = 'td')
plotDetections(ts1.reshape((1, ts_len)), det, [gt[0]], silent = False)
print('--- I_OMEGA on two extrema ---')
det = maxdiv(ts2.reshape((1, ts_len)), 'gaussian_cov', None, mode = 'I_OMEGA', extint_min_len = 10, extint_max_len = 8 * window_sd, preproc = 'td')
plotDetections(ts2.reshape((1, ts_len)), det, gt, silent = False)
| lgpl-3.0 |
veeresht/CommPy | commpy/examples/conv_encode_decode.py | 1 | 5009 | # Authors: CommPy contributors
# License: BSD 3-Clause
from __future__ import division, print_function # Python 2 compatibility
import math
import matplotlib.pyplot as plt
import numpy as np
import commpy.channelcoding.convcode as cc
import commpy.channels as chan
import commpy.links as lk
import commpy.modulation as mod
import commpy.utilities as util
# =============================================================================
# Convolutional Code 1: G(D) = [1+D^2, 1+D+D^2]
# Standard code with rate 1/2
# =============================================================================
# Number of delay elements in the convolutional encoder
memory = np.array(2, ndmin=1)
# Generator matrix
g_matrix = np.array((0o5, 0o7), ndmin=2)
# Create trellis data structure
trellis1 = cc.Trellis(memory, g_matrix)
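
# Note added for clarity (not in the original example): the octal generator
# entries are read as binary tap patterns of the generator polynomials, e.g.
#   0o5 = 101 -> 1 + D^2
#   0o7 = 111 -> 1 + D + D^2
# which matches G(D) = [1+D^2, 1+D+D^2] for trellis1 above.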
# =============================================================================
# Convolutional Code 2: G(D) = [1+D^2, 1+D^2+D^3]
# Standard code with rate 1/2
# =============================================================================
# Number of delay elements in the convolutional encoder
memory = np.array(3, ndmin=1)
# Generator matrix (1+D^2+D^3 <-> 13 or 0o15)
g_matrix = np.array((0o5, 0o15), ndmin=2)
# Create trellis data structure
trellis2 = cc.Trellis(memory, g_matrix)
# =============================================================================
# Convolutional Code 3: G(D) = [[1, 0, 0], [0, 1, 1+D]]; F(D) = [[D, D], [1+D, 1]]
# RSC with rate 2/3
# =============================================================================
# Number of delay elements in the convolutional encoder
memory = np.array((1, 1))
# Generator matrix & feedback matrix
g_matrix = np.array(((1, 0, 0), (0, 1, 3)))
feedback = np.array(((2, 2), (3, 1)))
# Create trellis data structure
trellis3 = cc.Trellis(memory, g_matrix, feedback, 'rsc')
# =============================================================================
# Basic example using homemade counting and hard decoding
# =============================================================================
# Traceback depth of the decoder
tb_depth = None # Default value is 5 times the number or memories
for trellis in (trellis1, trellis2, trellis3):
for i in range(10):
# Generate random message bits to be encoded
message_bits = np.random.randint(0, 2, 1000)
# Encode message bits
coded_bits = cc.conv_encode(message_bits, trellis)
# Introduce bit errors (channel)
coded_bits[np.random.randint(0, 1000)] = 0
coded_bits[np.random.randint(0, 1000)] = 0
coded_bits[np.random.randint(0, 1000)] = 1
coded_bits[np.random.randint(0, 1000)] = 1
# Decode the received bits
decoded_bits = cc.viterbi_decode(coded_bits.astype(float), trellis, tb_depth)
num_bit_errors = util.hamming_dist(message_bits, decoded_bits[:len(message_bits)])
if num_bit_errors != 0:
print(num_bit_errors, "Bit Errors found!")
elif i == 9:
print("No Bit Errors :)")
# ==================================================================================================
# Complete example using Commpy features and compare hard and soft demodulation. Example with code 1
# ==================================================================================================
# Modem : QPSK
modem = mod.QAMModem(4)
# AWGN channel
channels = chan.SISOFlatChannel(None, (1 + 0j, 0j))
# SNR range to test
SNRs = np.arange(0, 6) + 10 * math.log10(modem.num_bits_symbol)
# Modulation function
def modulate(bits):
return modem.modulate(cc.conv_encode(bits, trellis1, 'cont'))
# Receiver function (no process required as there are no fading)
def receiver_hard(y, h, constellation, noise_var):
return modem.demodulate(y, 'hard')
# Receiver function (no process required as there are no fading)
def receiver_soft(y, h, constellation, noise_var):
return modem.demodulate(y, 'soft', noise_var)
# Decoder function
def decoder_hard(msg):
return cc.viterbi_decode(msg, trellis1)
# Decoder function
def decoder_soft(msg):
return cc.viterbi_decode(msg, trellis1, decoding_type='soft')
# Build model from parameters
code_rate = trellis1.k / trellis1.n
model_hard = lk.LinkModel(modulate, channels, receiver_hard,
modem.num_bits_symbol, modem.constellation, modem.Es,
decoder_hard, code_rate)
model_soft = lk.LinkModel(modulate, channels, receiver_soft,
modem.num_bits_symbol, modem.constellation, modem.Es,
decoder_soft, code_rate)
# Test
BERs_hard = model_hard.link_performance(SNRs, 10000, 600, 5000, code_rate)
BERs_soft = model_soft.link_performance(SNRs, 10000, 600, 5000, code_rate)
plt.semilogy(SNRs, BERs_hard, 'o-', SNRs, BERs_soft, 'o-')
plt.grid()
plt.xlabel('Signal to Noise Ration (dB)')
plt.ylabel('Bit Error Rate')
plt.legend(('Hard demodulation', 'Soft demodulation'))
plt.show()
| bsd-3-clause |
RosesTheN00b/BudgetButlerWeb | butler_offline/core/database/sparen/depotauszuege.py | 1 | 3510 | from butler_offline.core.database.database_object import DatabaseObject
import pandas as pd
import numpy as np
class Depotauszuege(DatabaseObject):
TABLE_HEADER = ['Datum', 'Depotwert', 'Konto', 'Wert']
def __init__(self):
super().__init__(self.TABLE_HEADER)
def add(self, datum, depotwert, konto, wert):
neuer_auszug = pd.DataFrame([[datum, depotwert, konto, wert]], columns=self.TABLE_HEADER)
self.content = self.content.append(neuer_auszug, ignore_index=True)
self.taint()
self._sort()
def get_all(self):
return self.content
def edit(self, index, datum, depotwert, konto, wert):
self.edit_element(index, {
'Datum': datum,
'Depotwert': depotwert,
'Konto': konto,
'Wert': wert
})
def get_by(self, datum, konto):
auszuege = self.content[self.content.Konto == konto].copy()
auszuege = auszuege[auszuege.Datum == datum]
return auszuege
def get_latest_datum_by(self, konto):
auszuege = self.content[self.content.Konto == konto].copy()
if len(auszuege) == 0:
return None
return auszuege.Datum.max()
def resolve_index(self, datum, konto, depotwert):
auszuege = self.get_by(datum, konto)
result_frame = auszuege[auszuege.Depotwert == depotwert]
if len(result_frame) == 0:
return None
return result_frame.index[0]
def exists_wert(self, konto, depotwert):
frame = self.content[self.content.Konto == konto].copy()
frame = frame[frame.Depotwert == depotwert]
return len(frame) != 0
def get_kontostand_by(self, konto):
latest_datum = self.get_latest_datum_by(konto)
if not latest_datum:
return 0
auszuege = self.content[self.content.Konto == konto].copy()
auszug = auszuege[auszuege.Datum == latest_datum]
return auszug.Wert.sum()
def get_depotwert_by(self, depotwert):
auszuege = self.content[self.content.Depotwert == depotwert].copy()
if len(auszuege) == 0:
return 0
kontos = set(auszuege.Konto.tolist())
gesamt = 0
for konto in kontos:
konto_auszuege = auszuege[auszuege.Konto == konto].copy()
datum_max = konto_auszuege.Datum.max()
gesamt += konto_auszuege[konto_auszuege.Datum == datum_max].Wert.sum()
return gesamt
def delete_depotauszug(self, datum, konto):
index = self._resolve_indices(konto, datum)
for i in index:
self.delete(i)
def _resolve_indices(self, konto, datum):
values = self.content[self.content.Konto == konto].copy()
return values[values.Datum == datum].index.tolist()
def resolve_konto(self, index):
return self.content.loc[index, 'Konto']
def resolve_datum(self, index):
return self.content.loc[index, 'Datum']
def _sort(self):
self.content = self.content.sort_values(by=['Datum', 'Konto', 'Depotwert'])
self.content = self.content.reset_index(drop=True)
def select_max_year(self, year):
include = self.content.copy()
include['datum_filter'] = include.Datum.map(lambda x: x.year)
include = include[include.datum_filter <= year].copy()
del include['datum_filter']
selected = Depotauszuege()
selected.content = include
return selected
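    # Illustrative usage sketch (assumes Datum values are date-like objects,
    # as suggested by select_max_year above; not part of the original module):
    #
    #   from datetime import date
    #   auszuege = Depotauszuege()
    #   auszuege.add(date(2020, 12, 31), 'ETF_WORLD', 'depot1', 1500)
    #   auszuege.add(date(2021, 12, 31), 'ETF_WORLD', 'depot1', 1700)
    #   auszuege.get_kontostand_by('depot1')    # -> 1700 (latest Datum wins)
    #   auszuege.get_depotwert_by('ETF_WORLD')  # -> 1700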
| agpl-3.0 |
fabianp/scikit-learn | examples/gaussian_process/plot_gpc_xor.py | 104 | 2132 | """
========================================================================
Illustration of Gaussian process classification (GPC) on the XOR dataset
========================================================================
This example illustrates GPC on XOR data. Compared are a stationary, isotropic
kernel (RBF) and a non-stationary kernel (DotProduct). On this particular
dataset, the DotProduct kernel obtains considerably better results because the
class-boundaries are linear and coincide with the coordinate axes. In general,
stationary kernels often obtain better results.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, DotProduct
xx, yy = np.meshgrid(np.linspace(-3, 3, 50),
np.linspace(-3, 3, 50))
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
plt.figure(figsize=(10, 5))
kernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0)**2]
for i, kernel in enumerate(kernels):
clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
Z = Z.reshape(xx.shape)
plt.subplot(1, 2, i + 1)
image = plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                           linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.colorbar(image)
plt.title("%s\n Log-Marginal-Likelihood:%.3f"
% (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause |
ajoros/ves_ajoros | ves/equations.py | 1 | 17103 | import numpy as np
np.seterr(all='ignore')
from numpy import nan
import scipy.integrate as integrate
import scipy.interpolate as interpolate
import scipy.special as special
def wennerResistivity(voltageSpacing, Vm, I):
"""Wenner spacing apparent restivity implementation
Parameters
----------
voltageSpacing: `np.array.npfloat`
        Distance between the central location and a probing point
Vm: `np.array.npfloat`
The mean voltage of the four readings taken at the probing point
I: `np.array.npfloat`
The mean current of the four readings taken at the probing locale
Notes
-----
The Wenner layout of the probes requires that all probes be evenly spaced
Returns
-------
apparentResitivity: `np.array.float`
        The apparent resistivity as calculated via the Wenner approach
"""
a = voltageSpacing[0] * 2
apparentResitivity = 2 * np.pi * a * (Vm / I)
return apparentResitivity
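
# Worked example (illustrative, not from the original module): with probe
# half-spacing voltageSpacing[0] = 5 m (so a = 10 m) and a measured Vm / I
# ratio of 0.5 ohm, the Wenner formula gives 2 * pi * 10 * 0.5 ~= 31.4 ohm-m.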
def schlumbergerResistivity(voltageSpacing, Vm, I):
"""Schlumberger spacing apprent resitivity implementation
Parameters
----------
voltageSpacing: `np.array.float64`
        Distance between the central location and a probing point
Vm: `np.array.npfloat`
The mean voltage of the four readings taken at the probing point
I: `np.array.npfloat`
The mean current of the four readings taken at the probing locale
Notes
-----
    The Schlumberger layout requires a particular spacing. The probes must be
separated from the central point by 0.6, 1.0, 1.6, 2.0, 3.0, 6.0,
9.0, 15.0, 20.0, 30.0, and 60 (meters)
Returns
-------
apparentResitivity: `np.array.float`
        The apparent resistivity as calculated via the Schlumberger approach
"""
# Initialize empty arrays that are the same length as voltage spacing
nRows = len(voltageSpacing)
s, L = np.empty(nRows), np.empty(nRows)
s[:], L[:] = np.nan, np.nan
# Create arrays of offset distances to define s and L for each data point
# See Clark, 2011
for i in range(nRows):
if i == len(voltageSpacing) - 1:
break
s[i] = voltageSpacing[i]
L[i] = voltageSpacing[i + 1]
apparentResitivity = (np.pi * Vm * (L**2 - s**2)) / (2 * s * I)
return apparentResitivity
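
# Worked example (illustrative, not from the original module): for a probing
# point with s = 3 m, L = 6 m, Vm = 0.2 V and I = 0.1 A, the formula above
# gives pi * 0.2 * (36 - 9) / (2 * 3 * 0.1) ~= 28.3 ohm-m.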
def schlumbergerResistivityModified(voltageSpacing, Vm, I):
"""Alternative impleMENtation for a modified Schlumber arrangement"""
apparentResitivity = 2 * np.pi * voltageSpacing * (Vm / I)
return apparentResitivity
def T(lam, rhoA, rhoB, rhoM, dA):
TTT = (
(rhoB + rhoM * np.tanh(lam * dA)) /
(1. + (rhoB / rhoM) * np.tanh(lam * dA)) )
TT = (
(TTT + rhoA * np.tanh(lam * 5.)) /
(1. + (TTT / rhoA) * np.tanh(lam * 5.)) )
return TT
def integrand(lam, r, rhoA, rhoB, rhoM, dA):
    kernel = lam * (T(lam, rhoA, rhoB, rhoM, dA) - rhoA)
    integrandResult = kernel * special.jn(1, r * lam)
    return integrandResult
def apparentResitivity(aSpacing, ab, dA, rhoB, rhoM):
    # rhoB, rhoM: resistivities of the deeper layers, required by the integrand
for a in aSpacing:
rhoA = a
answers, rhoplot, rplot = [], [], []
for r in ab:
            integral = integrate.quad(integrand, 0, np.inf,
                                      args=(r, rhoA, rhoB, rhoM, dA), limit=100)
answer = rhoA + (r**2) * float(integral[0])
answers.append(answer)
rplot.append(r / dA)
rhoplot.append(answer / rhoA)
del answer, integral
return (answers, rplot, rhoplot)
def interpolateFieldData(voltageSpacing, apparentResistivity, arraySpacing,
bounds_error=True):
""""""
# Define the recommended sample interval from Gosh 1971
# np.log is natural log
sampleInterval = np.log(10) / 3.
# Deal with the fact that Schlumberger layout produces nan as last value
apparentResistivity[np.isnan(apparentResistivity)] = np.nanmax(
apparentResistivity)
lastApparentRestivity = apparentResistivity[-1]
## The following two steps are done to ensure np.interpolate can produce
## new values, as it mandates that the new values be contained within the
## range of values used to produce the function;
## Gosh refers to this as extrapolation
# Extend the voltageSpacing and apparentRestivity arrays
# New arrays for max and min sample ranges
# voltageSpacingInsertion = np.array(
# [voltageSpacing[0] - sampleInterval * i + 1 for i in range(3)])
# voltageSpacingAppend = np.array(
# [voltageSpacing[0] + sampleInterval * i + 1 for i in range(3)])
if arraySpacing.lower() == 'schlumberger':
voltageSpacingInsertion = np.array(
[0 - sampleInterval * i + 1 for i in range(3)])
voltageSpacingAppend = np.array(
[0 + sampleInterval * i + 1 for i in range(3)])
if arraySpacing.lower() == 'wenner':
voltageSpacing = voltageSpacing - sampleInterval
voltageSpacingInsertion = np.array(
[-0.48 - sampleInterval * i + 1 for i in range(3)])
voltageSpacingAppend = np.array(
[-0.48 + sampleInterval * i + 1 for i in range(3)])
apparentResistivityInsertion = np.empty(3)
apparentResistivityInsertion.fill(apparentResistivity[0])
apparentResistivityAppend = np.empty(3)
apparentResistivityAppend.fill(lastApparentRestivity)
# New arrays with the extended values for input into scipy.interpolate
# Voltage Spacing (m)
voltageSpacingExtrapolated = np.insert(
voltageSpacing, 0, voltageSpacingInsertion)
voltageSpacingExtrapolated = np.append(
voltageSpacingExtrapolated, voltageSpacingAppend)
voltageSpacingExtrapolated.sort()
# Apparent Restivity (ohm-m)
apparentResistivityExtrapolate = np.insert(
apparentResistivity, 0, apparentResistivityInsertion)
apparentResistivityExtrapolate = np.append(
apparentResistivityExtrapolate, apparentResistivityAppend)
# # Replace nan values with the maximum
# apparentResistivityExtrapolate[np.isnan(
# apparentResistivityExtrapolate)] = np.nanmax(apparentResistivity)
# Interpolate the measured resistivity data
if len(voltageSpacingExtrapolated) > len(apparentResistivityExtrapolate):
voltageSpacingExtrapolated = (
voltageSpacingExtrapolated[:len(apparentResistivityExtrapolate)])
if len(apparentResistivityExtrapolate) > len(voltageSpacingExtrapolated):
apparentResistivityExtrapolate = (
apparentResistivityExtrapolate[:len(voltageSpacingExtrapolated)])
function = interpolate.interp1d(
voltageSpacingExtrapolated, apparentResistivityExtrapolate,
bounds_error=bounds_error)
newRestivity = function(voltageSpacingExtrapolated)
return (voltageSpacingExtrapolated, newRestivity)
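
# Note added for clarity: the Gosh (1971) interval ln(10)/3 ~= 0.7675 used
# above spaces the samples at three per decade on a natural-log abscissa,
# which is the spacing the digital filter coefficients below assume.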
def applyFilter(extrapolatedResistivity, filterCoefficients):
""""""
# Calculate the last index of the extrapolated resistivity values
# upon which the filters are to be applied
lastIndex = len(extrapolatedResistivity) - len(filterCoefficients)
# Fill a list with numpy arrays of the extrapolated resistivity values
# with the digitial filter coefficients systematically applied
resistList = []
for i in range(lastIndex):
resistOut = np.copy(extrapolatedResistivity)
for j in range(len(filterCoefficients)):
try:
resistOut[(i + 1) + j] = (
resistOut[(i + 1) + j] * filterCoefficients[j])
except IndexError:
break
resistList.append(resistOut)
del resistOut
# Collapse the list into a one dimensional array of the sum of all the
# extrapolated values with the digital filter coefficients applied
resistArray = np.array(resistList)
# resistArray.dump('/Users/avitale/Desktop/array') # load with np.load
filteredApparentResistivity = np.nansum(resistArray, axis=0)
return filteredApparentResistivity
if __name__ == '__main__':
sleep_time = 0.5
# Import time to control the printing a bit
import time
# I'm using Qt5 as the matplotlib backend because I'm on a minimal
# environment. Comment out the next two lines if you're having problems
# related to the matplotlib backend
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
plt.style.use('bmh') # Prettier defaults on matplotlib.__version__ >= 1.5
# plt.hold(False) # Don't hold on first plt.show()
import matplotlib.lines as mlines # to create manual legend
# Import some parameters that are defined elsewhere
from templates.tempData import (
colors, tableData, coefficients)
from aggregate import aggregateTable
    schlumbergerFilterCoefficients, wennerFilterCoefficients = coefficients
    # Aliases used by the demo below; the template provides a single
    # Schlumberger filter set, so it is reused for the short and long names.
    shortFilterCoefficients = longFilterCoefficients = schlumbergerFilterCoefficients
sampleInterval = np.log(10) / 3.
# Print out the table that is the "input" table from the field survey
print('Schlumberger example:')
time.sleep(sleep_time)
print('This is the starting table:')
for row in tableData:
print(row)
# Aggregate the table to get the mean voltage and current
voltageSpacing, meanVoltage, meanCurrent = aggregateTable(
tableData)
# Print out the aggregated values
print('\nVoltage Spacing: {}\nMean Voltage: {}'.format(
voltageSpacing, meanVoltage))
print('Mean Current: {}'.format(meanCurrent))
# Use the modified Schlumberger equation like that used in the spreadsheet
apparentResistivity = schlumbergerResistivityModified(
voltageSpacing, meanVoltage, meanCurrent)
print('\nApparent resistivity (same formula as ' +
'spreadsheet for Schlum):\n{}'.format(apparentResistivity))
# Interpolate the field data to get values at Gosh's suggested intervals
voltageSpacingExtrapolated, newRestivity = interpolateFieldData(
voltageSpacing, apparentResistivity, 'schlumberger')
print('\nNew Resitivity values:\n{}'.format(newRestivity))
    # Apply the filter coefficients. In this case, using the Schlumberger short
    # coefficients for the digital filter
filteredResistivity = applyFilter(newRestivity, shortFilterCoefficients)
print('\nFiltered resistivity after coefficients applied:\n{}'.format(
filteredResistivity))
    # Create points for the plot
samplePoints = np.arange(
start=( - sampleInterval * 2),
stop=sampleInterval * 30,
step=sampleInterval)
print("\nNew sample points based on Gosh's suggested interval:\n{}".format(
samplePoints))
print('\nCoefficients:')
print(' Schlum. short:\n {}'.format(shortFilterCoefficients))
print(' Schlum. long: \n {}'.format(longFilterCoefficients))
print(' Wenner. short:\n {}'.format(wennerFilterCoefficients))
# Plot out the results
print(len(samplePoints))
print(len(filteredResistivity))
plt.semilogy(samplePoints[:len(filteredResistivity)], filteredResistivity,
marker='o', linestyle='--', color='#348ABD')
plt.semilogy(voltageSpacing, apparentResistivity,
marker='o', linestyle='-', color='#A60628')
plt.xlabel('Electrode Spacing (m)')
plt.ylabel('Apparent Resitivity (ohm-m)')
blue_line = mlines.Line2D(
[], [], marker='o',linestyle='--',
label='Filtered values', color='#348ABD')
red_lines = mlines.Line2D(
[], [], marker='o', linestyle='-',
label='Field values', color='#A60628')
plt.legend(
handles=[blue_line, red_lines],
bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
plt.show()
with open('./ves/templates/MafingaExample.txt', 'r') as f:
lines = f.read()
newLines = [line.split(' ') for line in lines.split('\n')]
wennerVoltageSpacing, wennerApparentResistivity = [], []
for line in newLines[2:-3]:
try:
wennerVoltageSpacing.append(line.pop(0))
wennerApparentResistivity.append(line.pop(-1))
except IndexError:
pass
print('\n\nWenner Example:')
time.sleep(sleep_time)
print('Probe spacing (m), apparent res.')
for spacing, res in zip(wennerVoltageSpacing, wennerApparentResistivity):
print('{:<17} {:<12}'.format(spacing, res))
wennerVoltageSpacing = np.array(
wennerVoltageSpacing, dtype=np.float64)
wennerApparentResistivity = np.array(
wennerApparentResistivity, dtype=np.float64)
# Interpolate the field data to get values at Gosh's suggested intervals
voltageSpacingExtrapolated, newRestivity = interpolateFieldData(
wennerVoltageSpacing, wennerApparentResistivity, 'wenner')
print('\nNew Resitivity values:\n{}'.format(newRestivity))
    # Apply the filter coefficients. In this case, using the Wenner
    # coefficients for the digital filter
filteredResistivity = applyFilter(newRestivity, wennerFilterCoefficients)
print('\nFiltered resistivity after coefficients applied:\n{}'.format(
filteredResistivity))
    # Create points for the plot
samplePoints = np.arange(
start=( - sampleInterval * 2),
stop=sampleInterval * 20,
step=sampleInterval)
print('\nNew sample points based on Gosh\'s suggested interval:\n{}'.format(
samplePoints))
print('\nCoefficients:')
print(' Schlum. short:\n {}'.format(shortFilterCoefficients))
print(' Schlum. long: \n {}'.format(longFilterCoefficients))
print(' Wenner. short:\n {}'.format(wennerFilterCoefficients))
# Plot out the results
plt.semilogy(samplePoints[:len(filteredResistivity)], filteredResistivity,
marker='o', linestyle='--', color='#348ABD')
    plt.semilogy(wennerVoltageSpacing, wennerApparentResistivity,
marker='o', linestyle='-', color='#A60628')
plt.xlabel('Electrode Spacing (m)')
plt.ylabel('Apparent Resitivity (ohm-m)')
blue_line = mlines.Line2D(
[], [], marker='o',linestyle='--',
label='Filtered values', color='#348ABD')
red_lines = mlines.Line2D(
[], [], marker='o', linestyle='-',
label='Field values', color='#A60628')
plt.legend(
handles=[blue_line, red_lines],
bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
plt.show()
with open('./ves/templates/UgandaPIM.txt', 'r') as f:
lines = f.read()
newLines = [line.split(' ') for line in lines.split('\n')]
wennerVoltageSpacing, wennerApparentResistivity = [], []
for line in newLines[2:-3]:
try:
wennerVoltageSpacing.append(line.pop(0))
wennerApparentResistivity.append(line.pop(-1))
except IndexError:
pass
print('\n\nWenner Example 2:')
time.sleep(sleep_time)
print('Probe spacing (m), apparent res.')
for spacing, res in zip(wennerVoltageSpacing, wennerApparentResistivity):
print('{:<17} {:<12}'.format(spacing, res))
wennerVoltageSpacing = np.array(
wennerVoltageSpacing, dtype=np.float64)
wennerApparentResistivity = np.array(
wennerApparentResistivity, dtype=np.float64)
# Interpolate the field data to get values at Gosh's suggested intervals
voltageSpacingExtrapolated, newRestivity = interpolateFieldData(
wennerVoltageSpacing, wennerApparentResistivity, 'wenner')
print('\nNew Resitivity values:\n{}'.format(newRestivity))
    # Apply the filter coefficients. In this case, using the Wenner
    # coefficients for the digital filter
filteredResistivity = applyFilter(newRestivity, wennerFilterCoefficients)
print('\nFiltered resistivity after coefficients applied:\n{}'.format(
filteredResistivity))
    # Create points for the plot
samplePoints = np.arange(
start=( - sampleInterval * 2),
stop=sampleInterval * 20,
step=sampleInterval)
print('\nNew sample points based on Gosh\'s suggested interval:\n{}'.format(
samplePoints))
print('\nCoefficients:')
print(' Schlum. short:\n {}'.format(shortFilterCoefficients))
print(' Schlum. long: \n {}'.format(longFilterCoefficients))
print(' Wenner. short:\n {}'.format(wennerFilterCoefficients))
# print(samplePoints[:len(filteredResistivity)])
# Plot out the results
plt.semilogy(samplePoints[:len(filteredResistivity)], filteredResistivity,
marker='o', linestyle='--', color='#348ABD')
    plt.semilogy(wennerVoltageSpacing, wennerApparentResistivity,
marker='o', linestyle='-', color='#A60628')
plt.xlabel('Electrode Spacing (m)')
plt.ylabel('Apparent Resitivity (ohm-m)')
blue_line = mlines.Line2D(
[], [], marker='o',linestyle='--',
label='Filtered values', color='#348ABD')
red_lines = mlines.Line2D(
[], [], marker='o', linestyle='-',
label='Field values', color='#A60628')
plt.legend(
handles=[blue_line, red_lines],
bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
plt.show()
| lgpl-3.0 |
uelei/api_cluster | back/trainning.py | 1 | 1208 | # -*- coding: utf-8 -*-
import json
from .database import db
from .models import ClusterLogs
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
__author__ = 'wesleywwerneck'
def train_cluster():
rows, data = get_train_data()
data_set = np.array(data)
bandwidth = estimate_bandwidth(data_set, quantile=0.2, n_samples=200)
ms = MeanShift(bandwidth=bandwidth, cluster_all=False, bin_seeding=True)
labels = ms.fit_predict(data_set)
print(labels)
for row, label in zip(rows, labels):
row.label = int(label)
db.session.commit()
return labels
def get_train_data():
data = []
rows = ClusterLogs.query.filter_by(label=None)
for log in rows:
formated_data = format_data_set_peaks(log)
data.append(formated_data[1::])
return rows, data
def format_data_set_peaks(data):
selected = [
'PowerActive',
'PowerReactive',
'PowerAppearent',
'LineCurrent',
'LineVoltage',
'Peaks'
]
clean = [getattr(data, s) for s in selected]
list_peaks = [float(l) for l in json.loads(clean[5])[:3]]
clean_list = clean[:5] + list_peaks
return clean_list
| mit |
davidgbe/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
dsquareindia/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
wwf5067/statsmodels | statsmodels/iolib/summary.py | 8 | 34185 | from statsmodels.compat.python import range, lrange, lmap, lzip, zip_longest
import numpy as np
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.tableformatting import (gen_fmt, fmt_2,
fmt_params, fmt_base, fmt_2cols)
#from statsmodels.iolib.summary2d import summary_params_2dflat
#from summary2d import summary_params_2dflat
def forg(x, prec=3):
if prec == 3:
#for 3 decimals
if (abs(x) >= 1e4) or (abs(x) < 1e-4):
return '%9.3g' % x
else:
return '%9.3f' % x
elif prec == 4:
if (abs(x) >= 1e4) or (abs(x) < 1e-4):
return '%10.4g' % x
else:
return '%10.4f' % x
else:
raise NotImplementedError
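
# Examples (illustrative): forg right-aligns to a fixed width and switches to
# '%g' formatting when abs(x) >= 1e4 or abs(x) < 1e-4, e.g.
#   forg(3.14159)  -> '    3.142'
#   forg(0.00005)  -> '    5e-05'
#   forg(12345.6)  -> ' 1.23e+04'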
def summary(self, yname=None, xname=None, title=0, alpha=.05,
returns='text', model_info=None):
"""
Parameters
-----------
yname : string
optional, Default is `Y`
xname : list of strings
        optional, Default is `X.#` for # in range(p), where p is the number of regressors
    Confidence interval : (0,1) not implemented
title : string
        optional, Default is 'Generalized linear model'
returns : string
'text', 'table', 'csv', 'latex', 'html'
Returns
-------
    Default :
    returns='print'
        Prints the summarized results
Option :
returns='text'
        Prints the summarized results
Option :
returns='table'
SimpleTable instance : summarizing the fit of a linear model.
Option :
returns='csv'
returns a string of csv of the results, to import into a spreadsheet
Option :
returns='latex'
        Not implemented yet
Option :
returns='HTML'
        Not implemented yet
Examples (needs updating)
--------
>>> import statsmodels as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> ols_results = sm.OLS(data.endog, data.exog).results
>>> print ols_results.summary()
...
Notes
-----
conf_int calculated from normal dist.
"""
import time as time
#TODO Make sure all self.model.__class__.__name__ are listed
model_types = {'OLS' : 'Ordinary least squares',
'GLS' : 'Generalized least squares',
'GLSAR' : 'Generalized least squares with AR(p)',
                   'WLS' : 'Weighted least squares',
'RLM' : 'Robust linear model',
'GLM' : 'Generalized linear model'
}
model_methods = {'OLS' : 'Least Squares',
'GLS' : 'Least Squares',
'GLSAR' : 'Least Squares',
'WLS' : 'Least Squares',
'RLM' : '?',
'GLM' : '?'
}
if title==0:
title = model_types[self.model.__class__.__name__]
if yname is None:
try:
yname = self.model.endog_names
except AttributeError:
yname = 'y'
if xname is None:
try:
xname = self.model.exog_names
except AttributeError:
xname = ['var_%d' % i for i in range(len(self.params))]
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
modeltype = self.model.__class__.__name__
#dist_family = self.model.family.__class__.__name__
nobs = self.nobs
df_model = self.df_model
df_resid = self.df_resid
#General part of the summary table, Applicable to all? models
#------------------------------------------------------------
#TODO: define this generically, overwrite in model classes
#replace definition of stubs data by single list
#e.g.
gen_left = [('Model type:', [modeltype]),
('Date:', [date]),
('Dependent Variable:', yname), #What happens with multiple names?
('df model', [df_model])
]
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_title = title
gen_header = None
## gen_stubs_left = ('Model type:',
## 'Date:',
## 'Dependent Variable:',
## 'df model'
## )
## gen_data_left = [[modeltype],
## [date],
## yname, #What happens with multiple names?
## [df_model]
## ]
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title = gen_title,
txt_fmt = gen_fmt
)
gen_stubs_right = ('Method:',
'Time:',
'Number of Obs:',
'df resid'
)
gen_data_right = ([modeltype], #was dist family need to look at more
time_of_day,
[nobs],
[df_resid]
)
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title = gen_title,
txt_fmt = gen_fmt
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
tstats = {'OLS' : self.t(),
'GLS' : self.t(),
'GLSAR' : self.t(),
'WLS' : self.t(),
'RLM' : self.t(),
'GLM' : self.t()
}
prob_stats = {'OLS' : self.pvalues,
'GLS' : self.pvalues,
'GLSAR' : self.pvalues,
'WLS' : self.pvalues,
'RLM' : self.pvalues,
'GLM' : self.pvalues
}
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
param_header = {
'OLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLSAR' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'WLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLM' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'], #glm uses t-distribution
        'RLM' : ['coef', 'std err', 'z', 'P>|z|', alp + ' Conf. Interval'] #check z
}
params_stubs = xname
params = self.params
conf_int = self.conf_int(alpha)
std_err = self.bse
exog_len = lrange(len(xname))
tstat = tstats[modeltype]
prob_stat = prob_stats[modeltype]
    # SimpleTable should be able to handle the formatting
params_data = lzip(["%#6.4g" % (params[i]) for i in exog_len],
["%#6.4f" % (std_err[i]) for i in exog_len],
["%#6.4f" % (tstat[i]) for i in exog_len],
["%#6.4f" % (prob_stat[i]) for i in exog_len],
["(%#5g, %#5g)" % tuple(conf_int[i]) for i in \
exog_len]
)
parameter_table = SimpleTable(params_data,
param_header[modeltype],
params_stubs,
title = None,
txt_fmt = fmt_2, #gen_fmt,
)
#special table
#-------------
#TODO: exists in linear_model, what about other models
#residual diagnostics
#output options
#--------------
#TODO: JP the rest needs to be fixed, similar to summary in linear_model
def ols_printer():
"""
print summary table for ols models
"""
table = str(general_table)+'\n'+str(parameter_table)
return table
def ols_to_csv():
"""
exports ols summary data to csv
"""
pass
def glm_printer():
table = str(general_table)+'\n'+str(parameter_table)
return table
printers = {'OLS': ols_printer,
'GLM' : glm_printer
}
if returns=='print':
try:
return printers[modeltype]()
except KeyError:
return printers['OLS']()
def _getnames(self, yname=None, xname=None):
'''extract names from model or construct names
'''
if yname is None:
if hasattr(self.model, 'endog_names') and (
not self.model.endog_names is None):
yname = self.model.endog_names
else:
yname = 'y'
if xname is None:
if hasattr(self.model, 'exog_names') and (
not self.model.exog_names is None):
xname = self.model.exog_names
else:
xname = ['var_%d' % i for i in range(len(self.params))]
return yname, xname
def summary_top(results, title=None, gleft=None, gright=None, yname=None, xname=None):
'''generate top table(s)
TODO: this still uses predefined model_methods
? allow gleft, gright to be 1 element tuples instead of filling with None?
'''
#change of names ?
gen_left, gen_right = gleft, gright
#time and names are always included
import time
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
yname, xname = _getnames(results, yname=yname, xname=xname)
#create dictionary with default
#use lambdas because some values raise exception if they are not available
#alternate spellings are commented out to force unique labels
default_items = dict([
('Dependent Variable:', lambda: [yname]),
('Dep. Variable:', lambda: [yname]),
('Model:', lambda: [results.model.__class__.__name__]),
#('Model type:', lambda: [results.model.__class__.__name__]),
('Date:', lambda: [date]),
('Time:', lambda: time_of_day),
('Number of Obs:', lambda: [results.nobs]),
#('No. of Observations:', lambda: ["%#6d" % results.nobs]),
('No. Observations:', lambda: ["%#6d" % results.nobs]),
#('Df model:', lambda: [results.df_model]),
('Df Model:', lambda: ["%#6d" % results.df_model]),
#TODO: check when we have non-integer df
('Df Residuals:', lambda: ["%#6d" % results.df_resid]),
#('Df resid:', lambda: [results.df_resid]),
#('df resid:', lambda: [results.df_resid]), #check capitalization
('Log-Likelihood:', lambda: ["%#8.5g" % results.llf]) #doesn't exist for RLM - exception
#('Method:', lambda: [???]), #no default for this
])
if title is None:
        title = results.model.__class__.__name__ + ' Regression Results'
if gen_left is None:
#default: General part of the summary table, Applicable to all? models
        gen_left = [('Dep. Variable:', None),
                    ('Model:', None),
                    ('Date:', None),
                    ('No. Observations:', None),
                    ('Df Model:', None),
                    ('Df Residuals:', None)]
try:
llf = results.llf
            gen_left.append(('Log-Likelihood:', None))
except: #AttributeError, NotImplementedError
pass
gen_right = []
gen_title = title
gen_header = None
#needed_values = [k for k,v in gleft + gright if v is None] #not used anymore
#replace missing (None) values with default values
gen_left_ = []
for item, value in gen_left:
if value is None:
value = default_items[item]() #let KeyErrors raise exception
gen_left_.append((item, value))
gen_left = gen_left_
if gen_right:
gen_right_ = []
for item, value in gen_right:
if value is None:
value = default_items[item]() #let KeyErrors raise exception
gen_right_.append((item, value))
gen_right = gen_right_
#check
missing_values = [k for k,v in gen_left + gen_right if v is None]
assert missing_values == [], missing_values
#pad both tables to equal number of rows
if gen_right:
if len(gen_right) < len(gen_left):
#fill up with blank lines to same length
gen_right += [(' ', ' ')] * (len(gen_left) - len(gen_right))
elif len(gen_right) > len(gen_left):
#fill up with blank lines to same length, just to keep it symmetric
gen_left += [(' ', ' ')] * (len(gen_right) - len(gen_left))
#padding in SimpleTable doesn't work like I want
#force extra spacing and exact string length in right table
gen_right = [('%-21s' % (' '+k), v) for k,v in gen_right]
gen_stubs_right, gen_data_right = zip_longest(*gen_right) #transpose row col
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title = gen_title,
txt_fmt = fmt_2cols #gen_fmt
)
else:
gen_table_right = [] #because .extend_right seems works with []
#moved below so that we can pad if needed to match length of gen_right
#transpose rows and columns, `unzip`
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title = gen_title,
txt_fmt = fmt_2cols
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
return general_table #, gen_table_left, gen_table_right
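# --- Illustrative sketch (not part of the original module) ---
# A hypothetical call to `summary_top`: a fitted results instance is required,
# and the (label, None) pairs are filled in from `default_items` above.
def _summary_top_demo(results):
    gleft = [('Dep. Variable:', None),
             ('Model:', None),
             ('Date:', None)]
    gright = [('No. Observations:', None),
              ('Df Residuals:', None)]
    return summary_top(results, title='Demo Regression Results',
                       gleft=gleft, gright=gright)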
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
skip_header=False, title=None):
'''create a summary table for the parameters
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
    skip_header : bool
If false (default), then the header row is added. If true, then no
header row is added.
Returns
-------
params_table : SimpleTable instance
'''
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
if isinstance(results, tuple):
#for multivariate endog
#TODO: check whether I don't want to refactor this
#we need to give parameter alpha to conf_int
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
std_err = results.bse
tvalues = results.tvalues #is this sometimes called zvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
if use_t:
param_header = ['coef', 'std err', 't', 'P>|t|',
'[' + alp + ' Conf. Int.]']
else:
param_header = ['coef', 'std err', 'z', 'P>|z|',
'[' + alp + ' Conf. Int.]']
if skip_header:
param_header = None
_, xname = _getnames(results, yname=yname, xname=xname)
params_stubs = xname
exog_idx = lrange(len(xname))
#center confidence intervals if they are unequal lengths
# confint = ["(%#6.3g, %#6.3g)" % tuple(conf_int[i]) for i in \
# exog_idx]
confint = ["%s %s" % tuple(lmap(forg, conf_int[i])) for i in \
exog_idx]
len_ci = lmap(len, confint)
max_ci = max(len_ci)
min_ci = min(len_ci)
if min_ci < max_ci:
confint = [ci.center(max_ci) for ci in confint]
#explicit f/g formatting, now uses forg, f or g depending on values
# params_data = lzip(["%#6.4g" % (params[i]) for i in exog_idx],
# ["%#6.4f" % (std_err[i]) for i in exog_idx],
# ["%#6.3f" % (tvalues[i]) for i in exog_idx],
# ["%#6.3f" % (pvalues[i]) for i in exog_idx],
# confint
## ["(%#6.3g, %#6.3g)" % tuple(conf_int[i]) for i in \
## exog_idx]
# )
params_data = lzip([forg(params[i], prec=4) for i in exog_idx],
[forg(std_err[i]) for i in exog_idx],
[forg(tvalues[i]) for i in exog_idx],
["%#6.3f" % (pvalues[i]) for i in exog_idx],
confint
# ["(%#6.3g, %#6.3g)" % tuple(conf_int[i]) for i in \
# exog_idx]
)
parameter_table = SimpleTable(params_data,
param_header,
params_stubs,
title = title,
txt_fmt = fmt_params #gen_fmt #fmt_2, #gen_fmt,
)
return parameter_table
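# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical use of `summary_params` with a fitted results
# instance (e.g. an OLS fit); it returns one SimpleTable row per coefficient
# with the estimate, standard error, test statistic, p-value and interval.
def _summary_params_demo(results):
    return summary_params(results, alpha=0.05, use_t=True)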
def summary_params_frame(results, yname=None, xname=None, alpha=.05,
use_t=True):
'''create a summary table for the parameters
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
    Returns
    -------
    params_frame : pandas DataFrame
        parameter estimates, standard errors, test statistics, p-values and
        confidence interval bounds, indexed by the exog names
'''
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
if isinstance(results, tuple):
#for multivariate endog
#TODO: check whether I don't want to refactor this
#we need to give parameter alpha to conf_int
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
std_err = results.bse
tvalues = results.tvalues #is this sometimes called zvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
if use_t:
param_header = ['coef', 'std err', 't', 'P>|t|',
'Conf. Int. Low', 'Conf. Int. Upp.']
else:
param_header = ['coef', 'std err', 'z', 'P>|z|',
'Conf. Int. Low', 'Conf. Int. Upp.']
_, xname = _getnames(results, yname=yname, xname=xname)
#------------------
from pandas import DataFrame
table = np.column_stack((params, std_err, tvalues, pvalues, conf_int))
return DataFrame(table, columns=param_header, index=xname)
def summary_params_2d(result, extras=None, endog_names=None, exog_names=None,
title=None):
'''create summary table of regression parameters with several equations
This allows interleaving of parameters with bse and/or tvalues
Parameters
----------
result : result instance
the result instance with params and attributes in extras
extras : list of strings
additional attributes to add below a parameter row, e.g. bse or tvalues
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
title : None or string
Returns
-------
tables : list of SimpleTable
        this contains a list of all separate subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
'''
if endog_names is None:
#TODO: note the [1:] is specific to current MNLogit
endog_names = ['endog_%d' % i for i in
np.unique(result.model.endog)[1:]]
if exog_names is None:
exog_names = ['var%d' %i for i in range(len(result.params))]
#TODO: check formatting options with different values
#res_params = [['%10.4f'%item for item in row] for row in result.params]
res_params = [[forg(item, prec=4) for item in row] for row in result.params]
if extras: #not None or non-empty
#maybe this should be a simple triple loop instead of list comprehension?
#below_list = [[['%10s' % ('('+('%10.3f'%v).strip()+')')
extras_list = [[['%10s' % ('(' + forg(v, prec=3).strip() + ')')
for v in col]
for col in getattr(result, what)]
for what in extras
]
data = lzip(res_params, *extras_list)
data = [i for j in data for i in j] #flatten
stubs = lzip(endog_names, *[['']*len(endog_names)]*len(extras))
stubs = [i for j in stubs for i in j] #flatten
#return SimpleTable(data, headers=exog_names, stubs=stubs)
else:
data = res_params
stubs = endog_names
# return SimpleTable(data, headers=exog_names, stubs=stubs,
# data_fmts=['%10.4f'])
import copy
txt_fmt = copy.deepcopy(fmt_params)
txt_fmt.update(dict(data_fmts = ["%s"]*result.params.shape[1]))
return SimpleTable(data, headers=exog_names,
stubs=stubs,
title=title,
# data_fmts = ["%s"]),
txt_fmt = txt_fmt)
def summary_params_2dflat(result, endog_names=None, exog_names=None, alpha=0.05,
use_t=True, keep_headers=True, endog_cols=False):
#skip_headers2=True):
'''summary table for parameters that are 2d, e.g. multi-equation models
Parameters
----------
result : result instance
the result instance with params, bse, tvalues and conf_int
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
alpha : float
        significance level for the confidence intervals, default 0.05
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
keep_headers : bool
If true (default), then sub-tables keep their headers. If false, then
        only the first headers are kept, the other headers are blanked out
endog_cols : bool
If false (default) then params and other result statistics have
equations by rows. If true, then equations are assumed to be in columns.
Not implemented yet.
Returns
-------
tables : list of SimpleTable
        this contains a list of all separate subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
'''
res = result
params = res.params
if params.ndim == 2: # we've got multiple equations
n_equ = params.shape[1]
if not len(endog_names) == params.shape[1]:
raise ValueError('endog_names has wrong length')
else:
if not len(endog_names) == len(params):
raise ValueError('endog_names has wrong length')
n_equ = 1
#VAR doesn't have conf_int
#params = res.params.T # this is a convention for multi-eq models
if not isinstance(endog_names, list):
#this might be specific to multinomial logit type, move?
if endog_names is None:
endog_basename = 'endog'
else:
endog_basename = endog_names
#TODO: note, the [1:] is specific to current MNLogit
endog_names = res.model.endog_names[1:]
#check if we have the right length of names
tables = []
for eq in range(n_equ):
restup = (res, res.params[:,eq], res.bse[:,eq], res.tvalues[:,eq],
res.pvalues[:,eq], res.conf_int(alpha)[eq])
#not used anymore in current version
# if skip_headers2:
# skiph = (row != 0)
# else:
# skiph = False
skiph = False
tble = summary_params(restup, yname=endog_names[eq],
xname=exog_names, alpha=alpha, use_t=use_t,
skip_header=skiph)
tables.append(tble)
#add titles, they will be moved to header lines in table_extend
for i in range(len(endog_names)):
tables[i].title = endog_names[i]
table_all = table_extend(tables, keep_headers=keep_headers)
return tables, table_all
def table_extend(tables, keep_headers=True):
'''extend a list of SimpleTables, adding titles to header of subtables
This function returns the merged table as a deepcopy, in contrast to the
SimpleTable extend method.
Parameters
----------
tables : list of SimpleTable instances
keep_headers : bool
        If true, then all headers are kept. If false, then the headers of
subtables are blanked out.
Returns
-------
table_all : SimpleTable
merged tables as a single SimpleTable instance
'''
from copy import deepcopy
for ii, t in enumerate(tables[:]): #[1:]:
t = deepcopy(t)
#move title to first cell of header
#TODO: check if we have multiline headers
if t[0].datatype == 'header':
t[0][0].data = t.title
t[0][0]._datatype = None
t[0][0].row = t[0][1].row
if not keep_headers and (ii > 0):
for c in t[0][1:]:
c.data = ''
#add separating line and extend tables
if ii == 0:
table_all = t
else:
r1 = table_all[-1]
r1.add_format('txt', row_dec_below='-')
table_all.extend(t)
table_all.title = None
return table_all
def summary_return(tables, return_fmt='text'):
######## Return Summary Tables ########
# join table parts then print
if return_fmt == 'text':
strdrop = lambda x: str(x).rsplit('\n',1)[0]
#convert to string drop last line
return '\n'.join(lmap(strdrop, tables[:-1]) + [str(tables[-1])])
elif return_fmt == 'tables':
return tables
elif return_fmt == 'csv':
return '\n'.join(map(lambda x: x.as_csv(), tables))
elif return_fmt == 'latex':
#TODO: insert \hline after updating SimpleTable
import copy
table = copy.deepcopy(tables[0])
del table[-1]
for part in tables[1:]:
table.extend(part)
return table.as_latex_tabular()
elif return_fmt == 'html':
return "\n".join(table.as_html() for table in tables)
else:
raise ValueError('available output formats are text, csv, latex, html')
class Summary(object):
'''class to hold tables for result summary presentation
Construction does not take any parameters. Tables and text can be added
with the `add_` methods.
Attributes
----------
tables : list of tables
        Contains the list of SimpleTable instances; horizontally concatenated tables are not saved separately.
extra_txt : string
extra lines that are added to the text output, used for warnings and explanations.
'''
def __init__(self):
self.tables = []
self.extra_txt = None
def __str__(self):
return self.as_text()
def __repr__(self):
#return '<' + str(type(self)) + '>\n"""\n' + self.__str__() + '\n"""'
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_table_2cols(self, res, title=None, gleft=None, gright=None,
yname=None, xname=None):
'''add a double table, 2 tables with one column merged horizontally
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
title : string or None
if None, then a default title is used.
gleft : list of tuples
elements for the left table, tuples are (name, value) pairs
If gleft is None, then a default table is created
gright : list of tuples or None
elements for the right table, tuples are (name, value) pairs
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
Returns
-------
None : tables are attached
'''
table = summary_top(res, title=title, gleft=gleft, gright=gright,
yname=yname, xname=xname)
self.tables.append(table)
def add_table_params(self, res, yname=None, xname=None, alpha=.05,
use_t=True):
'''create and add a table for the parameter estimates
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
Returns
-------
None : table is attached
'''
if res.params.ndim == 1:
table = summary_params(res, yname=yname, xname=xname, alpha=alpha,
use_t=use_t)
elif res.params.ndim == 2:
# _, table = summary_params_2dflat(res, yname=yname, xname=xname,
# alpha=alpha, use_t=use_t)
_, table = summary_params_2dflat(res, endog_names=yname,
exog_names=xname,
alpha=alpha, use_t=use_t)
else:
raise ValueError('params has to be 1d or 2d')
self.tables.append(table)
def add_extra_txt(self, etext):
'''add additional text that will be added at the end in text format
Parameters
----------
    etext : list of strings
        lines that are added to the text output.
'''
self.extra_txt = '\n'.join(etext)
def as_text(self):
'''return tables as string
Returns
-------
txt : string
summary tables and extra text as one string
'''
txt = summary_return(self.tables, return_fmt='text')
if not self.extra_txt is None:
txt = txt + '\n\n' + self.extra_txt
return txt
def as_latex(self):
'''return tables as string
Returns
-------
latex : string
summary tables and extra text as string of Latex
Notes
-----
This currently merges tables with different number of columns.
It is recommended to use `as_latex_tabular` directly on the individual
tables.
'''
return summary_return(self.tables, return_fmt='latex')
def as_csv(self):
'''return tables as string
Returns
-------
csv : string
concatenated summary tables in comma delimited format
'''
return summary_return(self.tables, return_fmt='csv')
def as_html(self):
'''return tables as string
Returns
-------
html : string
concatenated summary tables in HTML format
'''
return summary_return(self.tables, return_fmt='html')
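# --- Illustrative sketch (not part of the original module) ---
# A hypothetical way to assemble a Summary by hand from a fitted results
# instance; real model classes populate it through their own summary() method.
def _build_summary_demo(results):
    smry = Summary()
    smry.add_table_2cols(results,
                         gleft=[('Dep. Variable:', None), ('Model:', None)],
                         gright=[('No. Observations:', None)])
    smry.add_table_params(results, alpha=0.05, use_t=True)
    smry.add_extra_txt(['Standard errors assume the model is correctly '
                        'specified.'])
    return smry.as_text()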
if __name__ == "__main__":
import statsmodels.api as sm
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog)
res = sm.OLS(data.endog, data.exog).fit()
#summary(
| bsd-3-clause |
awdensmore/mg-econ-model | mgplot.py | 1 | 5552 | __author__ = 'adensmore'
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
global mk
mk = ['b-', 'r-', 'g-', 'y-', 'm-', 'c-', 'k-']
def plot_pd(prod, demand, ul=[]):
hrs = range(len(prod))
fig, ax = plt.subplots()
p1 = ax.plot(hrs, prod, 'b-')
p2 = ax.plot(hrs, demand, 'm-')
if len(ul) > 0:
ax.plot(hrs, ul, 'r-')
ax.set_xlabel("Hours of the year")
ax.set_ylabel("Hourly Production or Load (Whr)")
ax.legend( (p1[0], p2[0]), ('Production', 'Load') )
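# --- Illustrative sketch (not part of the original file) ---
# Hypothetical inputs for plot_pd: one value per simulated hour for production,
# load and (optionally) unmet load. The numbers are made up for demonstration.
def _plot_pd_demo():
    hours = 24
    prod = [100.0] * hours
    demand = [80.0] * hours
    unmet = [0.0] * hours
    plot_pd(prod, demand, unmet)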
def plot_socw(socw, bal=[]):
hrs = range(len(socw))
#mk = ['b-', 'r-', 'g-', 'y-', 'm-', 'c-', 'k-']
fig, ax = plt.subplots()
ax.set_xlabel("Hours of the year")
ax.set_ylabel("Hourly Battery State of Charge (Whr)")
b = []
for i in range(len(socw[0])):
y = []
for j in range(len(socw)):
y.append(socw[j][i])
b.append(y)
for i in range(len(b)):
ax.plot(hrs, b[i], mk[i])
if len(bal) > 0:
ax2 = ax.twinx()
ax2.plot(hrs, bal, 'k--')
def plot_soc(soc):
hrs = range(len(soc))
plt.xlabel("Simulation Hours")
plt.ylabel("Battery State of Charge (%)")
plt.plot(hrs, soc)
def plot_prices(prc, bal=[]):
hrs = range(len(prc))
fig, ax = plt.subplots()
ax.plot(hrs, prc, 'r-')
ax.set_xlabel("Hours of the year")
ax.set_ylabel("Hourly price of electricity ($/Whr)")
    if len(bal) > 0:
ax2 = ax.twinx()
ax2.plot(hrs, bal, 'k-')
def plot_price(results, labels):
ind = np.arange(len(results))
width_bar = 0.25
#width_cat = width_bar *
plt.bar(ind, results, width_bar)
plt.xticks(ind+width_bar/2, labels)
plt.xlabel("Elasticity of demand (% change in demand / % change in price)")
plt.ylabel("Cost of electricity (Currency / Whr)")
plt.title("Change in nominal electricity price with elasticity of demand")
def plot_rev(results, labels, ttl):
ind = np.arange(len(results[6]))
width_bar = 0.25
plt.bar(ind, results[6], width_bar)
plt.xticks(ind+width_bar/2, labels)
# FOR USE WITH CHANGING ELASTICITY
#plt.xlabel("Elasticity of demand (% change in demand / % change in price)")
#plt.title("Change in revenue with elasticity of demand, 1*d")
# FOR USE WITH CHANGING DEMAND
plt.xlabel("Actual demand as a ratio of expected")
plt.title("Change in annual revenue with demand, e= " + str(ttl))
# FOR USE WITH BASELINE
#plt.title("Baseline Annual Revenue")
plt.ylabel("Annual Revenue")
def plot_hd(results, labels, ttl):
ul_soc = [a * 100 for a in results[1]] # % unmet load due to low soc
ul_p = [(a - b)*100 for a,b in zip(results[2], results[1])] # % unmet load due to high prices
ind = np.arange(len(results[1]))
width_bar = 0.25
p1 = plt.bar(ind, ul_soc, width_bar, color='r')
p2 = plt.bar(ind, ul_p, width_bar, color='b', bottom=ul_soc)
plt.xticks(ind+width_bar/2, labels)
# FOR USE WITH CHANGING ELASTICITY
#plt.xlabel("Elasticity of demand (% change in demand / % change in price)")
#plt.title("Change in unmet load with elasticity of demand, 0.5*d")
# FOR USE WITH CHANGING DEMAND
plt.xlabel("Actual demand as a ratio of expected")
plt.title("Change in unmet load with demand, e= " + str(ttl))
# FOR USE WITH BASELINE
#plt.title("Baseline unmet load")
plt.ylabel("% of total load unmet due to disconnection")
plt.legend( (p1[0], p2[0]), ('Due to low SOC', 'Due to high prices'), loc=2 )
def import_data(file):
results = []
delim = ","
with open(file, 'r') as f:
while True:
rx = []
l = f.readline().rstrip()
if not l: break
b = False
while True:
n = l.find(delim)
if n > 0:
a = l[0:n]
else:
a = l[0:]
b = True
rx.append(a)
                # drop the consumed field and the delimiter (str.lstrip strips
                # a character set, so slicing is used to remove the exact field)
                l = l[len(a):].lstrip(delim)
                l = l.lstrip()
if b == True: break
results.append(rx)
return results
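# --- Illustrative note (not part of the original file) ---
# `import_data` hand-parses comma-separated rows and stops at the first blank
# line. The sketch below is a rough equivalent using the standard csv module,
# assuming plain rows without quoting or embedded commas.
def _import_data_csv(path):
    import csv
    with open(path, 'r') as f:
        return [[cell.strip() for cell in row] for row in csv.reader(f) if row]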
def results_diff(results):
out = []
lines = len(results)
for i in range(lines):
if len(results[i]) == 5:
j = 1
while (i + j*7) < lines:
rp = float(results[i+j*7][2])
rb = float(results[i][2])
r_diff = 100 * (rp / rb - 1)
u_diff = float(results[i+j*7][3]) - float(results[i][3])
line = [r_diff] + [u_diff] + results[i+j*7]
j = j + 1
out.append(line)
else:
break
return out
def plot_scatter(results):
x = [a[1] for a in results]
y = [b[0] for b in results]
m = ["o", "v", "s", "*", "+", "D", "x"]
c = ["r", "b", "g", "y", "0", "0.5", "0.75"]
p = []
for i in range(0,7):
for j in range(0,7):
a = plt.scatter(x[i*7+j],y[i*7+j], marker=m[i], c=c[j], s=64)
p.append(a)
plt.xlabel("% change in unmet load vs. baseline")
plt.ylabel("% change in revenue vs. baseline")
plt.legend([p[0], p[7], p[14], p[21], p[28], p[35], p[42],\
p[0], p[1], p[2], p[3], p[4], p[5], p[6]],\
['-0.2e', '-0.4e','-0.7e', '-0.9e', '-1e', '-1.25e', '-1.5e', \
'1/4d', '1/2d', '3/4d', 'd', '1.5d', '2d', '3d'], loc=(1.005,0.05))
plt.axis([-7,7,-5,20])
plt.show()
a = import_data("results.txt")
b = results_diff(a)
print(b)
plot_scatter(b)
def show():
plt.show() | gpl-3.0 |
treycausey/scikit-learn | sklearn/ensemble/__init__.py | 44 | 1228 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
yhat/digit-recognizer | model/crossfold.py | 1 | 2307 | import numpy as np
import pandas as pd
from PIL import Image
from StringIO import StringIO
import base64
import os
import csv
from ggplot import *
from sklearn.decomposition import RandomizedPCA
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score
from sklearn.cross_validation import train_test_split
wd = "./numbers/"
files = [f for f in os.listdir(wd)]
files = [wd + f for f in files]
STANDARD_SIZE = (50, 50)
def get_image_data(filename):
    img = Image.open(filename)
    # resize the image first, then pull out the raw pixel data
    img = img.resize(STANDARD_SIZE)
    img = img.getdata()
    img = map(list, img)
img = np.array(img)
s = img.shape[0] * img.shape[1]
img_wide = img.reshape(1, s)
return img_wide[0]
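# --- Illustrative note (not part of the original script) ---
# get_image_data flattens each resized image into a single row vector, so for
# an RGB input the hypothetical helper below returns 50 * 50 * 3 = 7500
# (grayscale or RGBA images would give a different length).
def _feature_length(filename):
    return len(get_image_data(filename))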
data = []
labels = []
print "extracting features..."
for i, f in enumerate(files):
print i, "of", len(files)
data.append(get_image_data(f))
labels.append(int(f.split(".")[-2][-1]))
print "done.\n"
output = open('./results.csv', 'w')
w = csv.writer(output)
w.writerow(["actual"] + range(10))
results = []
for n_components in [2, 5, 10, 25, 50, 100, 250, 500, 1000]:
print "Training for %d components..." % n_components
pca = RandomizedPCA(n_components=n_components)
std_scaler = StandardScaler()
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.1)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
X_train = std_scaler.fit_transform(X_train)
X_test = std_scaler.transform(X_test)
clf = KNeighborsClassifier(n_neighbors=13)
clf.fit(X_train, y_train)
print "Results for %d components:" % n_components
cm = confusion_matrix(y_test, clf.predict(X_test))
for i, row in enumerate(cm):
w.writerow([i] + row.tolist())
acc = accuracy_score(y_test, clf.predict(X_test))
# print precision_score(y_test, clf.predict(X_test))
# print recall_score(y_test, clf.predict(X_test))
print acc
results.append({"n_components": n_components, "accuracy": acc})
output.close()
results = pd.DataFrame(results)
print ggplot(results, aes(x='n_components', y='accuracy')) + \
geom_line()
| bsd-2-clause |
billy-inn/scikit-learn | sklearn/utils/testing.py | 84 | 24860 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex, but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object used to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object used to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
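# --- Illustrative sketch (not part of the original module) ---
# Minimal, hypothetical usages of the two warning helpers defined above.
def _assert_warns_demo():
    def _noisy():
        warnings.warn("deprecated thing", UserWarning)
        return 42
    # both calls pass: _noisy issues a UserWarning whose message contains
    # the substring "deprecated", and the helpers return _noisy's result
    value = assert_warns(UserWarning, _noisy)
    value = assert_warns_message(UserWarning, "deprecated", _noisy)
    return value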
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
        The expected exception class or tuple of classes.
func : callable
        Callable object that is expected to raise the error.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
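# --- Illustrative sketch (not part of the original module) ---
# A hypothetical use of assert_raise_message: the callable is expected to
# raise a ValueError whose message contains the given substring.
def _assert_raise_message_demo():
    def _boom(x):
        raise ValueError("x must be positive, got %r" % x)
    assert_raise_message(ValueError, "must be positive", _boom, -1)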
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
        this object creates a fake dataset in a BytesIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and
        cannot be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
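# --- Illustrative sketch (not part of the original module) ---
# A hypothetical query built on all_estimators: collect the names of every
# discoverable classifier in the installed scikit-learn.
def _classifier_names():
    return [name for name, cls in all_estimators(type_filter='classifier')]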
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independance)"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
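# --- Illustrative sketch (not part of the original module) ---
# Hypothetical usage of TempMemmap: inside the block `X_mm` is a read-only
# memory-mapped copy of `X`; the temporary folder is cleaned up on exit.
def _temp_memmap_demo():
    X = np.arange(12.0).reshape(3, 4)
    with TempMemmap(X) as X_mm:
        total = float(X_mm.sum())
    return total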
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
jeremiedecock/snippets | python/matplotlib/animation_fourier.py | 1 | 1042 | #!/usr/bin/env python3
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig = plt.figure()
ax = plt.axes(xlim=(-7, 7), ylim=(-5, 5))
line, = ax.plot([], [], "o-", lw=2)
ax.axvline(0, color="gray", lw=1)
ax.axhline(0, color="gray", lw=1)
def init():
line.set_data([], [])
return line,
def update(frame):
t = float(frame)/10.
e = math.e
z1 = 2. * e ** (1j * t)
z2 = z1 + 1. * e ** (1j * 2. * t)
z3 = z2 - 0.5 * e ** (1j * 3. * t)
x = [0, z1.real, z2.real, z3.real]
y = [0, z1.imag, z2.imag, z3.imag]
line.set_data(x, y)
return line,
ani = animation.FuncAnimation(fig, func=update, init_func=init, frames=62, interval=50, blit=True)
#ani.save('fourier.mp4')
ani.save('animation_fourier.gif', writer='imagemagick', fps=20)
## Set up formatting for the movie files
#Writer = animation.writers['ffmpeg']
#writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
#anim.save('fourier.mp4', writer=writer)
plt.show()
| mit |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/sklearn/metrics/tests/test_score_objects.py | 138 | 14048 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| gpl-2.0 |
jgbarah/revisor | report.py | 1 | 45149 | #! /usr/bin/python
# -*- coding: utf-8 -*-
## Copyright (C) 2014 Bitergia
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##
## Package to report about details from a Gerrit database retrieved via revisor
##
## Authors:
## Jesus M. Gonzalez-Barahona <[email protected]>
##
from query_gerrit import DB, Query
from sqlalchemy import func, Column, and_, desc, type_coerce, Float, or_
from sqlalchemy.sql import label
from datetime import datetime, timedelta
import argparse
import textwrap
from ggplot import *
import pandas as pd
import numpy as np
description = """
Simple script to produce reports with information extracted from a
revisor Gerrit-based database.
Example:
report.py --summary mysql://jgb:XXX@localhost/ gerrit_changes
"""
def parse_args ():
"""
Parse command line arguments
"""
parser = argparse.ArgumentParser(description = description)
parser.add_argument("database",
help = "SQLAlchemy url of the database " + \
"to read data from (schema not included)."
)
parser.add_argument("schema",
help = "Name of the schema " + \
"to read data from."
)
parser.add_argument("--projects",
help = "Name of projects to consider, " + \
"separated by comma."
)
parser.add_argument("--branches",
help = "Name of branches to consider, " + \
"separated by comma."
)
parser.add_argument("--owners",
help = "Name of change owners to consider, " + \
"separated by comma."
)
parser.add_argument("--no_owners",
help = "Name of change owners to filter out, " + \
"separated by comma."
)
parser.add_argument("--since",
help = "Only changes starting since date, " \
+ "in format 2014-10-29."
)
parser.add_argument("--until",
help = "Only changes starting until date, " \
+ "in format 2014-10-29."
)
parser.add_argument("--max_results",
help = "Maximum number of results"
)
parser.add_argument("--summary",
help = "Summary of main stats in the database.",
action = "store_true"
)
parser.add_argument("--summary_projects",
help = "Summary of main stats by projects.",
action = "store_true"
)
parser.add_argument("--full_messages",
help = "Show full messages wherever messages " + \
"are to be shown.",
action = "store_true"
)
parser.add_argument("--plot",
help = "Produce plots, when applicable.",
action = "store_true"
)
parser.add_argument("--plot_file",
help = "File name to plot to, when applicable.",
)
parser.add_argument("--period",
help = "Period length: day, week, month."
)
parser.add_argument("--change",
help = "Summary of a change, given change number"
)
parser.add_argument("--check_change_numbers",
help = "Check change numbers."
)
parser.add_argument("--check_upload",
help = "Check upload time of first revision with " + \
"created time for change. (time in mins.)"
)
parser.add_argument("--check_first_revision",
help = "Check that changes have a first revision."
)
parser.add_argument("--check_status",
help = "Check status of changes"
)
parser.add_argument("--check_abandon",
help = "Check that changes with an 'Abandoned' " \
+ "message are abandoned."
)
parser.add_argument("--check_abandon_cont",
help = "Check changes with an 'Abandoned' " \
+ "but continuing with activity."
)
parser.add_argument("--check_subm",
help = "Check Check that changes with 'SUBM' " + \
"approval are closed"
)
parser.add_argument("--check_events",
help = "Check that evolution of events matches " \
+ "current situation.",
action = "store_true"
)
parser.add_argument("--check_newer_dates",
help = "Check that all dates related to a " + \
"are newer than the creation date " + \
"(print at most n issues found)."
)
parser.add_argument("--show_drafts",
help = "Show revisins with isdraft == True, up to the number specified.",
)
parser.add_argument("--calc_duration_changes",
help = "Calculate duration of changes.",
)
parser.add_argument("--calc_duration_changes_approvals",
help = "Calculate duration of changes (using approvals).",
)
parser.add_argument("--show_events",
help = "Produce a list with all " \
+ "events of specified kind " \
+ "('start', 'submit', 'push', " \
+ "'abandon','restore','revert').",
)
parser.add_argument("--show_events_byperiod",
help = "Produce a list with number of " \
"events by period..",
)
parser.add_argument("--show_start_end",
help = "Show start and end for changes.",
action = "store_true"
)
args = parser.parse_args()
return args
def show_summary ():
"""Summary of main stats in the database.
Number of changes, messages, revisions, approvals.
"""
q = session.query(label ("changes",
func.count (DB.Change.id)))
print "Changes: " + str(q.scalar()) + " (",
q = session.query(
label ("status", DB.Change.status),
label ("number", func.count(DB.Change.uid))
) \
.group_by(DB.Change.status)
for row in q.all():
print row.status + ": " + str(row.number),
print ")"
q = session.query(label ("messages",
func.count (DB.Message.uid)))
print "Messages: " + str(q.scalar())
q = session.query(label ("revisions",
func.count (DB.Revision.uid)))
print "Revisions: " + str(q.scalar())
q = session.query(
label ("change", DB.Revision.uid),
) \
.join(DB.Change) \
.group_by(DB.Change.uid)
print "Changes with revisions: " + str(q.count())
q = session.query(label ("approvals",
func.count (DB.Approval.uid)))
print "Approvals: " + str(q.scalar())
q = session.query(label("max",
func.max(DB.Change.updated)))
last_date = q.one().max
print last_date
q = session.query(DB.Change).filter(DB.Change.updated == last_date)
last_change = q.one()
print "Last change: " + str(last_change.number)
print " Updated: " + str(last_change.updated)
def show_summary_projects ():
"""Summary of main stats by project (and name of projects).
"""
q = session.query (label ("project", DB.Change.project),
label ("changes", func.count(DB.Change.uid))
) \
.group_by(DB.Change.project) \
.order_by(desc("changes"))
for project, changes in q.all():
print project + ": " + str(changes)
def show_change_record (change):
"""Show a change record.
Parameters
----------
change: DB.Change
Change record to show.
"""
print "Change: " + str(change.number)
print "Project: " + change.project + " (branch: " + \
change.branch + ") " + change.url
print "Status: " + change.status + " / Created: " + \
str (change.created) + \
" Updated: " + str (change.updated)
print "Subject: " + change.subject
def show_revision_record (rev, approvals = True, change = None):
"""Show a revision record.
Parameters
----------
rev: DB.Revision
Revision record to show.
approvals: bool
Flag to show approvals (or not).
change: int
Change number (show if not None)
"""
print "Revision (patchset) " + str(rev.number) + " (" + \
rev.revision + ")",
if change:
print ", Change: " + str(change),
if rev.isdraft:
print " (rev is DRAFT)",
print
print " Date: " + str(rev.date)
if approvals:
res = session.query(DB.Approval) \
.filter(DB.Approval.revision_id == rev.uid) \
.order_by(DB.Approval.date)
for approval in res.all():
show_approval_record (approval)
def show_approval_record (approval):
"""Show an approval record.
Parameters
----------
approval: DB.Approval
Approval record to show.
"""
print " " + approval.type + ": " + str(approval.value)
print " Date: " + str(approval.date)
def show_message_record (message):
"""Show an message record.
Parameters
----------
message: DB.Message
Message record to show.
"""
print "Message: " + message.header
print " Date: " + str(message.date)
if args.full_messages:
print " Full text: \n" + \
"".join (
[" " + line for line in message.message.splitlines(True)]
)
def show_change (change_no):
"""Summary of data for a change (including revisions, approvals, etc.)
Parameters
----------
change_no: str
Change number.
"""
res = session.query(DB.Change) \
.filter (DB.Change.number == change_no)
change = res.one()
show_change_record (change)
res = session.query(DB.Revision) \
.filter (DB.Revision.change_id == change.uid) \
.order_by (DB.Revision.number)
for revision in res.all():
show_revision_record (revision)
res = session.query(DB.Message) \
.filter(DB.Message.change_id == change.uid) \
.order_by(DB.Message.date)
for message in res.all():
show_message_record (message)
def check_change_numbers(max):
"""Check change numbers.
"""
numbers = session.query(
label("number", DB.Change.number),
label("rep", func.count(DB.Change.number)),
) \
.group_by (DB.Change.number).subquery()
q = session.query(
label("number", numbers.c.number),
) \
.filter(numbers.c.rep > 1)
print "Repeated change numbers: " + str(q.count()) + " [",
for number in q.limit(max).all():
print number.number,
print "]"
def check_upload (diff):
"""Check upload time of first revision with created time for change.
For each change, the upload time of the first revision (patchset) is
matched against the created time for the change. Those changes with
more than diff mins. of difference are shown.
Parameters
----------
diff: int
Minutes of difference considered.
"""
revs = session.query(label ("daterev",
func.min(DB.Revision.date)),
label ("change_id",
DB.Revision.change_id),
label ("number",
DB.Change.number)) \
.filter (DB.Revision.change_id == DB.Change.uid) \
.group_by("change_id") \
.subquery()
res = session.query(
label ("number",
revs.c.number),
label ("created",
DB.Change.created),
label ("daterev",
revs.c.daterev)
) \
.filter(and_(
func.abs(func.timediff(
DB.Change.created,
revs.c.daterev) > timedelta (minutes = diff)),
DB.Change.uid == revs.c.change_id)) \
.order_by (func.datediff(DB.Change.created, revs.c.daterev),
func.timediff(DB.Change.created, revs.c.daterev))
messages = res.all()
for message in messages:
print "Change " + str(message.number) + ": " + \
str(message.created - message.daterev) + \
" -- " + str(message.created) + " (created), " + \
str(message.daterev) + " (first revision)"
print "Total changes with discrepancy: " + str (len(messages))
def check_newer_dates(max):
"""Check that dates related to a change are newer than creation date.
    This will print summary stats about dates that are not correct,
and will show at most max cases.
Parameters
----------
max: int
Max number of cases to show among those violating the check.
"""
res = session.query(
label ("number",
DB.Change.number),
label ("created",
DB.Change.created),
label ("updated",
DB.Change.updated)
) \
.filter (DB.Change.created > DB.Change.updated) \
.order_by (desc (func.datediff(DB.Change.created,
DB.Change.updated)))
cases = res.limit(max).all()
for case in cases:
print str(case.number) + ": " + str(case.created) + \
" (created), " + str(case.updated) + " (updated) Mismatch: " + \
str(case.created - case.updated) + ")"
print "Total number of mismatchs: " + str(res.count())
def check_first_revision(max):
"""Check that changes have a first revision.
Parameters
----------
max: int
Max number of cases to show among those violating the check.
"""
q = session.query(
label ("revision", DB.Revision.uid),
) \
.join (DB.Change) \
.filter (DB.Revision.number == 1) \
.group_by (DB.Change.uid)
print "Changes with first revision: " + str(q.count())
first = session.query(
label ("change", DB.Revision.change_id),
) \
.filter (DB.Revision.number == 1) \
.subquery()
q = session.query(
label ("change", DB.Change.number),
) \
.filter (~DB.Change.uid.in_(first))
for change in q.limit(max).all():
print change.change
print "Changes with no first revision: " + str(q.count())
def check_status(max):
"""Check status of changes.
Check the status of each change, in combination with its "open" flag.
Parameters
----------
max: int
Max number of cases to show among those violating the check.
"""
q = session.query(
label("num", func.count(DB.Change.uid)),
label("open", DB.Change.open),
label("status", DB.Change.status),
) \
.group_by (DB.Change.open, DB.Change.status)
for state in q.all():
print "Open is " + str(state.open) + ", status is " \
+ state.status + ": " + str(state.num)
def check_abandon(max):
"""Check that changes with an "Abandoned" message are abandoned.
Parameters
----------
max: int
Max number of cases to show among those violating the check.
"""
q = session.query(
label("num", DB.Change.number),
) \
.filter (DB.Change.status == "ABANDONED")
print q.count()
q = session.query(
label("num", DB.Change.number),
) \
.filter (DB.Change.status == "ABANDONED") \
.join (DB.Message) \
.filter (DB.Message.header == "Abandoned")
print q.count()
q_abandoned = session.query(DB.Message) \
.filter(DB.Change.uid == DB.Message.change_id,
or_ (DB.Message.header == "Abandoned",
DB.Message.header.like ("Patch%Abandoned")))
q = session.query(
label("num", DB.Change.number),
) \
.filter (DB.Change.status == "ABANDONED") \
.filter(~q_abandoned.exists())
print q.count()
for change in q.limit(max).all():
print str(change.num),
print
q = session.query(
label("num", DB.Change.number),
) \
.filter (DB.Change.status != "ABANDONED") \
.filter(q_abandoned.exists())
print q.count()
def check_abandon_cont(max):
"""Check changes with an "Abandoned" but continuing with activity.
Parameters
----------
max: int
Max number of cases to show among those violating the check.
"""
q_abandons = session.query(
label("id", DB.Change.uid),
label("date", func.min(DB.Message.date)),
label("num", DB.Change.number)
) \
.select_from(DB.Change) \
.join(DB.Message) \
.filter (or_ (DB.Message.header == "Abandoned",
DB.Message.header.like ("Patch%Abandoned"))) \
.group_by(DB.Change.uid) \
.subquery()
q = session.query(
label("num", q_abandons.c.num)
) \
.join(DB.Message,
DB.Message.change_id == q_abandons.c.id) \
.filter(DB.Message.date > q_abandons.c.date) \
.group_by(q_abandons.c.id)
changes = q.count()
print "Changes abandoned, with activity after abandon (" \
+ str(changes) + "): ",
for change in q.limit(max).all():
print change.num
print
def check_subm(max):
"""Check that changes with "SUBM" approval are closed.
Parameters
----------
max: int
Max number of cases to show among those violating the check.
"""
# Subquery for changes with at least one SUBM approval
q_subm = session.query(DB.Revision) \
.join(DB.Approval) \
.filter (DB.Change.uid == DB.Revision.change_id,
DB.Approval.type == "SUBM")
# Query for list of changes with at least one SUBM approval
q_changes_subm = session.query(
label ("num", DB.Change.number),
) \
.filter(q_subm.exists())
total = q_changes_subm.count()
print "Changes with at least a SUBM approval (" + str(total) + "):"
# Query for cases of changes with at least one SUBM approval
q_changes_subm_cases = session.query(
label ("open", DB.Change.open),
label ("num", func.count(DB.Change.uid)),
) \
.filter(q_subm.exists()) \
.group_by (DB.Change.open)
cases = q_changes_subm_cases.all()
for case in cases:
print " Open is " + str(case.open) + ": " + str(case.num)
if case.open == 1:
print " Changes still open (list): ",
cases = q_changes_subm.filter(DB.Change.open == 1).limit(max).all()
for case in cases:
print str(case.num) + " ",
print
# Query for list of changes with no SUBM approval
q_changes_nosubm = session.query(
label ("num", DB.Change.number),
) \
.filter(~q_subm.exists())
total = q_changes_nosubm.count()
print "Changes with no SUBM approval (" + str(total) + "):"
# Query for cases of changes with no SUBM approval
q_changes_nosubm_cases = session.query(
label ("open", DB.Change.open),
label ("num", func.count(DB.Change.uid)),
label ("status", DB.Change.status),
) \
.filter(~q_subm.exists()) \
.group_by (DB.Change.open)
cases = q_changes_nosubm_cases.all()
for case in cases:
print " Open is " + str(case.open) + ": " + str(case.num)
if case.open == 0:
# Closed changes, but no SUBM
cases_status = q_changes_nosubm_cases \
.filter(DB.Change.open == 0) \
.group_by (DB.Change.status).all()
for case_status in cases_status:
print " Status is " + case_status.status \
+ ": " + str(case_status.num)
if case_status.status == "MERGED":
# Closed & merged changes, but no SUBM
pushed = q_changes_nosubm \
.join(DB.Message) \
.filter(DB.Change.status == "MERGED") \
.filter(DB.Change.open == 0) \
.filter(DB.Message.header.like(
"Change has been successfully pushed%"
))
print " Changes merged by being pushed: " \
+ str(pushed.count())
# Other remaining changes
q_pushed = session.query(DB.Message) \
.filter(DB.Change.uid == DB.Message.change_id,
DB.Message.header.like(
"Change has been successfully pushed%"
))
not_pushed = q_changes_nosubm \
.filter(DB.Change.status == "MERGED") \
.filter(DB.Change.open == 0) \
.filter(~q_pushed.exists())
not_pushed_no = not_pushed.count()
print " Other changes (" + str(not_pushed_no) \
+ ", list): ",
changes = not_pushed.limit(max).all()
for change in changes:
print str(change.num),
print
def check_events (projects = None):
"""Check that evolution of events matches current situation.
Parameters
----------
projects: list of str
List of projects to consider. Default: None
"""
q = query_start (projects)
started = q.count()
print "Started: " + str(started)
q = query_submit (projects)
submitted = q.count()
print "Submitted: " + str(submitted)
q = query_in_header ("Pushed", "Change has been successfully pushed%",
projects)
pushed = q.count()
print "Pushed: " + str(pushed)
q = query_in_header ("Abandoned", "Patch%Abandoned", projects)
abandoned = q.count()
print "Abandoned: " + str(abandoned)
q = query_in_header ("Restored", "Patch%Restored", projects)
restored = q.count()
print "Restored: " + str(restored)
q = query_in_header ("Reverted", "Patch%Reverted", projects)
reverted = q.count()
print "Reverted: " + str(reverted)
res_merged = submitted + pushed
print "Resulting merged: " + str(res_merged)
res_abandoned = abandoned - restored
print "Resulting abandoned: " + str(res_abandoned)
print "Resulting new:" + str(
started - res_merged - res_abandoned)
def show_drafts(max):
"""Find revisins with isdraft == True up to the number specified.
Parameters
----------
max: int
Maximum number of isdraft revisions to print.
"""
res = session.query(DB.Revision,
label("change",
DB.Change.number)) \
.join(DB.Change) \
.filter (DB.Revision.isdraft == True)
for rev in res.limit(max).all():
show_revision_record(rev = rev.Revision, change = rev.change)
print "Total number of drafts: " + str(res.count())
def calc_duration_changes(max):
"""Calculate duration of changes (time from created to updated).
    This will print summary stats about the duration of the
changes in the review system, and will show some of them.
Parameters
----------
max: int
Max number of changes to show.
"""
res = session.query(
label ("number",
DB.Change.number),
label ("start",
DB.Change.created),
label ("finish",
DB.Change.updated),
) \
.filter (DB.Change.created < DB.Change.updated) \
.order_by (desc (func.datediff(DB.Change.updated,
DB.Change.created)))
cases = res.limit(max).all()
for case in cases:
print str(case.number) + ": " + str(case.start) + \
" (start), " + str(case.finish) + " (finish) Duration: " + \
str(case.finish - case.start)
def calc_duration_changes_approvals(max):
"""Calculate duration of changes using information about approvals.
    This will print summary stats about the duration of the
changes in the review system, and will show some of them.
A change is defined to start when the first upload for it is
found, and defined to end when the latest approval is found.
Parameters
----------
max: int
Max number of changes to show.
"""
starts = session.query(
label ("number",
DB.Change.number),
label ("date",
func.min (DB.Revision.date)),
) \
.filter (DB.Change.uid == DB.Revision.change_id) \
.group_by (DB.Change.uid) \
.subquery()
finishes = session.query(
label ("number",
DB.Change.number),
label ("date",
func.max (DB.Approval.date)),
) \
.filter (DB.Change.uid == DB.Revision.change_id,
DB.Revision.uid == DB.Approval.revision_id) \
.group_by (DB.Change.uid) \
.subquery()
query = session.query(
label ("number", starts.c.number),
label ("start", starts.c.date),
label ("finish", finishes.c.date),
) \
.filter (starts.c.number == finishes.c.number)
cases = query.limit(max).all()
for case in cases:
print str(case.number) + ": " + str(case.start) + \
" (start), " + str(case.finish) + " (finish) Duration: " + \
str(case.finish - case.start)
def plot_events_byperiod (byperiod, filename = None):
"""Plot a series of events.
Parameters
----------
    byperiod: pandas.timeseries
Events to plot, as a timeseries dataframe with three columns:
period (as starting date), event name, number of events.
filename: str
File name to plot to. (Default: None, means plot online).
"""
chart = ggplot (aes(x='date', y='change', color='event'),
data=byperiod) \
+ geom_line() \
+ labs("Date", "Number of events")
if filename is None:
print chart
else:
ggsave (filename, chart)
def query_create ():
"""Produce a query for selecting change create events.
The query will select "date" as the date for the event, and
"change" for the change number. The date is the "created"
field for the change.
Returns
-------
query_gerrit.query: produced query
"""
q = session.query(
label ("date", DB.Change.created),
label ("change", DB.Change.number),
)
return q
def query_start (changes = None):
"""Produce a query for selecting chnage start events.
The query will select "date" as the date for the event, and
"change" for the change number. The date is calculated as
the date of the first revision.
Parameters
----------
changes: list of int
List of change numbers to consider.
Returns
-------
query_gerrit.query: produced query
"""
q = session.query(
label ("date", func.min(DB.Revision.date)),
label ("change", DB.Change.number),
) \
.join(DB.Change)
q = q.group_by(DB.Change.uid)
if changes is not None:
q = q.filter(DB.Change.number.in_(changes))
return q
def query_submit ():
"""Produce a query for selecting submit (ready to merge) events.
The query will select "date" as the date for the event, and
"change" for the change number.
Returns
-------
query_gerrit.query: produced query
"""
q = session.query(
label ("date", func.max(DB.Approval.date)),
label ("change", DB.Change.number),
) \
.select_from(DB.Change) \
.join(DB.Revision) \
.join(DB.Approval) \
.filter (DB.Approval.type == "SUBM")
q = q.group_by(DB.Change.uid)
return q
def query_in_header (header, like_header,
unique = False):
"""Produce a query for selecting events by finding header in messages.
The query will select "date" as the date for the event, and
"change" for the change number.
Parameters
----------
header: str
String to find (exactly) in header of messages.
like_header: str
String to find (using like) in header of messages.
unique: bool
Consider only unique changes (count as one if a change has
        several abandon messages).
Returns
-------
query_gerrit.query: produced query
"""
if unique:
q = session.query(
label ("date", func.min(DB.Message.date)),
label ("change", DB.Change.number),
)
else:
q = session.query(
label ("date", DB.Message.date),
label ("change", DB.Change.number),
)
q = q.select_from(DB.Change) \
.join(DB.Message) \
.filter (or_ (DB.Message.header == header,
DB.Message.header.like (like_header)))
if unique:
q = q.group_by(DB.Change.uid)
return q
def query_revisions ():
"""Produce a query for selecting new revision events.
The query will select "date" in revision record as the date
for the event, and "change" for the change number.
"""
q = session.query(
label ("date", DB.Revision.date),
label ("change", DB.Change.number),
)
q = q.select_from(DB.Revision) \
.join(DB.Change)
return q
def get_events (kinds, max, projects = None, branches = None,
owners = None, no_owners = None,
since = None, until = None):
"""Get a dataframe with avents of kind kinds.
Parameters
----------
kinds: list of {"start", "submit", "push", "abandon", "restore", "revert"}
Kinds of events to be produced.
max: int
Max number of changes to consider (0 means "all").
projects: list of str
List of projects to consider. Default: None
branches: list of str
List of branches to consider. Default: None.
owners: list of str
List of owners to consider. Default: None.
no_owners: list of str
List of owners to filter out. Default: None.
since: datetime
Only changes starting later than since. Default: None.
until: datetime
Only changes starting before until. Default: None.
Returns
-------
pandas.dataframe: Events
Dataframe with columns "date" (datetime), "change"
(change number), "event" (str, kind of event).
"""
queries = {}
if "create" in kinds:
queries["create"] = query_create ()
if "start" in kinds:
queries["start"] = query_start ()
if "submit" in kinds:
queries["submit"] = query_submit ()
if "push" in kinds:
queries["push"] = query_in_header (
"Pushed",
"Change has been successfully pushed%")
if "abandon" in kinds:
queries["abandon"] = query_in_header ("Abandoned",
"Patch%Abandoned")
if "restore" in kinds:
queries["restore"] = query_in_header ("Restored",
"Patch%Restored")
if "revert" in kinds:
queries["revert"] = query_in_header ("Reverted",
"Patch%Reverted")
if "revision" in kinds:
queries["revision"] = query_revisions ()
event_list = []
for kind in queries:
# Add owners to query
if owners is not None:
q_owners = session.query (DB.People.uid) \
.filter (DB.People.username.in_(owners))
owner_ids = [id for (id, ) in q_owners.all()]
queries[kind] = queries[kind].filter (
DB.Change.owner_id.in_(owner_ids))
elif no_owners is not None:
q_no_owners = session.query (DB.People.uid) \
.filter (DB.People.username.in_(no_owners))
no_owner_ids = [id for (id, ) in q_no_owners.all()]
queries[kind] = queries[kind].filter (
~DB.Change.owner_id.in_(no_owner_ids))
# Add projects to query
if projects is not None:
queries[kind] = queries[kind].filter (
DB.Change.project.in_(projects))
# Add branches to query
if branches is not None:
queries[kind] = queries[kind].filter (
DB.Change.branch.in_(branches))
# Add limit to query, query, add kind column
if max != 0:
queries[kind] = queries[kind].limit(max)
for date, change in queries[kind]:
event_list.append( [date, change, kind] )
events_df = pd.DataFrame.from_records (
event_list,
columns = ["date", "change", "event"]
)
if (since is not None) or (until is not None):
# Get all start events
start_q = query_start(events_df["change"].unique())
start_df = pd.DataFrame.from_records (
start_q.all(),
columns = ["date", "change"]
)
if since is not None:
start_df = start_df[start_df["date"] >= since]
if until is not None:
start_df = start_df[start_df["date"] < until]
events_df = events_df[events_df["change"].isin(start_df["change"])]
# print events_df
return events_df
def get_events_byperiod (events_df, period = "month"):
"""Get a pandas timeseries with events per period.
Parameters
----------
events_df: pandas.dataframe
Events to group by period. It is a dataframe with the
following columns: "date" (datetime), "change"
(change number), "event" (str, kind of event).
period: { "day", "week", "month" }
Length of period (Default: "month").
Returns
-------
pandas.timeseries: Number of events per period
Pandas grouped object.
"""
if period == "month":
freq = 'M'
elif period == "day":
freq = 'D'
elif period == "week":
freq = 'W'
byperiod = events_df.set_index('date') \
.groupby([pd.TimeGrouper(freq=freq), "event"],
as_index=False) \
.aggregate(len)
return byperiod
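# Hedged usage sketch (not part of the original script): a made-up events
# frame grouped by month. It assumes the old pandas API this module already
# relies on (pd.TimeGrouper).
def _example_get_events_byperiod():
    events = pd.DataFrame({
        "date": [datetime(2014, 1, 5), datetime(2014, 1, 20), datetime(2014, 2, 3)],
        "change": [1, 2, 3],
        "event": ["start", "start", "submit"],
    })
    # Expected: one row per (month, event) pair, with "change" holding the
    # event count: January/"start" -> 2, February/"submit" -> 1.
    return get_events_byperiod(events, period="month")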
def change_start_end (events):
"""Produce a (start, end) list for the events of a change.
Parameters
----------
events: pandas.DataFrame
Each row contains an event as "date" (datetime), "change"
(int, change number), and "event" (str, kind of event).
Returns
-------
datetime: start of the change.
datetime: end of the change (None if still not finished).
"""
events = events.sort("date")
# print "Events: \n", events
start = None
end = None
reason = None
for i, row in events.iterrows():
if start is None:
if row ["event"] == "start":
start = row ["date"]
elif row ["event"] in ["submit", "push", "abandon"]:
end = row ["date"]
reason = row ["event"]
break
if start is not None and end is not None:
diff = end - start
duration = (diff.seconds + diff.days * 86400 ) / 3600
else:
duration = None
return pd.DataFrame( {"start": [start], "end": [end],
"duration": [duration], "reason": reason})
def get_start_end (events_df):
"""Get a dataframe with start and end times per change.
The dataframe has fields "change" (change number), "start"
(datetime, start of the change), "end" (datetime, end of the change).
For "start", the "start" event will be considered (first upload).
For "end", the first "submit", "push" or "abandon" will be considered.
Parameters
----------
events_df: pandas.dateframe
Events for all changes, including at least "start", "submit",
"push" and "abandon". Columns are "change", "date", and "event".
Returns
-------
pandas.dateframe: start and end times (datetime) per change.
Columns of the dataframe: "change", "start", "end".
"""
bychange = events_df.groupby("change")
start_end = bychange.apply(change_start_end)
return start_end
def show_events (kinds, max, projects = None, branches = None,
no_owners = None, owners = None,
plot = False, plot_file = False):
"""Produce a list with events of kind kinds.
Parameters
----------
kinds: list of {"start", "submit", "push", "abandon", "restore", "revert"}
Kinds of events to be produced.
max: int
Max number of changes to consider (0 means "all").
projects: list of str
List of projects to consider. Default: None
branches: list of str
List of branches to consider. Default: None.
owners: list of str
List of owners to consider. Default: None.
no_owners: list of str
List of owners to filter out. Default: None.
plot: bool
Plot results in a chart (Default: False)
plot_file: str
File name to plot to (Default: None, means plot online)
"""
events_df = get_events (kinds, max, projects, branches, owners, no_owners)
print events_df
if plot:
plot_events_all(events_df, plot_file)
def show_events_byperiod (kinds, max, projects = None, branches = None,
owners = None, no_owners = None,
since = None, until = None,
plot = False, plot_file = None,
period = "month"):
"""Produce a list with number of events by period.
Parameters
----------
kinds: list of {"start", "submit", "push", "abandon", "restore", "revert"}
Kinds of events to be produced.
max: int
Max number of changes to consider (0 means "all").
projects: list of str
List of projects to consider. Default: None.
branches: list of str
List of branches to consider. Default: None.
owners: list of str
List of owners to consider. Default: None.
no_owners: list of str
List of owners to filter out. Default: None.
since: datetime
Only changes starting later than since. Default: None.
until: datetime
Only changes starting before until. Default: None.
plot: bool
Plot results in a chart (Default: False)
plot_file: str
File name to plot to (Default: None, means plot online).
period: { "day", "week", "month" }
Length of period (Default: "month").
"""
events_df = get_events (kinds, max, projects, branches, owners, no_owners, since = since, until = until)
byperiod = get_events_byperiod (events_df, period)
print byperiod
print "Total number of changes: " + str(byperiod.sum()["change"])
if plot:
plot_events_byperiod(byperiod, plot_file)
def show_start_end (max, projects = None, branches = None,
owners = None, no_owners = None,
since = None, until = None,
plot = False, plot_file = False):
"""Show start and end for changes.
Parameters
----------
max: int
Max number of changes to consider (0 means "all").
projects: list of str
List of projects to consider. Default: None.
branches: list of str
List of branches to consider. Default: None.
owners: list of str
List of owners to consider. Default: None.
no_owners: list of str
List of owners to filter out. Default: None.
since: datetime
Only changes starting later than since. Default: None.
until: datetime
Only changes starting before until. Default: None.
plot: bool
Plot results in a chart. Default: False.
plot_file: str
file name to plot to. Default: None, means plot online.
"""
events = get_events (["start", "submit", "push", "abandon"], max,
projects, branches, owners, no_owners,
since = since, until = until)
start_end = get_start_end (events)
print start_end
print start_end.describe()
if __name__ == "__main__":
from grimoirelib_alch.aux.standalone import stdout_utf8, print_banner
stdout_utf8()
args = parse_args()
database = DB (url = args.database,
schema = args.schema,
schema_id = args.schema)
session = database.build_session(Query, echo = False)
if args.summary:
show_summary()
if args.summary_projects:
show_summary_projects()
if args.change:
show_change(args.change)
if args.projects:
projects = args.projects.split (",")
else:
projects = None
if args.branches:
branches = args.branches.split (",")
else:
branches = None
if args.owners:
owners = args.owners.split (",")
else:
owners = None
if args.no_owners:
no_owners = args.no_owners.split (",")
else:
no_owners = None
if args.since:
since = datetime.strptime(args.since, "%Y-%m-%d")
print since
else:
since = None
if args.until:
until = datetime.strptime(args.until, "%Y-%m-%d")
else:
until = None
if args.max_results:
max_results = args.max_results
else:
max_results = 0
if args.plot:
plot = True
else:
plot = False
if args.plot_file:
plot_file = args.plot_file
else:
plot_file = None
if args.period:
period = args.period
else:
period = "month"
if args.check_change_numbers:
check_change_numbers(int(args.check_change_numbers))
if args.check_upload:
check_upload(int(args.check_upload))
if args.check_first_revision:
check_first_revision(int(args.check_first_revision))
if args.check_status:
check_status(int(args.check_status))
if args.check_abandon:
check_abandon(int(args.check_abandon))
if args.check_abandon_cont:
check_abandon_cont(int(args.check_abandon_cont))
if args.check_subm:
check_subm(int(args.check_subm))
if args.check_events:
check_events(projects)
if args.show_drafts:
show_drafts(args.show_drafts)
if args.check_newer_dates:
check_newer_dates(args.check_newer_dates)
if args.calc_duration_changes:
calc_duration_changes(args.calc_duration_changes)
if args.calc_duration_changes_approvals:
calc_duration_changes_approvals(
args.calc_duration_changes_approvals)
if args.show_events:
show_events(args.show_events,
max_results, projects, plot, plot_file)
if args.show_events_byperiod:
show_events_byperiod(args.show_events_byperiod,
max_results,
projects, branches, owners, no_owners,
since = since, until = until,
plot = plot, plot_file = plot_file,
period = period)
if args.show_start_end:
show_start_end(max_results,
projects, branches, owners, no_owners,
since = since, until = until,
plot = plot, plot_file = plot_file)
| gpl-3.0 |
RPGOne/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
mitdbg/modeldb | client/verta/verta/_internal_utils/_pip_requirements_utils.py | 1 | 11105 | # -*- coding: utf-8 -*-
from __future__ import print_function
import importlib
import re
import subprocess
import sys
import warnings
import cloudpickle
from ..external import six
from .. import __about__
# for process_requirements()
PYPI_TO_IMPORT = {
'scikit-learn': "sklearn",
'tensorflow-gpu': "tensorflow",
'tensorflow-hub': "tensorflow_hub",
'beautifulsoup4': "bs4",
}
IMPORT_TO_PYPI = { # separate mapping because PyPI to import is surjective
'sklearn': "scikit-learn",
'tensorflow_hub': "tensorflow-hub",
'bs4': "beautifulsoup4",
}
PKG_NAME_PATTERN = r"([A-Z0-9][A-Z0-9._-]*[A-Z0-9]|[A-Z0-9])" # https://www.python.org/dev/peps/pep-0508/#names
VER_SPEC_PATTERN = r"(~=|==|!=|<=|>=|<|>|===)" # https://www.python.org/dev/peps/pep-0440/#version-specifiers
VER_NUM_PATTERN = r"([0-9]+(?:\.[0-9]+){0,2}[^\s]*)" # https://www.python.org/dev/peps/pep-0440/#version-scheme
REQ_SPEC_PATTERN = (
PKG_NAME_PATTERN + r"\s*"
+ VER_SPEC_PATTERN + r"\s*"
+ VER_NUM_PATTERN
)
SPACY_MODEL_PATTERN = r"[a-z]{2}(?:[_-][a-z]+){2}[_-](?:sm|md|lg)" # https://spacy.io/models#conventions
PKG_NAME_REGEX = re.compile(PKG_NAME_PATTERN, flags=re.IGNORECASE)
VER_SPEC_REGEX = re.compile(VER_SPEC_PATTERN)
VER_NUM_REGEX = re.compile(VER_NUM_PATTERN)
REQ_SPEC_REGEX = re.compile(REQ_SPEC_PATTERN, flags=re.IGNORECASE)
SPACY_MODEL_REGEX = re.compile(SPACY_MODEL_PATTERN)
def get_pip_freeze():
pip_freeze = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])
pip_freeze = six.ensure_str(pip_freeze)
req_specs = pip_freeze.splitlines()
req_specs = clean_reqs_file_lines(req_specs)
return req_specs
def parse_req_spec(req_spec):
"""
Parses a requirement specifier into its components.
Parameters
----------
req_spec : str
e.g. "banana >= 3.6.0"
Returns
-------
library : str
e.g. "banana"
constraint : str
e.g. ">="
version : str
e.g. "3.6.0"
"""
match = REQ_SPEC_REGEX.match(req_spec)
if match is None:
raise ValueError("\"{}\" does not appear to be a valid pip requirement specifier;"
" it may be misspelled or missing its version specifier".format(req_spec))
return match.groups()
def parse_version(version):
"""
Parses a version number into its components.
A missing component will be returned as a ``0``.
Parameters
----------
version : str
e.g. "3.6"
Returns
-------
major : int
e.g. 3
minor : int
e.g. 6
patch : int
e.g. 0
suffix : str
Additional characters, such as build metadata or sub-patch release numbers.
"""
if VER_NUM_REGEX.match(version) is None:
raise ValueError("\"{}\" does not appear to be a valid version number".format(version))
MAJOR_REGEX = re.compile(r"^([0-9]+)")
MINOR_OR_PATCH_REGEX = re.compile(r"^(\.[0-9]+)")
# extract major version
split = MAJOR_REGEX.split(version, maxsplit=1)[1:] # first element is empty
major = int(split[0])
suffix = ''.join(split[1:])
# extract minor version
if MINOR_OR_PATCH_REGEX.match(suffix):
split = MINOR_OR_PATCH_REGEX.split(suffix, maxsplit=1)[1:] # first element is empty
minor = int(split[0][1:]) # first character is period
suffix = ''.join(split[1:])
else:
minor = 0
# extract patch version
if MINOR_OR_PATCH_REGEX.match(suffix):
split = MINOR_OR_PATCH_REGEX.split(suffix, maxsplit=1)[1:] # first element is empty
patch = int(split[0][1:]) # first character is period
suffix = ''.join(split[1:])
else:
patch = 0
return major, minor, patch, suffix
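# Hedged sketch (not part of the original module): a minimal check of the two
# parsing helpers above, reusing the illustrative values from their docstrings
# ("banana >= 3.6.0" is a made-up requirement specifier).
def _example_parse_helpers():
    assert parse_req_spec("banana >= 3.6.0") == ("banana", ">=", "3.6.0")
    assert parse_version("3.6") == (3, 6, 0, "")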
def process_requirements(requirements):
"""
Validates `requirements` against packages available in the current environment.
Parameters
----------
requirements : list of str
PyPI package names.
Raises
------
ValueError
If a package's name is invalid for PyPI, or its exact version cannot be determined.
"""
# validate package names
for req in requirements:
if not PKG_NAME_REGEX.match(req):
raise ValueError("'{}' does not appear to be a valid PyPI-installable package;"
" please check its spelling,"
" or file an issue if you believe it is in error".format(req))
strip_inexact_specifiers(requirements)
set_version_pins(requirements)
add_verta_and_cloudpickle(requirements)
def strip_inexact_specifiers(requirements):
"""
Removes any version specifier that is not ``==``, leaving just the package name.
Parameters
----------
requirements : list of str
Warns
-----
UserWarning
If a requirement specifier uses version specifier other than ``==``, to inform the user
that it will be replaced with an exact version pin.
"""
for i, req in enumerate(requirements):
_, pkg, ver_spec = PKG_NAME_REGEX.split(req, maxsplit=1)
if not ver_spec:
continue
elif '==' in ver_spec:
continue
else:
msg = ("'{}' does not use '=='; for reproducibility in deployment, it will be replaced"
" with an exact pin of the currently-installed version".format(req))
warnings.warn(msg)
requirements[i] = pkg
def set_version_pins(requirements):
"""
Sets version pins for packages in `requirements`.
Parameters
----------
requirements : list of str
Notes
-----
This function attempts an import of each package and checks its version using the module's
``__version__`` attribute. This can lead to problems if the package is not importable (e.g.
    PyPI name is different from its package module name) or if it does not supply
``__version__``.
This approach is taken because Python package management is complete anarchy, and the Client
can't determine whether the environment is using pip or conda in order to check the installed
version directly from the environment.
"""
# map of packages to their versions according to pip
pip_pkg_vers = dict(
req_spec.split('==')
for req_spec
in six.ensure_str(subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])).splitlines()
if '==' in req_spec
)
# replace importable module names with PyPI package names in case of user error
for i, req in enumerate(requirements):
requirements[i] = IMPORT_TO_PYPI.get(req, req)
for i, req in enumerate(requirements):
error = ValueError("unable to determine a version number for requirement '{}';"
" it might not be installed;"
" please manually specify it as '{}==x.y.z'".format(req, req))
if VER_SPEC_REGEX.search(req) is None:
# obtain package version
try:
mod_name = PYPI_TO_IMPORT.get(req, req)
mod = importlib.import_module(mod_name)
ver = mod.__version__
except (ImportError, AttributeError):
# fall back to checking pip
try:
ver = pip_pkg_vers[req]
except KeyError:
six.raise_from(error, None)
requirements[i] = req + "==" + ver
def add_verta_and_cloudpickle(requirements):
"""
Adds verta and cloudpickle to `requirements`, pinning their versions from the environment.
verta and cloudpickle are required for deployment, but a user might not have specified them in
their manual deployment requirements.
Parameters
----------
requirements : list of str
Raises
------
ValueError
If verta or cloudpickle already have a version pin specified in `requirements`, but it
conflicts with the version in the current environment.
"""
# add verta
verta_req = "verta=={}".format(__about__.__version__)
for req in requirements:
if req.startswith("verta"): # if present, check version
our_ver = verta_req.split('==')[-1]
their_ver = req.split('==')[-1]
if our_ver != their_ver: # versions conflict, so raise exception
raise ValueError("Client is running with verta v{}, but the provided requirements specify v{};"
" these must match".format(our_ver, their_ver))
else: # versions match, so proceed
break
else: # if not present, add
requirements.append(verta_req)
# add cloudpickle
cloudpickle_req = "cloudpickle=={}".format(cloudpickle.__version__)
for req in requirements:
if req.startswith("cloudpickle"): # if present, check version
our_ver = cloudpickle_req.split('==')[-1]
their_ver = req.split('==')[-1]
if our_ver != their_ver: # versions conflict, so raise exception
raise ValueError("Client is running with cloudpickle v{}, but the provided requirements specify v{};"
" these must match".format(our_ver, their_ver))
else: # versions match, so proceed
break
else: # if not present, add
requirements.append(cloudpickle_req)
def clean_reqs_file_lines(requirements):
"""
Performs basic preprocessing on a requirements file's lines so it's easier to handle downstream.
Parameters
----------
requirements : list of str
``requirements_file.readlines()``.
Returns
-------
cleaned_requirements : list of str
Requirement specifiers.
"""
requirements = [req.strip() for req in requirements]
requirements = [req for req in requirements if req] # empty line
requirements = [req for req in requirements if not req.startswith('#')] # comment line
# remove unsupported options
supported_requirements = []
for req in requirements:
# https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format
if req.startswith(('--', '-c ', '-f ', '-i ')):
print("skipping unsupported option \"{}\"".format(req))
continue
# https://pip.pypa.io/en/stable/reference/pip_install/#vcs-support
# TODO: upgrade protos and Client to handle VCS-installed packages
if req.startswith(('-e ', 'git:', 'git+', 'hg+', 'svn+', 'bzr+')):
print("skipping unsupported VCS-installed package \"{}\"".format(req))
continue
# TODO: follow references to other requirements files
if req.startswith('-r '):
print("skipping unsupported file reference \"{}\"".format(req))
continue
# non-PyPI-installable spaCy models
if SPACY_MODEL_REGEX.match(req):
print("skipping non-PyPI-installable spaCy model \"{}\"".format(req))
continue
supported_requirements.append(req)
return supported_requirements
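# Hedged sketch (not part of the original module): shows which kinds of lines
# clean_reqs_file_lines() keeps or drops; the entries below are made up.
def _example_clean_reqs_file_lines():
    lines = [
        "# a comment",             # comment line: dropped
        "",                        # empty line: dropped
        "-r other-requirements.txt",  # file reference: skipped
        "en_core_web_sm",          # spaCy model: skipped
        "cloudpickle==1.2.1",      # ordinary pinned requirement: kept
    ]
    assert clean_reqs_file_lines(lines) == ["cloudpickle==1.2.1"]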
| mit |
alexrudnick/chipa | src/learn.py | 1 | 2747 | #!/usr/bin/env python3
import argparse
import readline
import functools
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
import nltk
from nltk.probability import FreqDist
from nltk.probability import ConditionalFreqDist
from nltk.probability import ConditionalProbDist
from nltk.probability import ELEProbDist
import features
from constants import UNTRANSLATED
from constants import OOV
DEBUG=False
def pause():
if DEBUG: input("ENTER TO CONTINUE")
def cpd(cfd):
"""Take a ConditionalFreqDist and turn it into a ConditionalProdDist"""
return ConditionalProbDist(cfd, ELEProbDist)
def reverse_cfd(cfd):
"""Given a ConditionalFreqDist, reverse the conditions and the samples!!"""
out = ConditionalFreqDist()
for condition in cfd.conditions():
for sample in cfd[condition].samples():
out[sample].inc(condition, cfd[condition][sample])
return out
@functools.lru_cache(maxsize=100000)
def mfs_for(word):
fd = nltk.probability.FreqDist()
labeled_featuresets = trainingdata_for(word)
for (f,label) in labeled_featuresets:
fd[label] += 1
return fd.max()
@functools.lru_cache(maxsize=100000)
def mfs_translation(word):
"""Return the MFS for the given word, but require that it's not the
untranslated token unless that's all we've seen."""
fd = nltk.probability.FreqDist()
labeled_featuresets = trainingdata_for(word)
for (f,label) in labeled_featuresets:
if label == UNTRANSLATED: continue
fd[label] += 1
mostcommon = fd.most_common()
if not mostcommon:
return OOV
return mostcommon[0][0]
class MFSClassifier(nltk.classify.ClassifierI):
def __init__(self):
self.fd = nltk.probability.FreqDist()
def train(self, labeled_featuresets):
self.fd.clear()
for (f,label) in labeled_featuresets:
self.fd[label] += 1
def classify(self, featureset):
return self.fd.max()
def prob_classify(self, featureset):
return nltk.probability.DictionaryProbDist({self.fd.max(): 1.0})
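# Hypothetical usage sketch (labels are invented): MFSClassifier ignores the
# features entirely and always predicts the most frequent training label.
#     clf = MFSClassifier()
#     clf.train([({}, 'casa'), ({}, 'casa'), ({}, 'hogar')])
#     clf.classify({})  # -> 'casa'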
class OOVClassifier(nltk.classify.ClassifierI):
def __init__(self):
pass
def train(self, labeled_featuresets):
pass
def classify(self, featureset):
return OOV
def prob_classify(self, featureset):
return nltk.probability.DictionaryProbDist({OOV: 1.0})
@functools.lru_cache(maxsize=100000)
def distribution_for(word):
fd = nltk.probability.FreqDist()
labeled_featuresets = trainingdata_for(word)
for (f,label) in labeled_featuresets:
fd[label] += 1
return fd
| gpl-3.0 |
MTgeophysics/mtpy | legacy/create_modem_input.py | 1 | 6364 | # -*- coding: utf-8 -*-
"""
Create modem input files:
This script includes topography in the model. To exclude topography, set the
number of air layers to zero (recommended) or comment out the add_topography
line. Note: setting the number of air layers to zero will add bathymetry but
not topography.
USAGE examples:
python examples/create_modem_input.py tests/data/edifiles/ examples/etopo1.asc /e/tmp/modem_test
python examples/create_modem_input.py /e/Data/MT_Datasets/WenPingJiang_EDI /e/Data/MT_Datasets/concurry_topo/AussieContinent_etopo1.asc
/e/tmp/WenPingTest
python examples/create_modem_input.py /e/Data/MT_Datasets/concurry_EDI_files/ /e/Data/MT_Datasets/concurry_topo/AussieContinent_etopo1.asc
/e/tmp/Concurry
Developed by
[email protected]
[email protected]
Create Date: 2017-02-01
"""
from __future__ import print_function
import glob
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
from mtpy.modeling.modem import Covariance, Data, Model
from mtpy.core.edi_collection import EdiCollection
# YG: patch that changes the matplotlib behaviour
# plt.ion() # enable interactive
# plt.ioff() # disable interactive, which will also disable this patch
def show_patcher(show_func):
"""
    Patch plt.show() so that, when interactive mode is enabled, the plot is
    displayed and then closed after one second. plt.show() then no longer
    blocks the script while the figure remains briefly visible to the user.
:param show_func:
:return:
"""
def new_show_func(*args, **kwargs):
stuff = show_func(*args, **kwargs)
# wait 1 second for the image to show on screen
        fig = plt.gcf()  # note: plt.gcf() returns the current Figure, not a figure manager
        if fig is not None:
            canvas = fig.canvas
# if canvas.figure.stale:
# canvas.draw()
# show(block=False)
canvas.start_event_loop(1) # wait time = 1
plt.close()
return stuff
return new_show_func if plt.isinteractive() else show_func
# plt.show = show_patcher(plt.show)
# end of patch
if __name__ == '__main__':
if len(sys.argv) < 4:
print("USAGE: %s path2edifiles path2topo.asc path2outdir" %
sys.argv[0])
sys.exit(1)
else:
        edipath = sys.argv[1]  # edi files to be inverted
topofile = sys.argv[2] # topography file, if using
outputdir = sys.argv[3] # path to save to
if not os.path.exists(outputdir):
os.mkdir(outputdir)
# epsg to project to. Google epsg 'your projection'
epsg_code = 28354
epsg_code = 3112
edi_list = glob.glob(edipath + '/*.edi')
    if edi_list is None or len(edi_list) < 1:
print("Error: No edi files found in the dir %s" % edipath)
sys.exit(2)
# period list (can take periods from one of the edi files, or just specify
# periods directly using the logspace function (commented out))
# eo = mtedi.Edi(edi_list[0]) # this may miss some periods?
# period_list = 1. / eo.Z.freq # period_list = np.logspace(-3,3)
print ("edi_list = {}".format(edi_list))
period_list = EdiCollection(edi_list).select_periods()
datob = Data(edi_list=edi_list,
inv_mode='1',
period_list=period_list,
epsg=epsg_code,
error_type='floor',
error_floor=10)
# period_buffer=0.000001)
datob.write_data_file(save_path=outputdir)
# create mesh grid model object
# model = Model(Data=datob,
model = Model(station_object=datob.station_locations,
epsg=epsg_code, # epsg
# cell_size_east=500, cell_size_north=500, # concurry
cell_size_east=10000, cell_size_north=10000, #GA_VIC
# cell_size_east=1000, cell_size_north=1000, # Concurry
cell_number_ew=120, cell_number_ns=100, # option to specify cell numbers
pad_north=8, # number of padding cells in each of the north and south directions
pad_east=8, # number of east and west padding cells
pad_z=8, # number of vertical padding cells
pad_stretch_v=1.5, # factor to increase by in padding cells (vertical)
pad_stretch_h=1.5, # factor to increase by in padding cells (horizontal)
n_airlayers=10, # number of air layers 0, 10, 20, depend on topo elev height
res_model=100, # halfspace resistivity value for initial reference model
n_layers=55, # total number of z layers, including air and pad_z
z1_layer=50, # first layer thickness metres, depend
z_target_depth=500000)
    model.make_mesh()  # the data file will be re-written in this method. No topo elev file used yet
model.plot_mesh()
model.plot_mesh_xy()
model.plot_mesh_xz()
# write a model file and initialise a resistivity model
model.write_model_file(save_path=outputdir)
#=========== now add topo data, with or without air layers?
# 1) the data file will be changed in 3 columns sxi, syi and szi meters
# 2) The covariance file will be written.
# 3) the model file not changed?? No air layers can be seen in the .ws file.
# add topography, define an initial resistivity model, modify and re-write the data file, define covariance mask
# dat file will be changed and rewritten,
# grid centre is used as the new origin of coordinate system, topo data used in the elev column.
# model.add_topography(topofile, interp_method='nearest') # dat file will be written again as elevation updated
model.add_topography_2mesh(topofile, interp_method='nearest') # dat file will be written again as elevation updated
model.plot_topograph() # plot the MT stations on topography elevation data
print("*** Re-writing model file after topo data and air layers are added - will include air sea-water resistivity")
model.write_model_file(save_path=model.save_path)
# model.write_model_file(save_path='temp/')
# make covariance (mask) file
cov = Covariance(mask_arr=model.covariance_mask,
save_path=outputdir,
smoothing_east=0.3,
smoothing_north=0.3,
smoothing_z=0.3)
cov.write_covariance_file(model_fn=model.model_fn)
| gpl-3.0 |
zihua/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 15 | 17443 | import pickle
import tempfile
import shutil
import os
import numbers
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
REGRESSION_SCORERS = ['r2', 'neg_mean_absolute_error',
'neg_mean_squared_error', 'neg_median_absolute_error',
'mean_absolute_error',
'mean_squared_error', 'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'neg_log_loss', 'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
def _make_estimators(X_train, y_train, y_ml_train):
# Make estimators that make sense to test various scoring methods
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
return dict(
[(name, sensible_regr) for name in REGRESSION_SCORERS] +
[(name, sensible_clf) for name in CLF_SCORERS] +
[(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
)
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
# Create some memory mapped data
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
X, y = make_classification(n_samples=30, n_features=5, random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
joblib.dump((X, y, y_ml), filename)
X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
# GC closes the mmap file descriptors
X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_all_scorers_repr():
# Test that all scorers have a working repr
for name, scorer in SCORERS.items():
repr(scorer)
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
estimator = _make_estimators(X_train, y_train, y_ml_train)
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
@ignore_warnings # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
scorer, estimator = SCORERS[scorer_name], ESTIMATORS[scorer_name]
if scorer_name in MULTILABEL_ONLY_SCORERS:
score = scorer(estimator, X_mm, y_ml_mm)
else:
score = scorer(estimator, X_mm, y_mm)
assert isinstance(score, numbers.Number), scorer_name
def test_scorer_memmap_input():
# Non-regression test for #6147: some score functions would
# return singleton memmap when computed on memmap data instead of scalar
# float values.
for name in SCORERS.keys():
yield check_scorer_memmap, name
def test_deprecated_names():
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
for name in ('mean_absolute_error', 'mean_squared_error',
'median_absolute_error', 'log_loss'):
warning_msg = "Scoring method %s was renamed to" % name
for scorer in (get_scorer(name), SCORERS[name]):
assert_warns_message(DeprecationWarning,
warning_msg,
scorer, clf, X, y)
assert_warns_message(DeprecationWarning,
warning_msg,
cross_val_score, clf, X, y, scoring=name)
def test_scoring_is_not_metric():
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), f1_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), roc_auc_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
Ridge(), r2_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
KMeans(), adjusted_rand_score)
| bsd-3-clause |
tz3/ml_homework | source/bag_of_word/imutils.py | 1 | 1105 | #!/usr/local/bin/python2.7
import cv2
import matplotlib.pyplot as plt
import os
def imlist(path):
"""
The function imlist returns all the names of the files in
the directory path supplied as argument to the function.
"""
return [os.path.join(path, f) for f in os.listdir(path)]
def imshow(im_title, im):
""" This is function to display the image"""
plt.figure()
plt.title(im_title)
plt.axis("off")
if len(im.shape) == 2:
plt.imshow(im, cmap="gray")
else:
        im_display = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; matplotlib expects RGB
plt.imshow(im_display)
plt.show()
def imreads(path):
"""
    Read all the images in the given folder and return them as a list.
    """
    images_path = imlist(path)
images = []
for image_path in images_path:
images.append(cv2.imread(image_path, cv2.CV_LOAD_IMAGE_COLOR))
return images
def show(image, name="Image"):
"""
Routine to display the image.
"""
cv2.namedWindow(name, cv2.WINDOW_NORMAL)
cv2.imshow(name, image)
cv2.waitKey(0)
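# Hypothetical usage sketch (the path is an assumption):
#     for im in imreads("/path/to/images"):
#         show(im, name="Sample")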
| mit |
mortonjt/scipy | scipy/cluster/tests/test_hierarchy.py | 26 | 35159 | #! /usr/bin/env python
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (TestCase, run_module_suite, dec, assert_raises,
assert_allclose, assert_equal, assert_)
from scipy._lib.six import xrange, u
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
linkage, from_mlab_linkage, to_mlab_linkage, num_obs_linkage, inconsistent,
cophenet, fclusterdata, fcluster, is_isomorphic, single, leaders,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
set_link_color_palette)
from scipy.spatial.distance import pdist
import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
import matplotlib
# and set the backend to be Agg (no gui)
matplotlib.use('Agg')
# before importing pyplot
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
class TestLinkage(object):
def test_linkage_empty_distance_matrix(self):
# Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
y = np.zeros((0,))
assert_raises(ValueError, linkage, y)
def test_linkage_tdist(self):
for method in ['single', 'complete', 'average', 'weighted', u('single')]:
yield self.check_linkage_tdist, method
def check_linkage_tdist(self, method):
# Tests linkage(Y, method) on the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
assert_allclose(Z, expectedZ, atol=1e-10)
def test_linkage_X(self):
for method in ['centroid', 'median', 'ward']:
yield self.check_linkage_q, method
def check_linkage_q(self, method):
# Tests linkage(Y, method) on the Q data set.
Z = linkage(hierarchy_test_data.X, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
assert_allclose(Z, expectedZ, atol=1e-06)
class TestInconsistent(object):
def test_inconsistent_tdist(self):
for depth in hierarchy_test_data.inconsistent_ytdist:
yield self.check_inconsistent_tdist, depth
def check_inconsistent_tdist(self, depth):
Z = hierarchy_test_data.linkage_ytdist_single
assert_allclose(inconsistent(Z, depth),
hierarchy_test_data.inconsistent_ytdist[depth])
class TestCopheneticDistance(object):
def test_linkage_cophenet_tdist_Z(self):
# Tests cophenet(Z) on tdist data set.
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
Z = hierarchy_test_data.linkage_ytdist_single
M = cophenet(Z)
assert_allclose(M, expectedM, atol=1e-10)
def test_linkage_cophenet_tdist_Z_Y(self):
# Tests cophenet(Z, Y) on tdist data set.
Z = hierarchy_test_data.linkage_ytdist_single
(c, M) = cophenet(Z, hierarchy_test_data.ytdist)
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
expectedc = 0.639931296433393415057366837573
assert_allclose(c, expectedc, atol=1e-10)
assert_allclose(M, expectedM, atol=1e-10)
class TestMLabLinkageConversion(object):
def test_mlab_linkage_conversion_empty(self):
# Tests from/to_mlab_linkage on empty linkage array.
X = np.asarray([])
assert_equal(from_mlab_linkage([]), X)
assert_equal(to_mlab_linkage([]), X)
def test_mlab_linkage_conversion_single_row(self):
# Tests from/to_mlab_linkage on linkage array with single row.
Z = np.asarray([[0., 1., 3., 2.]])
Zm = [[1, 2, 3]]
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
def test_mlab_linkage_conversion_multiple_rows(self):
# Tests from/to_mlab_linkage on linkage array with multiple rows.
Zm = np.asarray([[3, 6, 138], [4, 5, 219],
[1, 8, 255], [2, 9, 268], [7, 10, 295]])
Z = np.array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 255., 3.],
[1., 8., 268., 4.],
[6., 9., 295., 6.]],
dtype=np.double)
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
class TestFcluster(object):
def test_fclusterdata(self):
for t in hierarchy_test_data.fcluster_inconsistent:
yield self.check_fclusterdata, t, 'inconsistent'
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fclusterdata, t, 'distance'
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fclusterdata, t, 'maxclust'
def check_fclusterdata(self, t, criterion):
# Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
X = hierarchy_test_data.Q_X
T = fclusterdata(X, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster(self):
for t in hierarchy_test_data.fcluster_inconsistent:
yield self.check_fcluster, t, 'inconsistent'
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fcluster, t, 'distance'
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fcluster, t, 'maxclust'
def check_fcluster(self, t, criterion):
# Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster_monocrit(self):
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fcluster_monocrit, t
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fcluster_maxclust_monocrit, t
def check_fcluster_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_distance[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
def check_fcluster_maxclust_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_maxclust[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
class TestLeaders(object):
def test_leaders_single(self):
# Tests leaders using a flat clustering generated by single linkage.
X = hierarchy_test_data.Q_X
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=3)
Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
L = leaders(Z, T)
assert_equal(L, Lright)
class TestIsIsomorphic(object):
def test_is_isomorphic_1(self):
# Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
a = [1, 1, 1]
b = [2, 2, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_2(self):
# Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
a = [1, 7, 1]
b = [2, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_3(self):
# Tests is_isomorphic on test case #3 (no flat clusters)
a = []
b = []
assert_(is_isomorphic(a, b))
def test_is_isomorphic_4A(self):
# Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
a = [1, 2, 3]
b = [1, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_4B(self):
# Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
a = [1, 2, 3, 3]
b = [1, 3, 2, 3]
assert_(is_isomorphic(a, b) == False)
assert_(is_isomorphic(b, a) == False)
def test_is_isomorphic_4C(self):
# Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
a = [7, 2, 3]
b = [6, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_5(self):
# Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling).
for nc in [2, 3, 5]:
yield self.help_is_isomorphic_randperm, 1000, nc
def test_is_isomorphic_6(self):
# Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling, slightly
# nonisomorphic.)
for nc in [2, 3, 5]:
yield self.help_is_isomorphic_randperm, 1000, nc, True, 5
def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
for k in range(3):
a = np.int_(np.random.rand(nobs) * nclusters)
b = np.zeros(a.size, dtype=np.int_)
P = np.random.permutation(nclusters)
for i in xrange(0, a.shape[0]):
b[i] = P[a[i]]
if noniso:
Q = np.random.permutation(nobs)
b[Q[0:nerrors]] += 1
b[Q[0:nerrors]] %= nclusters
assert_(is_isomorphic(a, b) == (not noniso))
assert_(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage(object):
def test_is_valid_linkage_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
yield self.check_is_valid_linkage_various_size, nrow, ncol, valid
def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
# Tests is_valid_linkage(Z) with linkage matrics of various sizes
Z = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
Z = Z[:nrow, :ncol]
assert_(is_valid_linkage(Z) == valid)
if not valid:
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_int_type(self):
# Tests is_valid_linkage(Z) with integer type.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.int)
assert_(is_valid_linkage(Z) == False)
assert_raises(TypeError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_empty(self):
# Tests is_valid_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(is_valid_linkage(Z) == True)
def test_is_valid_linkage_4_and_up_neg_index_left(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (left).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,0] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_index_right(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (right).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,1] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_dist(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative distances.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,2] = -0.5
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_counts(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,3] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent(object):
def test_is_valid_im_int_type(self):
# Tests is_valid_im(R) with integer type.
R = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.int)
assert_(is_valid_im(R) == False)
assert_raises(TypeError, is_valid_im, R, throw=True)
def test_is_valid_im_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
yield self.check_is_valid_im_various_size, nrow, ncol, valid
def check_is_valid_im_various_size(self, nrow, ncol, valid):
# Tests is_valid_im(R) with linkage matrics of various sizes
R = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
R = R[:nrow, :ncol]
assert_(is_valid_im(R) == valid)
if not valid:
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_empty(self):
# Tests is_valid_im(R) with empty inconsistency matrix.
R = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
assert_(is_valid_im(R) == True)
def test_is_valid_im_4_and_up_neg_index_left(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height means.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,0] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_index_right(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height standard deviations.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,1] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_dist(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,2] = -0.5
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage(TestCase):
def test_num_obs_linkage_empty(self):
# Tests num_obs_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, num_obs_linkage, Z)
def test_num_obs_linkage_1x4(self):
# Tests num_obs_linkage(Z) on linkage over 2 observations.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 2)
def test_num_obs_linkage_2x4(self):
# Tests num_obs_linkage(Z) on linkage over 3 observations.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 3)
def test_num_obs_linkage_4_and_up(self):
# Tests num_obs_linkage(Z) on linkage on observation sets between sizes
# 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_equal(num_obs_linkage(Z), i)
class TestLeavesList(object):
def test_leaves_list_1x4(self):
# Tests leaves_list(Z) on a 1x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1])
def test_leaves_list_2x4(self):
# Tests leaves_list(Z) on a 2x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1, 2])
def test_leaves_list_Q(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']:
yield self.check_leaves_list_Q, method
def check_leaves_list_Q(self, method):
# Tests leaves_list(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
node = to_tree(Z)
assert_equal(node.pre_order(), leaves_list(Z))
def test_Q_subtree_pre_order(self):
# Tests that pre_order() works when called on sub-trees.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
node = to_tree(Z)
assert_equal(node.pre_order(), (node.get_left().pre_order()
+ node.get_right().pre_order()))
class TestCorrespond(TestCase):
def test_correspond_empty(self):
# Tests correspond(Z, y) with empty linkage and condensed distance matrix.
y = np.zeros((0,))
Z = np.zeros((0,4))
assert_raises(ValueError, correspond, Z, y)
def test_correspond_2_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes.
for i in xrange(2, 4):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
def test_correspond_4_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
        # different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
list(zip(list(range(3, 5)), list(range(2, 4))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_correspond_4_and_up_2(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
        # different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
list(zip(list(range(2, 7)), list(range(16, 21))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_num_obs_linkage_multi_matrix(self):
# Tests num_obs_linkage with observation matrices of multiple sizes.
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
Z = linkage(Y)
assert_equal(num_obs_linkage(Z), n)
class TestIsMonotonic(TestCase):
def test_is_monotonic_empty(self):
# Tests is_monotonic(Z) on an empty linkage.
Z = np.zeros((0, 4))
assert_raises(ValueError, is_monotonic, Z)
def test_is_monotonic_1x4(self):
# Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_T(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_F(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
Z = np.asarray([[0, 1, 0.4, 2],
[2, 3, 0.3, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_T(self):
# Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_3x4_F1(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.2, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F2(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
Z = np.asarray([[0, 1, 0.8, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F3(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.2, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_tdist_linkage1(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Expecting True.
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_tdist_linkage2(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Perturbing. Expecting False.
Z = linkage(hierarchy_test_data.ytdist, 'single')
Z[2,2] = 0.0
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_Q_linkage(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# Q data set. Expecting True.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
assert_equal(is_monotonic(Z), True)
class TestMaxDists(object):
def test_maxdists_empty_linkage(self):
# Tests maxdists(Z) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxdists, Z)
def test_maxdists_one_cluster_linkage(self):
# Tests maxdists(Z) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxdists_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxdists_Q_linkage, method
def check_maxdists_Q_linkage(self, method):
# Tests maxdists(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts(object):
def test_maxinconsts_empty_linkage(self):
# Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_difrow_linkage(self):
# Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_one_cluster_linkage(self):
# Tests maxinconsts(Z, R) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxinconsts_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxinconsts_Q_linkage, method
def check_maxinconsts_Q_linkage(self, method):
# Tests maxinconsts(Z, R) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat(object):
def test_maxRstat_invalid_index(self):
for i in [3.3, -1, 4]:
yield self.check_maxRstat_invalid_index, i
def check_maxRstat_invalid_index(self, i):
# Tests maxRstat(Z, R, i). Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
if isinstance(i, int):
assert_raises(ValueError, maxRstat, Z, R, i)
else:
assert_raises(TypeError, maxRstat, Z, R, i)
def test_maxRstat_empty_linkage(self):
for i in range(4):
yield self.check_maxRstat_empty_linkage, i
def check_maxRstat_empty_linkage(self, i):
# Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_difrow_linkage(self):
for i in range(4):
yield self.check_maxRstat_difrow_linkage, i
def check_maxRstat_difrow_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_one_cluster_linkage(self):
for i in range(4):
yield self.check_maxRstat_one_cluster_linkage, i
def check_maxRstat_one_cluster_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxRstat_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
for i in range(4):
yield self.check_maxRstat_Q_linkage, method, i
def check_maxRstat_Q_linkage(self, method, i):
# Tests maxRstat(Z, R, i) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram(object):
def test_dendrogram_single_linkage_tdist(self):
# Tests dendrogram calculation on single linkage of the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, no_plot=True)
leaves = R["leaves"]
assert_equal(leaves, [2, 5, 1, 0, 3, 4])
def test_valid_orientation(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_raises(ValueError, dendrogram, Z, orientation="foo")
@dec.skipif(not have_matplotlib)
def test_dendrogram_plot(self):
for orientation in ['top', 'bottom', 'left', 'right']:
yield self.check_dendrogram_plot, orientation
def check_dendrogram_plot(self, orientation):
# Tests dendrogram plotting.
Z = linkage(hierarchy_test_data.ytdist, 'single')
expected = {'color_list': ['g', 'b', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 219.0, 219.0, 0.0],
[0.0, 255.0, 255.0, 219.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[45.0, 45.0, 55.0, 55.0],
[35.0, 35.0, 50.0, 50.0],
[25.0, 25.0, 42.5, 42.5],
[10.0, 10.0, 33.75, 33.75]],
'ivl': ['2', '5', '1', '0', '3', '4'],
'leaves': [2, 5, 1, 0, 3, 4]}
fig = plt.figure()
ax = fig.add_subplot(111)
# test that dendrogram accepts ax keyword
R1 = dendrogram(Z, ax=ax, orientation=orientation)
plt.close()
assert_equal(R1, expected)
# test plotting to gca (will import pylab)
R2 = dendrogram(Z, orientation=orientation)
plt.close()
assert_equal(R2, expected)
@dec.skipif(not have_matplotlib)
def test_dendrogram_truncate_mode(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, 2, 'lastp', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['b'],
'dcoord': [[0.0, 295.0, 295.0, 0.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0]],
'ivl': ['(2)', '(4)'],
'leaves': [6, 9]})
R = dendrogram(Z, 2, 'mtica', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['g', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 255.0, 255.0, 0.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[35.0, 35.0, 45.0, 45.0],
[25.0, 25.0, 40.0, 40.0],
[10.0, 10.0, 32.5, 32.5]],
'ivl': ['2', '5', '1', '0', '(2)'],
'leaves': [2, 5, 1, 0, 7]})
def test_dendrogram_colors(self):
# Tests dendrogram plots with alternate colors
Z = linkage(hierarchy_test_data.ytdist, 'single')
set_link_color_palette(['c', 'm', 'y', 'k'])
R = dendrogram(Z, no_plot=True,
above_threshold_color='g', color_threshold=250)
set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
color_list = R['color_list']
assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
def calculate_maximum_distances(Z):
# Used for testing correctness of maxdists.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = Z[i, 2]
B[i] = q.max()
return B
def calculate_maximum_inconsistencies(Z, R, k=3):
# Used for testing correctness of maxinconsts.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = R[i, k]
B[i] = q.max()
return B
def test_euclidean_linkage_value_error():
for method in scipy.cluster.hierarchy._cpy_euclid_methods:
assert_raises(ValueError,
linkage, [[1, 1], [1, 1]], method=method, metric='cityblock')
def test_2x2_linkage():
Z1 = linkage([1], method='single', metric='euclidean')
Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
assert_allclose(Z1, Z2)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
ningchi/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
gregvw/qho-control | qho.py | 1 | 4949 | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import solve_banded
from scipy.optimize import fmin_bfgs
from mult_banded import mult_banded
class QHO(object):
"""
Quantum Harmonic Oscillator class
"""
def __init__(self,modes,N,T,i,f,g):
"""
Create and store the three banded matrices for the system
(I,H, and X) since they will be reused
"""
self.modes = modes
self.N = N
self.dt = T/N
self.i = i
self.f = f
self.g = g
self.u = np.zeros(N)
j = np.arange(modes)
# Identity matrix in banded form
self.I = np.zeros((3,modes))
self.I[1,:] = 1
# Stationary part of Hamiltonian in banded form
self.H = np.zeros((3,modes))
self.H[1,:] = j+0.5
# Dipole term in banded form
self.X = np.zeros((3,modes))
x = np.sqrt(j+1)/2
self.X[0,1:] = x[:-1]
self.X[2,:-1] = x[:-1]
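        # Banded storage convention of scipy.linalg.solve_banded with (l, u) = (1, 1):
        # row 0 is the superdiagonal (shifted one column right), row 1 the main
        # diagonal, row 2 the subdiagonal. Illustration (assumed modes = 3):
        #     X = [[0.,  x0,  x1],    # superdiagonal
        #          [0.,  0.,  0.],    # main diagonal (zero for the dipole term)
        #          [x0,  x1,  0.]]    # subdiagonal
        # with x_j = sqrt(j + 1) / 2.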
# Allocate space for the state variable
self.Y = np.zeros((modes,N+1),dtype=complex)
# Allocate space for the adjoint variable
self.Z = np.zeros((modes,N+1),dtype=complex)
def _solve_state(self,u):
"""
Compute the solution to the state equation for a given
initial condition y[i]=1 and control u. See eq. (25)
"""
self.Y[self.i,0] = 1
for k in range(self.N):
ab = self.I+0.5j*self.dt*(self.H+u[k]*self.X)
b = mult_banded((1,1),np.conj(ab),self.Y[:,k])
self.Y[:,k+1] = solve_banded((1,1),ab,b,overwrite_ab=True,
overwrite_b=True,debug=False,
check_finite=True)
self.u = u
def _solve_adjoint(self,u):
"""
Compute the solution to the adjoint equation for a given
final condition and control u. See eq. (26)
"""
self.Z[self.f,-1] = 1j*np.conj(self.Y[self.f,-1])
for j in range(self.N):
k = self.N-j-1
ab = self.I+0.5j*self.dt*(self.H+u[k]*self.X)
b = mult_banded((1,1),np.conj(ab),self.Z[:,k+1])
self.Z[:,k] = solve_banded((1,1),ab,b,overwrite_ab=True,
overwrite_b=True,debug=False,
check_finite=True)
self.u = u
def cost(self,u):
"""
Evaluate the reduced cost functional
"""
if not np.array_equal(u,self.u):
self._solve_state(u)
J = 0.5*self.dt*self.g*sum(u**2)-abs(self.Y[self.f,-1])**2
return J
def grad(self,u):
"""
Evaluate the reduced gradient
"""
if not np.array_equal(u,self.u):
self._solve_state(u)
self._solve_adjoint(u)
dex = range(1,self.N+1)
# Inner product term in eq (28)
ip = [np.dot((self.Z[:,j]+self.Z[:,j-1]),
mult_banded((1,1),self.X,(self.Y[:,j]+self.Y[:,j-1])))
for j in dex]
dJ = self.dt*(self.g*u+0.5*np.real(np.array(ip)))
return dJ
if __name__ == '__main__':
# Number of Hermite functions
modes = 10
# Number of time steps
N = 10
# Control duration
T = 10
# Regularization parameter
g = 1e-2
# Time grid
t = np.linspace(0,T,N+1)
# Midpoints of time steps
tm = 0.5*(t[:-1]+t[1:])
# Initial guess of control
u = 0.01*np.ones(N)
# Instantiate Quantum Harmonic Oscillator for 0 -> 1 transition
qho = QHO(modes,N,T,0,1,g)
uopt = fmin_bfgs(qho.cost,u,qho.grad,args=(),gtol=1e-6,norm=np.inf,
epsilon=1e-7, maxiter=1000, full_output=0, disp=1,
retall=0, callback=None)
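    # Successive refinement: each pass doubles the number of time steps,
    # warm-starts from the previous control (every value repeated twice) and
    # halves the regularization parameter before re-optimizing.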
for k in range(4):
t_old = t
tm_old = tm
u = np.repeat(uopt,2*np.ones(N,dtype=int))
N *= 2
t = np.linspace(0,T,N+1)
tm = 0.5*(t[:-1]+t[1:])
g /= 2
# Instantiate the controlled oscillator object
qho = QHO(modes,N,T,0,1,g)
# Compute a local minimizer
uopt = fmin_bfgs(qho.cost,u,qho.grad,args=(),gtol=1e-6,norm=np.inf,
epsilon=1e-7, maxiter=1000, full_output=0, disp=1,
retall=0, callback=None)
fig = plt.figure(1,(16,7))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.set_xlabel('time',fontsize=16)
ax1.set_title('Control',fontsize=18)
ax1.tick_params(axis='both',which='major',labelsize=16)
ax2.set_xlabel('time',fontsize=16)
ax2.set_title('State',fontsize=18)
ax2.tick_params(axis='both',which='major',labelsize=16)
ax1.plot(tm,uopt)
ax2.plot(t,np.abs(qho.Y.T)**2)
plt.show()
| mit |
jdmcbr/blaze | blaze/compute/tests/test_core_compute.py | 8 | 4256 | from __future__ import absolute_import, division, print_function
import pytest
import operator
from datashape import discover, dshape
from blaze.compute.core import (compute_up, compute, bottom_up_until_type_break,
top_then_bottom_then_top_again_etc,
swap_resources_into_scope)
from blaze.expr import by, symbol, Expr, Symbol
from blaze.dispatch import dispatch
from blaze.compatibility import raises, reduce
from blaze.utils import example
import pandas as pd
import numpy as np
def test_errors():
t = symbol('t', 'var * {foo: int}')
with raises(NotImplementedError):
compute_up(by(t, count=t.count()), 1)
def test_optimize():
class Foo(object):
pass
s = symbol('s', '5 * {x: int, y: int}')
@dispatch(Expr, Foo)
def compute_down(expr, foo):
return str(expr)
assert compute(s.x * 2, Foo()) == "s.x * 2"
@dispatch(Expr, Foo)
def optimize(expr, foo):
return expr + 1
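    # With an optimize rule registered for (Expr, Foo), compute applies it
    # before dispatching to compute_down, hence the "+ 1" in the result below.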
assert compute(s.x * 2, Foo()) == "(s.x * 2) + 1"
def test_bottom_up_until_type_break():
s = symbol('s', 'var * {name: string, amount: int}')
data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
dtype=[('name', 'S7'), ('amount', 'i4')])
e = (s.amount + 1).distinct()
expr, scope = bottom_up_until_type_break(e, {s: data})
amount = symbol('amount', 'var * int64', token=1)
assert expr.isidentical(amount)
assert len(scope) == 1
assert amount in scope
assert (scope[amount] == np.array([101, 201, 301], dtype='i4')).all()
# This computation has a type change midstream, so we stop and get the
# unfinished computation.
e = s.amount.sum() + 1
expr, scope = bottom_up_until_type_break(e, {s: data})
amount_sum = symbol('amount_sum', 'int64')
assert expr.isidentical(amount_sum + 1)
assert len(scope) == 1
assert amount_sum in scope
assert scope[amount_sum] == 600
# ensure that we work on binops with one child
x = symbol('x', 'real')
expr, scope = bottom_up_until_type_break(x + x, {x: 1})
assert len(scope) == 1
x2 = list(scope.keys())[0]
assert isinstance(x2, Symbol)
assert isinstance(expr, Symbol)
assert scope[x2] == 2
def test_top_then_bottom_then_top_again_etc():
s = symbol('s', 'var * {name: string, amount: int32}')
data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
dtype=[('name', 'S7'), ('amount', 'i4')])
e = s.amount.sum() + 1
assert top_then_bottom_then_top_again_etc(e, {s: data}) == 601
def test_swap_resources_into_scope():
from blaze import Data
t = Data([1, 2, 3], dshape='3 * int', name='t')
expr, scope = swap_resources_into_scope(t.head(2), {t: t.data})
assert t._resources()
assert not expr._resources()
assert t not in scope
def test_compute_up_on_dict():
d = {'a': [1, 2, 3], 'b': [4, 5, 6]}
assert str(discover(d)) == str(dshape('{a: 3 * int64, b: 3 * int64}'))
s = symbol('s', discover(d))
assert compute(s.a, {s: d}) == [1, 2, 3]
def test_pre_compute_on_multiple_datasets_is_selective():
from odo import CSV
from blaze import Data
from blaze.cached import CachedDataset
df = pd.DataFrame([[1, 'Alice', 100],
[2, 'Bob', -200],
[3, 'Charlie', 300],
[4, 'Denis', 400],
[5, 'Edith', -500]], columns=['id', 'name', 'amount'])
iris = CSV(example('iris.csv'))
dset = CachedDataset({'df': df, 'iris': iris})
d = Data(dset)
assert str(compute(d.df.amount)) == str(df.amount)
def test_raises_on_valid_expression_but_no_implementation():
class MyExpr(Expr):
__slots__ = '_hash', '_child'
@property
def dshape(self):
return self._child.dshape
t = symbol('t', 'var * {amount: real}')
expr = MyExpr(t.amount)
df = [(1.0,), (2.0,), (3.0,)]
with pytest.raises(NotImplementedError):
compute(expr, df)
@pytest.mark.parametrize('n', range(2, 11))
def test_simple_add(n):
x = symbol('x', 'int')
expr = reduce(operator.add, [x] * n)
assert compute(expr, 1) == n
| bsd-3-clause |
bikong2/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example visually illustrates, in the feature space, a comparison of
the results of two different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
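# ICA recovers the sources only up to scale (and permutation), so the
# estimated sources are rescaled to unit variance before plotting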
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
slimpotatoes/STEM_Moire_GPA | src/guidisplay.py | 1 | 10738 | # Image Display module
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.widgets import Button, SpanSelector, TextBox
from matplotlib_scalebar.scalebar import ScaleBar
from matplotlib.colorbar import Colorbar
from imageio import imwrite as imsave
import numpy as np
import guilinemanager
import skimage.measure
# import time
# import string
import collections
ButtonParams = collections.namedtuple('bp', ['text', 'x', 'y', 'functioncall'])
class GUIDisplay(object):
"""Display 2d arrays with interactive parameters:
- Contrast
- Colormap
- Axis label
- Legend
- Calibration
- Scalebar
- Line profile
- Export"""
def __init__(self, data_to_display, cal=None):
# 2D array to display with calibration cal in nm/pixel
self.image_data = data_to_display
# Window for image display + matplotlib parameters
self.fig_image = plt.figure(figsize=(10, 7), dpi=100)
# Layout figure
self.gs_fig_image = gridspec.GridSpec(8, 8)
# Make buttons and assign function calls
buttons = (
ButtonParams('Refresh', 0, 0, self.test),
ButtonParams('Set\nColourmap', 1, 0, self.colourmap_button),
ButtonParams('Calibration', 2, 0, self.test),
ButtonParams('Scale bar', 3, 0, self.update_scalebar),
ButtonParams('Line profile', 4, 0, self.line_profile),
ButtonParams('Rot 90 CCW', 5, 0, self.rotate_90),
ButtonParams('Num 6', 6, 0, self.test),
ButtonParams('Export', 7, 0, self.export_data)
)
self.fig_image_parameter = []
# Assign button to subplot in figure
for ii in buttons:
button = Button(plt.subplot(self.gs_fig_image[ii.x, ii.y]), ii.text)
button.on_clicked(ii.functioncall)
self.fig_image_parameter.append(button)
# Define image axis
self.ax_image = plt.subplot(self.gs_fig_image[1:-1, 1:6])
self.ax_image.set_axis_off()
self.cmap = 'bwr'
self.image = self.ax_image.imshow(self.image_data, cmap=self.cmap, vmin=-0.02, vmax=0.02)
# Contrast histogram display and span selector
self.ax_contrast = plt.subplot(self.gs_fig_image[0, 1:6])
self.contrastbins = 256
self.cmin = -0.02
self.cmax = 0.02
self.imhist, self.imbins = np.histogram(self.image_data, bins=self.contrastbins)
self.ax_contrast_span = None
self.plot_contrast_histogram()
# Colormaps
self.maps = sorted([m for m in plt.cm.datad if not m.endswith("_r")])
self.cmapfig = None
self.cmapaxes = None
# (https://scipy.github.io/old-wiki/pages/Cookbook/Matplotlib/Show_colormaps)
# Colourbar
self.ax_colourbar = plt.subplot(self.gs_fig_image[1:-1, 7])
self.colourbar = Colorbar(self.ax_colourbar, self.image)
# Textbox for colormap
self.ax_cmin = plt.axes([0.8, 0.85, 0.1, 0.05])
self.ax_cmax = plt.axes([0.8, 0.8, 0.1, 0.05])
self.text_cmin = TextBox(self.ax_cmin, label='min', initial="%.4f" % self.cmin, label_pad=0.25)
self.text_cmax = TextBox(self.ax_cmax, label='max', initial="%.4f" % self.cmax, label_pad=0.25)
self.text_cmin.on_submit(self.update_cmin)
self.text_cmax.on_submit(self.update_cmax)
# Calibration textbox
self.cal = cal
self.ax_cal = plt.axes([0.5, 0.1, 0.1, 0.05])
if self.cal is None:
self.text_cal = TextBox(self.ax_cal, label='Calibration (nm/pixel)', initial='', label_pad=0.25)
else:
self.text_cal = TextBox(self.ax_cal, label='Calibration (nm/pixel)', initial=self.cal, label_pad=0.25)
self.text_cal.on_submit(self.update_calibration)
# Scalebar
self.state_scalebar = 0
self.scalebar = None
# Line profile
self.line_prof = None
self.line_prof_edit = 0
self.fig_line_prof = None
self.ax_fig_line_prof = None
self.profile = None
# Show the display window
plt.show()
@staticmethod
def test(event):
print(event)
# Button to open colourmap selection window
def colourmap_button(self, event):
if event.inaxes == self.fig_image_parameter[1].ax:
nummaps = len(self.maps)
self.cmapfig = plt.figure('Colourmap options, pick one!', figsize=(5, 2 * nummaps))
self.cmapaxes = {}
gradient = np.linspace(0, 1, 100) * np.ones((3, 100))
for mm in range(nummaps):
corners = [0., mm / float(nummaps), 0.75, 1. / nummaps]
self.cmapaxes[mm] = plt.axes(corners)
self.cmapaxes[mm].annotate(self.maps[mm], xy=(0.77, (mm + 0.2) / float(nummaps)),
xycoords='figure fraction', fontsize=11)
self.cmapaxes[mm].set_axis_off()
self.cmapaxes[mm].imshow(gradient, cmap=plt.get_cmap(self.maps[mm]))
self.cmapfig.canvas.mpl_connect('button_press_event', self.colourmap_axis_select)
plt.show()
# Set colourmap based on clicking on an axis in the colourmap window
def colourmap_axis_select(self, event):
for aa in self.cmapaxes:
if event.inaxes == self.cmapaxes[aa]:
self.cmap = self.maps[aa]
self.image.set_cmap(plt.get_cmap(self.cmap))
self.update_colourmap()
self.fig_image.canvas.draw()
def line_profile(self, event):
if event.inaxes == self.fig_image_parameter[4].ax:
if self.line_prof_edit == 0:
if self.line_prof is None:
print('create line')
self.line_prof_edit = 1
self.line_prof = guilinemanager.LineDraw(self.ax_image)
self.line_prof.ConnectDraw()
else:
print('edit line')
self.line_prof_edit = 1
self.line_prof.ConnectMove()
elif self.line_prof_edit == 1:
                print('disconnect')
self.line_prof_edit = 0
self.line_prof.DisconnectDraw()
self.line_prof.DisconnectMove()
self.fig_line_prof = plt.figure()
self.ax_fig_line_prof = self.fig_line_prof.add_subplot(1, 1, 1)
print(self.line_prof.WidthData)
                first_position = (self.line_prof.LineCoords[0][1], self.line_prof.LineCoords[0][0])
                second_position = (self.line_prof.LineCoords[1][1], self.line_prof.LineCoords[1][0])
                self.profile = skimage.measure.profile_line(self.image_data, first_position,
                                                            second_position,
linewidth=int(self.line_prof.WidthData))
self.ax_fig_line_prof.plot(self.profile)
plt.show()
else:
return
# Function to update image after changing it
def update_image(self):
self.image.set_clim(vmin=self.cmin, vmax=self.cmax)
def update_colourmap(self):
self.colourbar.update_bruteforce(self.image)
def update_cm_textbox(self):
self.text_cmin.set_val("%.4f" % self.cmin)
self.text_cmax.set_val("%.4f" % self.cmax)
def update_cmin(self, event):
self.cmin = float(event)
self.contrast_span(self.cmin, self.cmax)
def update_cmax(self, event):
self.cmax = float(event)
self.contrast_span(self.cmin, self.cmax)
# Calculates and plots image histogram and connects interactive spanselector
def plot_contrast_histogram(self):
self.ax_contrast.cla()
self.ax_contrast.plot(self.imbins[:-1], self.imhist, color='k')
self.ax_contrast.set_axis_off()
self.ax_contrast_span = SpanSelector(self.ax_contrast, self.contrast_span, 'horizontal',
span_stays=True, rectprops=dict(alpha=0.5, facecolor='green'))
# Function for interactive spanselector for contrast histogram
def contrast_span(self, cmin, cmax):
self.cmin = cmin
self.cmax = cmax
self.update_image()
self.update_colourmap()
self.update_cm_textbox()
def update_calibration(self, event):
self.cal = float(event)
def update_scalebar(self, event):
if event.inaxes == self.fig_image_parameter[3].ax:
if self.state_scalebar == 0:
if self.cal is not None:
self.state_scalebar = 1
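                    # self.cal is in nm/pixel; ScaleBar interprets its argument
                    # as the physical size of one pixel (metres by default),
                    # hence the factor 1e-9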
self.scalebar = self.ax_image.add_artist(ScaleBar(self.cal * 10 ** -9))
self.fig_image.canvas.draw()
elif self.state_scalebar == 1:
if self.cal is not None:
self.state_scalebar = 0
self.scalebar.remove()
self.fig_image.canvas.draw()
else:
raise Exception("Invalid parameter for scalebar")
def rotate_90(self, event):
if event.inaxes == self.fig_image_parameter[5].ax:
self.image_data = np.rot90(self.image_data)
self.image.set_array(self.image_data)
self.fig_image.canvas.draw()
def export_data(self, event):
if event.inaxes == self.fig_image_parameter[7].ax:
print('export')
#'''Save image respecting the number of pixels of the origin image'''
#imsave('image_array.png', self.image_data)
#'''Save image without respecting the number of pixels of the origin image'''
plt.ioff()
fig_export = plt.figure(figsize=(10, 7), dpi=100)
ax_fig_export = fig_export.add_subplot(1, 1, 1)
image_fig_export = ax_fig_export.imshow(self.image_data, cmap=self.cmap, vmin=self.cmin, vmax=self.cmax)
ax_fig_export.set_axis_off()
if self.state_scalebar == 1:
ax_fig_export.add_artist(ScaleBar(self.cal * 10 ** -9))
fig_export.canvas.draw()
if self.line_prof is not None:
np.savetxt('line_profile', self.profile)
fig_export.colorbar(image_fig_export)
fig_export.savefig('image.png')
print('Image saved')
status_export_raw = 1
if status_export_raw == 1:
np.savetxt('image_raw', self.image_data, delimiter=',')
print('Raw data extracted')
plt.close(fig_export)
| bsd-3-clause |
bmazin/ARCONS-pipeline | examples/Pal2012-0656/timePL.py | 1 | 5792 | #!/bin/python
'''
Author: Matt Strader Date: March 20,2014
This program takes previously created photon lists (customized pulsar lists with empty columns for timing information) and feeds them through tempo2 to fill the jd, bjd, pulseNumber, totalPhase, and phase columns. It then creates an index in the PLs so that they may be searched for pulseNumber efficiently. It also fills the waveOutOfRange and waveUpperLimit columns using the corresponding obs file's wavecal solution.
In the course of running, this program creates temporary text files filled with timestamps to feed to tempo2. Tempo2 makes its own temporary output files for this program to read.
'''
from flatcal.flatCal import FlatCal
from util.ObsFile import ObsFile
from hackedPipeline.photlist import PhotList
from util.FileName import FileName
from util.popup import PopUp
import matplotlib.pyplot as plt
import numpy as np
import datetime
import tables
import ephem
import matplotlib
import matplotlib.cm as cm
import os
import time
import subprocess
import numexpr
def main():
obsSequence0="""
112633
112731
113234
113737
114240
114743
115246
115749
120252
120755
121258
121801
122304
"""
run = 'PAL2012'
obsSequences = [obsSequence0]
obsUtcDates = ['20121208']
wvlCals = ['112506']
flatCals = ['20121211']
fluxCalDates = ['20121206']
fluxCals = ['20121207-072055']
#Row coordinate of center of pulsar for each obsSequence
centersRow = [30]
#Col coordinate of center of pulsar for each obsSequence
centersCol = [30]
path = '/Scratch/dataProcessing/psr0656/'
parFile = 'B0656+14_LAT.par'
#parFile = 'B0531+21.par'
obsFileNames = []
obsFileNameTimestamps = []
wvlFileNames = []
flatFileNames = []
fluxFileNames = []
timeMaskFileNames = []
plFileNames = []
for iSeq in range(len(obsSequences)):
obsSequence = obsSequences[iSeq]
obsSequence = obsSequence.strip().split()
obsFileNameTimestamps.append(obsSequence)
obsUtcDate = obsUtcDates[iSeq]
sunsetDate = str(int(obsUtcDate)-1)
obsSequence = [obsUtcDates[iSeq]+'-'+ts for ts in obsSequence]
obsFileNames.append([FileName(run=run,date=sunsetDate,tstamp=ts).obs() for ts in obsSequence])
plFileNames.append([FileName(run=run,date=sunsetDate,tstamp=ts).timedPhotonList() for ts in obsSequence])
timeMaskFileNames.append([FileName(run=run,date=sunsetDate,tstamp=ts).timeMask() for ts in obsSequence])
wvlCalTstamp = obsUtcDate+'-'+wvlCals[iSeq]
wvlFileNames.append(FileName(run=run,date=sunsetDate,tstamp=wvlCalTstamp).calSoln())
fluxFileNames.append(FileName(run=run,date=fluxCalDates[iSeq],tstamp=fluxCals[iSeq]).fluxSoln())
flatFileNames.append(FileName(run=run,date=flatCals[iSeq],tstamp='').flatSoln())
apertureRadius = 4
obLists = [[ObsFile(fn) for fn in seq ] for seq in obsFileNames]
plLists = [[PhotList(fn) for fn in seq ] for seq in plFileNames]
tstampFormat = '%H:%M:%S'
#print 'fileName','headerUnix','headerUTC','logUnix','packetReceivedUnixTime'
wvlRangeTable = np.zeros([46, 44, 2])
wvlCalData = plLists[0][0].file.root.wavecal.calsoln
for calPixel in wvlCalData:
if calPixel['wave_flag'] == 0:
wvlRangeTable[calPixel['pixelrow']][calPixel['pixelcol']] = calPixel['solnrange']
#obsDate = obList[0].getFromHeader('jd')
unixEpochJD = 2440587.5
epochMJD = 2400000.5
secsPerDay = (24.*3600)
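    # unixEpochJD is the Julian Date of the unix epoch (1970-01-01 UTC);
    # subtracting epochMJD (the JD - MJD offset) below converts the photon
    # arrival times to Modified Julian Dates, presumably the form expected by
    # the tempo2 'arcons' plugin invoked further down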
processList = []
for iSeq,(obList,plList) in enumerate(zip(obLists,plLists)):
for iOb,(ob,pl) in enumerate(zip(obList,plList)):
print iOb,'of',len(obList),ob.fileName
ob.loadTimeAdjustmentFile(FileName(run='PAL2012').timeAdjustments())
pl.loadData()
print 'loaded...',
obsDate = ob.getFromHeader('unixtime')/secsPerDay+unixEpochJD
timestamps = np.array(pl.data['arrivalTime'],dtype=np.float64)
jdTimestamps = obsDate+timestamps/secsPerDay - epochMJD
tempJDPath = path+'temp_timestamps_{:.3f}'.format(time.time())
tempBJDPath = path+'temp_bary_timestamps_{:.3f}'.format(time.time())
np.savetxt(tempJDPath,jdTimestamps)
strCommand = 'tempo2 -gr arcons -f {} -a {} -o {}'.format(parFile,tempJDPath,tempBJDPath)
print strCommand
proc = subprocess.Popen(strCommand,shell=True,stdout=subprocess.PIPE)
proc.wait()
processList.append(proc)
tempoOutput = np.loadtxt(tempBJDPath,usecols=[1,2])
bjdTimestamps = tempoOutput[:,0]
phases = tempoOutput[:,1]
pl.photTable.cols.jd[:] = jdTimestamps
pl.photTable.cols.bjd[:] = bjdTimestamps
pl.photTable.cols.totalPhase[:] = phases
print 'tempo done...',
pl.photTable.cols.pulseNumber[:] = np.array(phases,dtype=np.uint32)
pl.photTable.cols.phase[:] = numexpr.evaluate('phases%1.')
print 'phases done...',
try:
pl.photTable.cols.pulseNumber.createCSIndex()
print 'index made...',
except:
print 'WARNING:couldn\'t make index (probably exists already)...'
os.remove(tempJDPath)
os.remove(tempBJDPath)
for photon in pl.photTable.iterrows():
x = photon['xPix']
y = photon['yPix']
wave = photon['wavelength']
photon['waveUpperLimit']=wvlRangeTable[y,x][1]
photon.update()
print 'wave checked.'
pl.file.flush()
del pl
if __name__ == '__main__':
main()
| gpl-2.0 |
scienceopen/hist-feasibility | Plots/FirstAuroralConjugate.py | 2 | 2453 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Plots first recorded auroral conjugate plausible geometry.
Willis 1996 http://adsabs.harvard.edu/abs/1996QJRAS..37..733W
FIXME: add geomagnetic coordinates for 1770
Chinese font: apt install fonts-wqy-zenhei
"""
import numpy as np
import cartopy
#
#import matplotlib as mpl
#font_name = "WenQuanYi Zen Hei"
#mpl.rcParams['font.family']=font_name
#
#import matplotlib.font_manager as mfm
#ch_font = mfm.FontProperties(fname="/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc")
#
#from matplotlib import font_manager
#fontP = font_manager.FontProperties()
#fontP.set_family('WenQuanYi Zen Hei')
#
from matplotlib.pyplot import show,figure
#
from pymap3d import geodetic2aer
#
PROJ = cartopy.crs.PlateCarree() # arbitrary
def main():
# %% from paper
sitella = {'HMS Endeavour':[-10.45, 122.82],
'冀县 Ji-zhou':[ 40.1, 117.4]}
Narclla = np.array([[41,118],
[41.5,123.5],
[40.5,127.5],
[38.5,132]])
Sarclla = np.array([[-26, 119],
[-26.5,123],
[-26, 126],
[-25, 129.5]
])
# %% simple calculations for presumable LOS
archeight = 350e3 # m, assumed
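    # look angles (azimuth, elevation, slant range) of a point on the southern
    # arc, assumed at 350 km altitude, as seen from the HMS Endeavour site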
aer = geodetic2aer(Sarclla[1,0], Sarclla[1,1], archeight,
*sitella['HMS Endeavour'], 0.)
print(f'aurora elevation angle (deg) {aer[1]:.1f}')
# %%
    ax = figure().gca(projection=PROJ)
ax.add_feature(cartopy.feature.LAND)
ax.add_feature(cartopy.feature.OCEAN)
ax.add_feature(cartopy.feature.COASTLINE)
ax.add_feature(cartopy.feature.BORDERS, linestyle=':')
ax.set_extent((90,160,-40,45))
#%% sites
for o,lla in sitella.items():
ax.plot(*lla[::-1],'o',
color='limegreen',markersize=12,
transform=PROJ)
ax.annotate(o, xy=lla[::-1], xytext = (3, 3), textcoords = 'offset points',
ha='right',
                    family='WenQuanYi Zen Hei')
#%% aurora
ax.plot(Narclla[:,1], Narclla[:,0],
color='firebrick',linewidth=2,
transform=PROJ)
ax.plot(Sarclla[:,1], Sarclla[:,0],
color='firebrick',linewidth=2,
transform=PROJ)
ax.set_title('First Conjugate Auroral Observation 1770 CE')
#fontproperties=fontP)
if __name__ == '__main__':
main()
show() | gpl-3.0 |
albertoferna/compmech | compmech/conecyl/conecyl.py | 1 | 87599 | from __future__ import division
import gc
import os
import sys
import traceback
from collections import Iterable
import time
import cPickle
import __main__
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix
from scipy.sparse.linalg import eigsh
from scipy.optimize import leastsq
from numpy import linspace, pi, cos, sin, tan, deg2rad
from conecylDB import ccs, laminaprops
import compmech.composite.laminate as laminate
from compmech.analysis import Analysis
from compmech.logger import msg, warn, error
from compmech.sparse import remove_null_cols, make_symmetric
from compmech.constants import DOUBLE
import modelDB
from modelDB import get_model
def load(name):
if '.ConeCyl' in name:
cc = cPickle.load(open(name, 'rb'))
else:
cc = cPickle.load(open(name + '.ConeCyl', 'rb'))
cc.analysis.calc_fext = cc.calc_fext
cc.analysis.calc_k0 = cc.calc_k0
cc.analysis.calc_fint = cc.calc_fint
cc.analysis.calc_kT = cc.calc_kT
return cc
class ConeCyl(object):
"""
"""
__slots__ = ['_load_rebuilt', 'name', 'alphadeg', 'alpharad', 'r1', 'r2',
'L', 'H', 'h', 'K', 'is_cylinder', 'inf', 'zero',
'bc', 'kuBot', 'kvBot', 'kwBot', 'kphixBot', 'kphitBot', 'kuTop',
'kvTop', 'kwTop', 'kphixTop', 'kphitTop', 'model', 'm1', 'm2',
'size', 'n2', 's', 'nx', 'nt', 'ni_num_cores', 'ni_method',
'forces', 'forces_inc', 'P',
'P_inc', 'pdC', 'Fc', 'Nxxtop', 'uTM', 'c0', 'm0', 'n0',
'funcnum', 'pdT', 'T', 'T_inc', 'thetaTdeg', 'thetaTrad', 'pdLA',
'tLAdeg', 'tLArad', 'betadeg', 'betarad', 'xiLA', 'MLA', 'LA',
'num0', 'excluded_dofs', 'excluded_dofs_ck', 'sina', 'cosa',
'laminaprop', 'plyt', 'laminaprops', 'stack', 'plyts', 'F',
'F_reuse', 'force_orthotropic_laminate', 'E11', 'nu',
'num_eigvalues', 'num_eigvalues_print',
'analysis', 'with_k0L', 'with_kLL',
'cs', 'increments', 'outputs',
'eigvals', 'eigvecs',
'k0', 'k0uk', 'k0uu', 'kTuk', 'kTuu', 'kG0', 'kG0_Fc', 'kG0_P',
'kG0_T', 'kG', 'kGuu', 'kL', 'kLuu', 'lam', 'u', 'v', 'w', 'phix',
'phit', 'Xs', 'Ts',
'out_num_cores',
]
def __init__(self):
self.name = ''
# geometry
self.alphadeg = 0.
self.alpharad = 0.
self.r1 = None
self.r2 = None
self.L = None
self.H = None
self.h = None # total thickness, required for isotropic shells
self.is_cylinder = None
# boundary conditions
self.inf = 1.e8 # used to define high stiffnesses
self.zero = 0. # used to define zero stiffnesses
self.bc = None
self.kuBot = self.inf
self.kvBot = self.inf
self.kwBot = self.inf
self.kphixBot = 0.
self.kphitBot = 0.
self.kuTop = self.inf
self.kvTop = self.inf
self.kwTop = self.inf
self.kphixTop = 0.
self.kphitTop = 0.
# default equations
self.model = 'clpt_donnell_bc1'
# approximation series
self.m1 = 120
self.m2 = 25
self.n2 = 45
# analytical integration for cones
self.s = 79
# numerical integration
self.nx = 120
self.nt = 180
self.ni_num_cores = 4
self.ni_method = 'trapz2d'
# punctual loads
self.forces = []
self.forces_inc = []
# internal pressure measured in force/area
self.P = 0.
self.P_inc = 0.
# axial compression
self.pdC = False
self.Fc = None
self.Nxxtop = None
self.uTM = 0.
self._load_rebuilt = False
# initial imperfection
self.c0 = None
self.m0 = 0
self.n0 = 0
self.funcnum = 2
# torsion
self.pdT = True
self.T = 0.
self.T_inc = 0.
self.thetaTdeg = 0.
self.thetaTrad = 0.
# load asymmetry (la)
self.pdLA = True
self.tLAdeg = 0.
self.tLArad = 0.
self.betadeg = 0.
self.betarad = 0.
self.xiLA = None
self.MLA = None
self.LA = None
self.num0 = 3
self.excluded_dofs = []
self.excluded_dofs_ck = []
self.sina = None
self.cosa = None
# material
self.laminaprop = None
self.plyt = None
self.laminaprops = []
self.stack = []
self.plyts = []
# constitutive law
self.F_reuse = None
self.F = None
self.force_orthotropic_laminate = False
self.E11 = None
self.nu = None
self.K = 5/6.
# eigenvalue analysis
self.num_eigvalues = 50
self.num_eigvalues_print = 5
# output queries
self.out_num_cores = 4
self.cs = []
self.increments = []
# analysis
self.analysis = Analysis(self.calc_fext, self.calc_k0, self.calc_fint,
self.calc_kT)
self.with_k0L = True
self.with_kLL = True
# outputs
self.outputs = {}
self._clear_matrices()
def _clear_matrices(self):
self.k0 = None
self.k0uk = None
self.k0uu = None
self.kTuk = None
self.kTuu = None
self.kG0 = None
self.kG0_Fc = None
self.kG0_P = None
self.kG0_T = None
self.kG = None
self.kGuu = None
self.kL = None
self.kLuu = None
self.lam = None
self.u = None
self.v = None
self.w = None
self.phix = None
self.phit = None
self.Xs = None
self.Ts = None
self.Nxxtop = None
gc.collect()
def _rebuild(self):
if not self.name:
try:
self.name = os.path.basename(__main__.__file__).split('.py')[0]
except AttributeError:
warn('ConeCyl name unchanged')
if self.k0 is not None:
if self.k0.shape[0] != self.get_size():
self._clear_matrices()
self._load_rebuilt = False
self._rebuild()
self.model = self.model.lower()
model_dict = get_model(self.model)
# boundary conditions
inf = self.inf
zero = self.zero
if inf > 1.e8:
warn('"inf" parameter reduced to 1.e8 due to the verified ' +
'numerical instability for higher values', level=2)
inf = 1.e8
if self.bc is not None:
bc = self.bc.lower()
if '_' in bc:
# different bc for Bot and Top
bc_Bot, bc_Top = self.bc.split('_')
elif '-' in bc:
# different bc for Bot and Top
bc_Bot, bc_Top = self.bc.split('-')
else:
bc_Bot = bc_Top = bc
bcs = dict(bc_Bot=bc_Bot, bc_Top=bc_Top)
for k in bcs.keys():
sufix = k.split('_')[1] # Bot or Top
if bcs[k] == 'ss1':
setattr(self, 'ku' + sufix, inf)
setattr(self, 'kv' + sufix, inf)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, zero)
setattr(self, 'kphit' + sufix, zero)
elif bcs[k] == 'ss2':
setattr(self, 'ku' + sufix, zero)
setattr(self, 'kv' + sufix, inf)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, zero)
setattr(self, 'kphit' + sufix, zero)
elif bcs[k] == 'ss3':
setattr(self, 'ku' + sufix, inf)
setattr(self, 'kv' + sufix, zero)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, zero)
setattr(self, 'kphit' + sufix, zero)
elif bcs[k] == 'ss4':
setattr(self, 'ku' + sufix, zero)
setattr(self, 'kv' + sufix, zero)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, zero)
setattr(self, 'kphit' + sufix, zero)
elif bcs[k] == 'cc1':
setattr(self, 'ku' + sufix, inf)
setattr(self, 'kv' + sufix, inf)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, inf)
setattr(self, 'kphit' + sufix, zero)
elif bcs[k] == 'cc2':
setattr(self, 'ku' + sufix, zero)
setattr(self, 'kv' + sufix, inf)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, inf)
setattr(self, 'kphit' + sufix, zero)
elif bcs[k] == 'cc3':
setattr(self, 'ku' + sufix, inf)
setattr(self, 'kv' + sufix, zero)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, inf)
setattr(self, 'kphit' + sufix, zero)
elif bcs[k] == 'cc4':
setattr(self, 'ku' + sufix, zero)
setattr(self, 'kv' + sufix, zero)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, inf)
setattr(self, 'kphit' + sufix, zero)
elif bcs[k] == 'free':
setattr(self, 'ku' + sufix, zero)
setattr(self, 'kv' + sufix, zero)
setattr(self, 'kw' + sufix, zero)
setattr(self, 'kphix' + sufix, zero)
setattr(self, 'kphit' + sufix, zero)
else:
text = '"{}" is not a valid boundary condition!'.format(bc)
raise ValueError(text)
self.alpharad = deg2rad(self.alphadeg)
self.sina = sin(self.alpharad)
self.cosa = cos(self.alpharad)
if not self.H and not self.L:
self.H = (self.r1-self.r2)/tan(self.alpharad)
if self.H and not self.L:
self.L = self.H/self.cosa
if self.L and not self.H:
self.H = self.L*self.cosa
if not self.r2:
if not self.r1:
raise ValueError('Radius "r1" or "r2" must be specified')
else:
self.r2 = self.r1 - self.L*self.sina
else:
self.r1 = self.r2 + self.L*self.sina
self.thetaTrad = deg2rad(self.thetaTdeg)
self.tLArad = deg2rad(self.tLAdeg)
self.betarad = deg2rad(self.betadeg)
self.LA = self.r2*tan(self.betarad)
if not self.laminaprops:
self.laminaprops = [self.laminaprop for i in self.stack]
if not self.plyts:
self.plyts = [self.plyt for i in self.stack]
if self.alpharad == 0:
self.is_cylinder = True
else:
self.is_cylinder = False
self.excluded_dofs = []
self.excluded_dofs_ck = []
if self.pdC:
self.excluded_dofs.append(0)
self.excluded_dofs_ck.append(self.uTM)
if self.pdT:
self.excluded_dofs.append(1)
self.excluded_dofs_ck.append(self.thetaTrad)
if self.pdLA:
self.excluded_dofs.append(2)
self.excluded_dofs_ck.append(self.LA)
else:
raise NotImplementedError('pdLA == False is giving wrong results!')
if self.nx < 4*self.m2:
warn('Number of integration points along x too small')
if self.nt < 4*self.n2:
warn('Number of integration points along theta too small')
if self.laminaprop is None:
h = self.h
E11 = self.E11
nu = self.nu
if h is None or E11 is None or nu is None:
raise ValueError(
'laminaprop or (E11, nu and h) must be defined')
G12 = E11/(2*(1 + nu))
A11 = E11*h/(1 - nu**2)
A12 = nu*E11*h/(1 - nu**2)
A16 = 0
A22 = E11*h/(1 - nu**2)
A26 = 0
A66 = G12*h
D11 = E11*h**3/(12*(1 - nu**2))
D12 = nu*E11*h**3/(12*(1 - nu**2))
D16 = 0
D22 = E11*h**3/(12*(1 - nu**2))
D26 = 0
D66 = G12*h**3/12
# TODO, what if FSDT is used?
if 'fsdt' in self.model:
raise NotImplementedError(
'For FSDT laminaprop must be defined!')
self.F = np.array([[A11, A12, A16, 0, 0, 0],
[A12, A22, A26, 0, 0, 0],
[A16, A26, A66, 0, 0, 0],
[0, 0, 0, D11, D12, D16],
[0, 0, 0, D12, D22, D26],
[0, 0, 0, D16, D26, D66]])
if self.c0 is not None:
self.analysis.kT_initial_state = True
if self.Nxxtop is not None and self._load_rebuilt:
return
# axial load
if self.Nxxtop is not None:
if type(self.Nxxtop) in (int, float):
Nxxtop0 = self.Nxxtop
self.Nxxtop = np.zeros(2*self.n2+1, dtype=DOUBLE)
self.Nxxtop[0] = Nxxtop0
check = False
if isinstance(self.Nxxtop, np.ndarray):
if self.Nxxtop.ndim == 1:
assert self.Nxxtop.shape[0] == (2*self.n2+1)
check = True
if not check:
raise ValueError('Invalid Nxxtop input')
else:
self.Nxxtop = np.zeros(2*self.n2+1, dtype=DOUBLE)
if self.Fc is not None:
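                # a prescribed axial force Fc is converted to the uniform
                # membrane load Nxx = Fc/(2*pi*r2*cos(alpha)), i.e. distributed
                # along the top-edge circumference and projected onto the
                # shell meridian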
self.Nxxtop[0] = self.Fc/(2*pi*self.r2*self.cosa)
msg('Nxxtop[0] calculated from Fc', level=2)
if self.MLA is None:
if self.xiLA is not None:
self.MLA = self.xiLA*self.Fc
msg('MLA calculated from xiLA', level=2)
if self.MLA is not None:
self.Nxxtop[2] = self.MLA/(pi*self.r2**2*self.cosa)
msg('Nxxtop[2] calculated from MLA', level=2)
self._load_rebuilt = True
def get_size(self):
r"""Calculates the size of the stiffness matrices
The size of the stiffness matrices can be interpreted as the number of
rows or columns, recalling that this will be the size of the Ritz
constants' vector `\{c\}`, the internal force vector `\{F_{int}\}` and
the external force vector `\{F_{ext}\}`.
Returns
-------
size : int
The size of the stiffness matrices.
"""
model_dict = get_model(self.model)
num0 = model_dict['num0']
num1 = model_dict['num1']
num2 = model_dict['num2']
self.size = num0 + num1*self.m1 + num2*self.m2*self.n2
return self.size
def from_DB(self, name):
"""Load cone / cylinder data from the local database
Parameters
----------
name : str
A key contained in the ``ccs`` dictionary of module
:mod:`compmech.conecyl.conecylDB`.
"""
try:
attrs = ['r1', 'r2', 'H', 'L', 'alphadeg', 'plyt', 'stack']
cc = ccs[name]
self.laminaprop = laminaprops[cc['laminapropKey']]
for attr in attrs:
setattr(self, attr, cc.get(attr, getattr(self, attr)))
except:
raise ValueError('Invalid data-base entry!')
def exclude_dofs_matrix(self, k, return_kkk=False,
return_kku=False,
return_kuk=False):
"""Makes the partition of the dofs for prescribed displacements
Makes the following partition of a given matrix::
k = | kkk kku |
| kuk kuu |
Parameters
----------
k : scipy.sparse.coo_matrix
Matrix to be partitioned.
return_kkk : bool, optional
If the region `kkk` must be returned.
return_kku : bool, optional
If the region `kku` must be returned.
return_kuk : bool, optional
If the region `kuk` must be returned.
Returns
-------
out : dict
A ``dict`` object containing the keys for the
corresponding sub-matrices ``kkk``, ``kku``, ``kuk``, ``kuu``.
The sub-matrix ``out['kuu']`` is a ``scipy.sparse.csr_matrix``,
while the others are 2-D ``np.ndarray`` objects.
"""
if not isinstance(k, coo_matrix):
k = coo_matrix(k)
if return_kkk:
kkk = coo_matrix(np.zeros((self.num0, self.num0)))
ind = np.where(((k.row < self.num0) & (k.col < self.num0)))[0]
kkk.row = np.take(k.row, ind)
kkk.col = np.take(k.col, ind)
kkk.data = np.take(k.data, ind)
kkk = kkk.toarray()
kkk = np.delete(kkk, self.excluded_dofs, axis=0)
kkk = np.delete(kkk, self.excluded_dofs, axis=1)
if return_kku:
kku = coo_matrix(np.zeros((self.num0, k.shape[0])))
ind = np.where(k.row < self.num0)[0]
kku.row = np.take(k.row, ind)
kku.col = np.take(k.col, ind)
kku.data = np.take(k.data, ind)
kku = kku.toarray()
kku = np.delete(kku, self.excluded_dofs, axis=1)
if return_kuk:
kuk = coo_matrix(np.zeros((k.shape[0], self.num0)))
ind = np.where(k.col < self.num0)[0]
kuk.row = np.take(k.row, ind)
kuk.col = np.take(k.col, ind)
kuk.data = np.take(k.data, ind)
kuk = kuk.toarray()
kuk = np.delete(kuk, self.excluded_dofs, axis=0)
rows = np.sort(self.excluded_dofs)[::-1]
cols = np.sort(self.excluded_dofs)[::-1]
kuu = k.copy()
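        # build kuu by dropping the excluded (prescribed) rows and columns
        # from the COO data, shifting the remaining indices and shrinking the
        # matrix shape accordingly; highest dofs are removed first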
for r in rows:
ind = np.where(kuu.row != r)[0]
kuu.row[kuu.row > r] -= 1
kuu.row = np.take(kuu.row, ind)
kuu.col = np.take(kuu.col, ind)
kuu.data = np.take(kuu.data, ind)
kuu._shape = (kuu._shape[0]-1, kuu._shape[1])
for c in cols:
ind = np.where(kuu.col != c)[0]
kuu.col[kuu.col > c] -= 1
kuu.row = np.take(kuu.row, ind)
kuu.col = np.take(kuu.col, ind)
kuu.data = np.take(kuu.data, ind)
kuu._shape = (kuu._shape[0], kuu._shape[1]-1)
kuu = csr_matrix(kuu)
out = {}
out['kuu'] = kuu
if return_kkk:
out['kkk'] = kkk
if return_kku:
out['kku'] = kku
if return_kuk:
out['kuk'] = kuk
return out
def calc_full_c(self, cu, inc=1.):
"""Returns the full set of Ritz constants
When prescribed displacements take place the matrices and the Ritz
constants are partitioned like::
k = | kkk kku |
| kuk kuu |
and the corresponding Ritz constants::
c = | ck |
| cu |
This function adds the set of known Ritz constants (``ck``)
to the set of unknown (``cu``) based on the prescribed displacements.
Parameters
----------
cu : np.ndarray
The set of unknown Ritz constants
inc : float, optional
Load increment, necessary to calculate the full set of Ritz
constants.
Returns
-------
c : np.ndarray
The full set of Ritz constants.
"""
c = cu.copy()
size = self.get_size()
if c.shape[0] == size:
for dof in self.excluded_dofs:
c[dof] *= inc
return c
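        # otherwise re-insert the known (prescribed) constants, scaled by the
        # load increment, in ascending dof order so that the insertion indices
        # remain valid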
ordered = sorted(zip(self.excluded_dofs,
self.excluded_dofs_ck), key=lambda x:x[0])
for dof, cai in ordered:
c = np.insert(c, dof, inc*cai)
return c
def _default_field(self, xs, ts, gridx, gridt):
if xs is None or ts is None:
xs = linspace(0, self.L, gridx)
ts = linspace(-pi, pi, gridt)
xs, ts = np.meshgrid(xs, ts, copy=False)
xs = np.atleast_1d(np.array(xs, dtype=DOUBLE))
ts = np.atleast_1d(np.array(ts, dtype=DOUBLE))
xshape = xs.shape
tshape = ts.shape
if xshape != tshape:
raise ValueError('Arrays xs and ts must have the same shape')
self.Xs = xs
self.Ts = ts
xs = xs.ravel()
ts = ts.ravel()
return xs, ts, xshape, tshape
def _calc_linear_matrices(self, combined_load_case=None):
self._rebuild()
msg('Calculating linear matrices... ', level=2)
fk0, fk0_cyl, fkG0, fkG0_cyl, k0edges = modelDB.get_linear_matrices(
self, combined_load_case)
model = self.model
alpharad = self.alpharad
cosa = self.cosa
r1 = self.r1
r2 = self.r2
L = self.L
m1 = self.m1
m2 = self.m2
n2 = self.n2
s = self.s
laminaprops = self.laminaprops
plyts = self.plyts
stack = self.stack
P = self.P
T = self.T
E11 = self.E11
nu = self.nu
h = self.h
Fc = self.Nxxtop[0]*(2*pi*r2*cosa)
lam = self.lam
if stack != [] and self.F_reuse is None:
lam = laminate.read_stack(stack, plyts=plyts,
laminaprops=laminaprops)
if 'clpt' in model:
if self.F_reuse is not None:
msg('')
msg('Reusing F matrix...', level=2)
F = self.F_reuse
elif lam is not None:
F = lam.ABD
else:
F = self.F
elif 'fsdt' in model:
if self.F_reuse is not None:
msg('')
msg('Reusing F matrix...', level=2)
F = self.F_reuse
elif lam is not None:
F = lam.ABDE
F[6:, 6:] *= self.K
else:
F = self.F
if self.force_orthotropic_laminate:
msg('')
msg('Forcing orthotropic laminate...', level=2)
F[0, 2] = 0. # A16
F[1, 2] = 0. # A26
F[2, 0] = 0. # A61
F[2, 1] = 0. # A62
F[0, 5] = 0. # B16
F[5, 0] = 0. # B61
F[1, 5] = 0. # B26
F[5, 1] = 0. # B62
F[3, 2] = 0. # B16
F[2, 3] = 0. # B61
F[4, 2] = 0. # B26
F[2, 4] = 0. # B62
F[3, 5] = 0. # D16
F[4, 5] = 0. # D26
F[5, 3] = 0. # D61
F[5, 4] = 0. # D62
if F.shape[0] == 8:
F[6, 7] = 0. # A45
F[7, 6] = 0. # A54
self.lam = lam
self.F = F
if self.is_cylinder:
if 'iso_' in model:
k0 = fk0_cyl(r2, L, E11, nu, h, m1, m2, n2)
else:
k0 = fk0_cyl(r2, L, F, m1, m2, n2)
if not combined_load_case:
kG0 = fkG0_cyl(Fc, P, T, r2, L, m1, m2, n2)
else:
kG0_Fc = fkG0_cyl(Fc, 0, 0, r2, L, m1, m2, n2)
kG0_P = fkG0_cyl(0, P, 0, r2, L, m1, m2, n2)
kG0_T = fkG0_cyl(0, 0, T, r2, L, m1, m2, n2)
else:
if 'iso_' in model:
k0 = fk0(alpharad, r2, L, E11, nu, h, m1, m2, n2, s)
else:
k0 = fk0(alpharad, r2, L, F, m1, m2, n2, s)
if not combined_load_case:
kG0 = fkG0(Fc, P, T, r2, alpharad, L, m1, m2, n2, s)
else:
kG0_Fc = fkG0(Fc, 0, 0, r2, alpharad, L, m1, m2, n2, s)
kG0_P = fkG0(0, P, 0, r2, alpharad, L, m1, m2, n2, s)
kG0_T = fkG0(0, 0, T, r2, alpharad, L, m1, m2, n2, s)
if k0edges is not None:
k0 = csr_matrix(k0) + csr_matrix(k0edges)
assert np.any((np.isnan(k0.data) | np.isinf(k0.data))) == False
k0 = make_symmetric(k0)
if not combined_load_case:
assert np.any((np.isnan(kG0.data) | np.isinf(kG0.data))) == False
self.kG0 = make_symmetric(kG0)
else:
assert np.any((np.isnan(kG0_Fc.data)
| np.isinf(kG0_Fc.data))) == False
assert np.any((np.isnan(kG0_P.data)
| np.isinf(kG0_P.data))) == False
assert np.any((np.isnan(kG0_T.data)
| np.isinf(kG0_T.data))) == False
self.kG0_Fc = make_symmetric(kG0_Fc)
self.kG0_P = make_symmetric(kG0_P)
self.kG0_T = make_symmetric(kG0_T)
k = self.exclude_dofs_matrix(k0, return_kuk=True)
k0uk = k['kuk']
k0uu = k['kuu']
self.k0 = k0
self.k0uk = k0uk
self.k0uu = k0uu
#NOTE forcing Python garbage collector to clean the memory
# it DOES make a difference! There is a memory leak not
# identified, probably in the csr_matrix process
gc.collect()
msg('finished!', level=2)
def calc_k0(self):
if self.k0uu is None:
self._calc_linear_matrices()
return self.k0uu
def calc_kT(self, c, inc=1.):
r"""Calculates the tangent stiffness matrix
The following attributes will affect the numerical integration:
        ================= ================================================
        Attribute         Description
        ================= ================================================
        ``ni_num_cores``  ``int``, number of cores used for the numerical
                          integration
        ``ni_method``     ``str``, integration method:
                          - ``'trapz2d'`` for 2-D Trapezoidal's rule
                          - ``'simps2d'`` for 2-D Simpsons' rule
        ``nx``            ``int``, number of integration points along the
                          `x` coordinate
        ``nt``            ``int``, number of integration points along the
                          `\theta` coordinate
        ================= ================================================
Parameters
----------
c : np.ndarray
The Ritz constants vector of the current state.
inc : float, optional
Load increment, necessary to calculate the full set of Ritz
constants using :meth:`calc_full_c`.
Returns
-------
kTuu : sparse matrix
The tangent stiffness matrix corresponding to the unknown degrees
of freedom.
"""
self._calc_NL_matrices(c, inc=inc)
return self.kTuu
def lb(self, c=None, tol=0, combined_load_case=None):
"""Performs a linear buckling analysis
The following parameters of the ``ConeCyl`` object will affect the
linear buckling analysis:
======================= =====================================
Attribute Description
======================= =====================================
``num_eigenvalues`` Number of eigenvalues to be extracted
``num_eigvalues_print`` Number of eigenvalues to print after
the analysis is completed
======================= =====================================
Parameters
----------
combined_load_case : int, optional
            Tells whether the linear buckling analysis must be computed
            considering combined load cases; each value tells the algorithm to
            rearrange the linear matrices in a different way. The valid values
            are ``1``, ``2`` or ``3``, where:
- ``1`` : find the critical axial load for a fixed torsion load
- ``2`` : find the critical axial load for a fixed pressure load
- ``3`` : find the critical torsion load for a fixed axial load
Notes
-----
        The extracted eigenvalues are stored in the ``eigvals`` attribute
        of the ``ConeCyl`` object and the `i^{th}` eigenvector in
        ``eigvecs[:, i-1]``.
"""
model_dict = get_model(self.model)
if not model_dict['linear buckling']:
msg('________________________________________________')
msg('')
warn('Model {} cannot be used in linear buckling analysis!'.
format(self.model))
msg('________________________________________________')
msg('Running linear buckling analysis...')
if self.Fc is None and self.Nxxtop is None:
warn('using Fc = 1.', level=1)
self.Fc = 1.
if self.pdC is None:
self.pdC = False
self._calc_linear_matrices(combined_load_case=combined_load_case)
#TODO maybe a better estimator to sigma would be to run
# a preliminary eigsh using a small m2 and n2
#NOTE runs faster for self.k0 than -self.k0, so that the negative
# sign is applied later
msg('Eigenvalue solver... ', level=2)
model_dict = get_model(self.model)
i0 = model_dict['i0']
num0 = model_dict['num0']
num1 = model_dict['num1']
num2 = model_dict['num2']
pos = num0
if not combined_load_case:
M = csr_matrix(self.k0)
A = csr_matrix(self.kG0)
elif combined_load_case == 1:
M = csr_matrix(self.k0) + csr_matrix(self.kG0_T)
A = csr_matrix(self.kG0_Fc)
elif combined_load_case == 2:
M = csr_matrix(self.k0) + csr_matrix(self.kG0_P)
A = csr_matrix(self.kG0_Fc)
elif combined_load_case == 3:
M = csr_matrix(self.k0) + csr_matrix(self.kG0_Fc)
A = csr_matrix(self.kG0_T)
A = A[pos:, pos:]
M = M[pos:, pos:]
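        #NOTE the buckling problem (k0 + lambda*kG0)*x = 0 is recast as the
        #     generalized eigenvalue problem kG0*x = mu*M*x, with M holding k0
        #     plus any fixed pre-load contribution, so that the critical load
        #     multipliers are recovered below as lambda = -1/mu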
try:
eigvals, eigvecs = eigsh(A=A, k=self.num_eigvalues, which='SM',
M=M, tol=tol, sigma=1.,
mode='cayley')
except Exception, e:
warn(str(e), level=3)
size22 = M.shape[0]
M, A, used_cols = remove_null_cols(M, A)
msg('solver...', level=3)
try:
eigvals, peigvecs = eigsh(A=A, k=self.num_eigvalues,
which='SM', M=M, tol=tol, sigma=1.,
mode='cayley')
except:
eigvals, peigvecs = eigsh(A=A, k=self.num_eigvalues,
which='SM', M=M, tol=tol, sigma=1.,
mode='buckling')
msg('finished!', level=3)
eigvecs = np.zeros((size22, self.num_eigvalues), dtype=DOUBLE)
eigvecs[used_cols, :] = peigvecs
eigvals = -1./eigvals
self.eigvals = eigvals
self.eigvecs = np.vstack((np.zeros((pos, self.num_eigvalues)),
eigvecs))
msg('finished!', level=2)
msg('first {} eigenvalues:'.format(self.num_eigvalues_print), level=1)
for eig in eigvals[:self.num_eigvalues_print]:
msg('{}'.format(eig), level=2)
self.analysis.last_analysis = 'lb'
def eigen(self, c=None, tol=0, kL=None, kG=None):
"""Performs a non-linear eigenvalue analysis at a given state
The following attributes of the ``ConeCyl`` object will affect the
non-linear eigenvalue analysis:
        ======================= =====================================
        Attribute               Description
        ======================= =====================================
        ``num_eigvalues``       Number of eigenvalues to be extracted
        ``num_eigvalues_print`` Number of eigenvalues to print after
                                the analysis is completed
        ======================= =====================================
Additionally, the non-linear analysis parameters described in
:meth:`static` will affect the integration of the non-linear matrices
``kL`` and ``kG`` if they are not given as input parameters.
Parameters
----------
        c : np.ndarray, optional
            The Ritz constants describing the state about which the
            eigenvalue problem is evaluated.
        tol : float, optional
            Tolerance passed to the eigenvalue solver.
        kL : sparse matrix, optional
            Pre-computed non-linear stiffness matrix (see the note above).
        kG : sparse matrix, optional
            Pre-computed geometric stiffness matrix (see the note above).
Notes
-----
        The extracted eigenvalues are stored in the ``eigvals`` attribute
        of the ``ConeCyl`` object and the `i^{th}` eigenvector in
        ``eigvecs[:, i-1]``.
"""
model_dict = get_model(self.model)
if not model_dict['linear buckling']:
msg('________________________________________________')
msg('')
warn('Model {} cannot be used in linear buckling analysis!'.
format(self.model))
msg('________________________________________________')
msg('Running linear buckling analysis...')
if self.Fc is None:
self.Fc = 1.
if self.pdC is None:
self.pdC = False
self._calc_linear_matrices(combined_load_case=combined_load_case)
#TODO maybe a better estimator to sigma would be to run
# a preliminary eigsh using a small m2 and n2
#NOTE runs faster for self.k0 than -self.k0, so that the negative
# sign is applied later
msg('Eigenvalue solver... ', level=2)
model_dict = get_model(self.model)
i0 = model_dict['i0']
num0 = model_dict['num0']
num1 = model_dict['num1']
num2 = model_dict['num2']
pos = num0
if not combined_load_case:
M = csr_matrix(self.k0)
A = csr_matrix(self.kG0)
elif combined_load_case == 1:
M = csr_matrix(self.k0) + csr_matrix(self.kG0_T)
A = csr_matrix(self.kG0_Fc)
elif combined_load_case == 2:
M = csr_matrix(self.k0) + csr_matrix(self.kG0_P)
A = csr_matrix(self.kG0_Fc)
elif combined_load_case == 3:
M = csr_matrix(self.k0) + csr_matrix(self.kG0_Fc)
A = csr_matrix(self.kG0_T)
A = A[pos:, pos:]
M = M[pos:, pos:]
try:
eigvals, eigvecs = eigsh(A=A, k=self.num_eigvalues, which='SM',
M=M, tol=tol, sigma=1.,
mode='cayley')
except Exception, e:
warn(str(e), level=3)
size22 = M.shape[0]
M, A, used_cols = remove_null_cols(M, A)
msg('solver...', level=3)
try:
eigvals, peigvecs = eigsh(A=A, k=self.num_eigvalues,
which='SM', M=M, tol=tol, sigma=1.,
mode='cayley')
except:
eigvals, peigvecs = eigsh(A=A, k=self.num_eigvalues,
which='SM', M=M, tol=tol, sigma=1.,
mode='buckling')
msg('finished!', level=3)
eigvecs = np.zeros((size22, self.num_eigvalues), dtype=DOUBLE)
eigvecs[used_cols, :] = peigvecs
eigvals = (-1./eigvals)
self.eigvals = eigvals
self.eigvecs = np.vstack((np.zeros((pos, self.num_eigvalues)),
eigvecs))
msg('finished!', level=2)
msg('first {} eigenvalues:'.format(self.num_eigvalues_print), level=1)
for eig in eigvals[:self.num_eigvalues_print]:
msg('{}'.format(eig), level=2)
self.analysis.last_analysis = 'lb'
def _calc_NL_matrices(self, c, inc=1., with_kLL=None, with_k0L=None):
r"""Calculates the non-linear stiffness matrices
Parameters
----------
c : np.ndarray
Ritz constants representing the current state to calculate the
stiffness matrices.
inc : float, optional
Load increment, necessary to calculate the full set of Ritz
constants using :meth:`calc_full_c`.
with_kLL : bool, optional
When ``with_kLL=False`` assumes kLL << than k0L and kG.
with_k0L : bool, optional
When ``with_k0L=False`` assumes k0L << than kLL and kG.
Notes
-----
        Nothing is returned; the calculated matrices are stored in the
        ``kTuk``, ``kTuu``, ``kL`` and ``kG`` attributes of the ``ConeCyl``
        object.
"""
c = self.calc_full_c(c, inc=inc)
if self.k0 is None:
self._calc_linear_matrices()
if with_k0L is None:
with_k0L = self.with_k0L
if with_kLL is None:
with_kLL = self.with_kLL
msg('Calculating non-linear matrices...', level=2)
alpharad = self.alpharad
r2 = self.r2
L = self.L
tLArad = self.tLArad
F = self.F
m1 = self.m1
m2 = self.m2
n2 = self.n2
c0 = self.c0
m0 = self.m0
n0 = self.n0
funcnum = self.funcnum
model = self.model
model_dict = get_model(model)
nlmodule = model_dict['non-linear']
ni_method = self.ni_method
num_cores = self.ni_num_cores
nx = self.nx
nt = self.nt
if nlmodule:
calc_k0L = nlmodule.calc_k0L
calc_kLL = nlmodule.calc_kLL
if 'iso_' in model:
calc_kG = modelDB.db[model[4:]]['non-linear'].calc_kG
else:
calc_kG = nlmodule.calc_kG
kG = calc_kG(c, alpharad, r2, L, tLArad, F, m1, m2, n2, nx=nx,
nt=nt, num_cores=num_cores, method=ni_method, c0=c0,
m0=m0, n0=n0)
kG = make_symmetric(kG)
if 'iso_' in model:
E11 = self.E11
nu = self.nu
h = self.h
if with_k0L:
k0L = calc_k0L(c, alpharad, r2, L, tLArad, E11, nu, h, m1,
m2, n2, nx=nx, nt=nt, num_cores=num_cores,
method=ni_method, c0=c0, m0=m0, n0=n0)
else:
k0L = kG*0
if with_kLL:
kLL = calc_kLL(c, alpharad, r2, L, tLArad, E11, nu, h, m1,
m2, n2, nx=nx, nt=nt, num_cores=num_cores,
method=ni_method, c0=c0, m0=m0, n0=n0)
kLL = make_symmetric(kLL)
else:
kLL = kG*0
else:
if with_k0L:
k0L = calc_k0L(c, alpharad, r2, L, tLArad, F, m1, m2, n2,
nx=nx, nt=nt, num_cores=num_cores,
method=ni_method, c0=c0, m0=m0, n0=n0)
else:
k0L = kG*0
if with_kLL:
kLL = calc_kLL(c, alpharad, r2, L, tLArad, F, m1, m2, n2,
nx=nx, nt=nt, num_cores=num_cores,
method=ni_method, c0=c0, m0=m0, n0=n0)
kLL = make_symmetric(kLL)
else:
kLL = kG*0
else:
raise ValueError(
'Non-Linear analysis not implemented for model {0}'.format(model))
kL0 = k0L.T
#TODO maybe slow...
kT = coo_matrix(self.k0 + k0L + kL0 + kLL + kG)
# kS was deprecated, now fint is integrated numerically
#kS = coo_matrix(self.k0 + k0L/2 + kL0 + kLL/2)
k = self.exclude_dofs_matrix(kT, return_kuk=True)
self.kTuk = k['kuk']
self.kTuu = k['kuu']
#NOTE intended for non-linear eigenvalue analyses
self.kL = csr_matrix(self.k0 + k0L + kL0 + kLL)
self.kG = csr_matrix(kG)
msg('finished!', level=2)
def uvw(self, c, xs=None, ts=None, gridx=300, gridt=300, inc=1.):
r"""Calculates the displacement field
For a given full set of Ritz constants ``c``, the displacement
field is calculated and stored in the parameters
``u``, ``v``, ``w``, ``phix``, ``phit`` of the ``ConeCyl`` object.
Parameters
----------
        c : np.ndarray
The full set of Ritz constants
xs : np.ndarray
The `x` positions where to calculate the displacement field.
Default is ``None`` and the method ``_default_field`` is used.
ts : np.ndarray
The ``theta`` positions where to calculate the displacement field.
Default is ``None`` and the method ``_default_field`` is used.
gridx : int
Number of points along the `x` axis where to calculate the
displacement field.
gridt : int
Number of points along the `theta` where to calculate the
displacement field.
inc : float, optional
Load increment, necessary to calculate the full set of Ritz
constants using :meth:`calc_full_c`.
Returns
-------
out : tuple
A tuple of ``np.ndarrays`` containing
``(u, v, w, phix, phit)``.
Notes
-----
        The returned values ``u``, ``v``, ``w``, ``phix``, ``phit`` are
stored as parameters with the same name in the ``ConeCyl`` object.
"""
xs, ts, xshape, tshape = self._default_field(xs, ts, gridx, gridt)
alpharad = self.alpharad
tLArad = self.tLArad
m1 = self.m1
m2 = self.m2
n2 = self.n2
r2 = self.r2
L = self.L
c = self.calc_full_c(c, inc=inc)
model_dict = get_model(self.model)
fuvw = model_dict['commons'].fuvw
us, vs, ws, phixs, phits = fuvw(c, m1, m2, n2, alpharad, r2, L,
tLArad, xs, ts, self.out_num_cores)
self.u = us.reshape(xshape)
self.v = vs.reshape(xshape)
self.w = ws.reshape(xshape)
self.phix = phixs.reshape(xshape)
self.phit = phits.reshape(xshape)
return self.u, self.v, self.w, self.phix, self.phit
def strain(self, c, xs=None, ts=None, gridx=300, gridt=300, inc=1.):
r"""Calculates the strain field
Parameters
----------
c : np.ndarray
The Ritz constants vector to be used for the strain field
calculation.
xs : np.ndarray, optional
The `x` coordinates where to calculate the strains.
ts : np.ndarray, optional
The `\theta` coordinates where to calculate the strains, must
have the same shape as ``xs``.
gridx : int, optional
When ``xs`` and ``ts`` are not supplied, ``gridx`` and ``gridt``
are used.
gridt : int, optional
When ``xs`` and ``ts`` are not supplied, ``gridx`` and ``gridt``
are used.
inc : float, optional
Load increment, necessary to calculate the full set of Ritz
constants using :meth:`calc_full_c`.
"""
xs, ts, xshape, tshape = self._default_field(xs, ts, gridx, gridt)
alpharad = self.alpharad
L = self.L
r2 = self.r2
sina = self.sina
cosa = self.cosa
tLArad = self.tLArad
m1 = self.m1
m2 = self.m2
n2 = self.n2
c0 = self.c0
m0 = self.m0
n0 = self.n0
funcnum = self.funcnum
model = self.model
model_dict = get_model(model)
NL_kinematics = model.split('_')[1]
fstrain = model_dict['commons'].fstrain
e_num = model_dict['e_num']
if 'donnell' in NL_kinematics:
int_NL_kinematics = 0
elif 'sanders' in NL_kinematics:
int_NL_kinematics = 1
else:
raise NotImplementedError(
'{} is not a valid "NL_kinematics" option'.format(
NL_kinematics))
c = self.calc_full_c(c, inc=inc)
es = fstrain(c, sina, cosa, tLArad, xs, ts, r2, L,
m1, m2, n2, c0, m0, n0, funcnum, int_NL_kinematics,
self.out_num_cores)
return es.reshape((xshape + (e_num,)))
def stress(self, c, xs=None, ts=None, gridx=300, gridt=300, inc=1.):
r"""Calculates the stress field
Parameters
----------
c : np.ndarray
            The Ritz constants vector to be used for the stress field
            calculation.
xs : np.ndarray, optional
The `x` coordinates where to calculate the strains.
ts : np.ndarray, optional
The `\theta` coordinates where to calculate the strains, must
have the same shape as ``xs``.
gridx : int, optional
When ``xs`` and ``ts`` are not supplied, ``gridx`` and ``gridt``
are used.
gridt : int, optional
When ``xs`` and ``ts`` are not supplied, ``gridx`` and ``gridt``
are used.
inc : float, optional
Load increment, necessary to calculate the full set of Ritz
constants using :meth:`calc_full_c`.
"""
xs, ts, xshape, tshape = self._default_field(xs, ts, gridx, gridt)
F = self.F
alpharad = self.alpharad
L = self.L
r2 = self.r2
sina = self.sina
cosa = self.cosa
tLArad = self.tLArad
m1 = self.m1
m2 = self.m2
n2 = self.n2
c0 = self.c0
m0 = self.m0
n0 = self.n0
funcnum = self.funcnum
model = self.model
model_dict = get_model(model)
NL_kinematics = model.split('_')[1]
fstress = model_dict['commons'].fstress
e_num = model_dict['e_num']
if 'donnell' in NL_kinematics:
int_NL_kinematics = 0
elif 'sanders' in NL_kinematics:
int_NL_kinematics = 1
else:
raise NotImplementedError(
'{} is not a valid "NL_kinematics" option'.format(
NL_kinematics))
c = self.calc_full_c(c, inc=inc)
Ns = fstress(c, F, sina, cosa, tLArad, xs, ts, r2, L,
m1, m2, n2, c0, m0, n0, funcnum, int_NL_kinematics,
self.out_num_cores)
return Ns.reshape((xshape + (e_num,)))
def calc_fint(self, c, inc=1., m=1, return_u=True):
r"""Calculates the internal force vector `\{F_{int}\}`
The following attributes will affect the numerical integration:
================= ================================================
Attribute Description
================= ================================================
``ni_num_cores`` ``int``, number of cores used for the numerical
integration
``ni_method`` ``str``, integration method:
- ``'trapz2d'`` for the 2-D trapezoidal rule
- ``'simps2d'`` for the 2-D Simpson's rule
``nx`` ``int``, number of integration points along the
`x` coordinate
``nt`` ``int``, number of integration points along the
`\theta` coordinate
================= ================================================
Parameters
----------
c : np.ndarray
The Ritz constants that will be used to compute the internal
forces.
inc : float, optional
Load increment, necessary to calculate the full set of Ritz
constants using :meth:`calc_full_c`.
m : integer, optional
A multiplier to be applied to ``nx`` and ``nt``, if one
wishes to use more integration points.
return_u : bool, optional
Whether the internal force vector corresponding to the unknown
set of Ritz constants should be returned.
Returns
-------
fint : np.ndarray
The internal force vector.
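Examples
--------
A hypothetical sketch (assuming ``cc`` is an already defined ``ConeCyl``
object with its matrices computed and ``c`` a Ritz constants vector);
``m=2`` doubles the number of integration points:
>>> fint = cc.calc_fint(c, inc=1., m=2)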
"""
c = self.calc_full_c(c, inc=inc)
if 'iso_' in self.model:
nlmodule = modelDB.db[self.model[4:]]['non-linear']
else:
nlmodule = modelDB.db[self.model]['non-linear']
ni_method = self.ni_method
ni_num_cores = self.ni_num_cores
nx = self.nx*m
nt = self.nt*m
fint = nlmodule.calc_fint_0L_L0_LL(c, self.alpharad, self.r2, self.L,
self.tLArad, self.F, self.m1, self.m2, self.n2, nx, nt,
ni_num_cores, ni_method, self.c0, self.m0, self.n0)
fint += self.k0*c
if return_u:
fint = np.delete(fint, self.excluded_dofs)
return fint
def add_SPL(self, PL, pt=0.5, thetadeg=0., increment=False):
"""Add a Single Perturbation Load `\{{F_{PL}}_i\}`
Adds a perturbation load to the ``ConeCyl`` object, the perturbation
load is a particular case of the punctual load with only a normal
component.
Parameters
----------
PL : float
The perturbation load value.
pt : float, optional
The normalized position along the `x` axis in which the new SPL
will be included.
thetadeg : float, optional
The angular position of the SPL in degrees.
increment : bool, optional
If this perturbation load should be incrementally applied in a
non-linear analysis.
Notes
-----
Each single perturbation load is added to the ``forces`` parameter
of the ``ConeCyl`` object, which may be changed by the analyst at
any time.
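Examples
--------
A hypothetical sketch adding a perturbation load at mid-height (assuming
``cc`` is an already defined ``ConeCyl`` object; the load value is
arbitrary):
>>> cc.add_SPL(4., pt=0.5, thetadeg=0.)
>>> cs = cc.static(NLgeom=True)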
"""
self._rebuild()
thetarad = deg2rad(thetadeg)
if increment:
self.forces_inc.append([pt*self.L, thetarad, 0., 0., PL])
else:
self.forces.append([pt*self.L, thetarad, 0., 0., PL])
def add_force(self, x, thetadeg, fx, ftheta, fz, increment=False):
r"""Add a punctual force
Adds a force vector `\{f_x, f_\theta, f_z\}^T` to the ``forces``
parameter of the ``ConeCyl`` object.
Parameters
----------
x : float
The `x` position.
thetadeg : float
The `\theta` position in degrees.
fx : float
The `x` component of the force vector.
ftheta : float
The `\theta` component of the force vector.
fz : float
The `z` component of the force vector.
increment : bool, optional
If this punctual force should be incrementally applied in a
non-linear analysis.
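Examples
--------
A hypothetical sketch adding an inward normal force at mid-height and
``thetadeg=90`` (assuming ``cc`` is an already defined ``ConeCyl``
object; the force value is arbitrary):
>>> cc.add_force(x=cc.L/2., thetadeg=90., fx=0., ftheta=0., fz=-10.)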
"""
thetarad = deg2rad(thetadeg)
if increment:
self.forces_inc.append([x, thetarad, fx, ftheta, fz])
else:
self.forces.append([x, thetarad, fx, ftheta, fz])
def calc_fext(self, inc=1., kuk=None, silent=False):
"""Calculates the external force vector `\{F_{ext}\}`
Recall that:
.. math::
\{F_{ext}\}=\{{F_{ext}}_0\} + \{{F_{ext}}_\lambda\}
such that the terms in `\{{F_{ext}}_0\}` are constant and the terms in
`\{{F_{ext}}_\lambda\}` will be scaled by the parameter ``inc``.
Parameters
----------
inc : float, optional
Since this function is called during the non-linear analysis,
``inc`` will multiply the terms `\{{F_{ext}}_\lambda\}`.
kuk : np.ndarray, optional
Obsolete, created for displacement controlled analyses, but the
implementation has not been finished, see
:meth:`exclude_dofs_matrix`.
silent : bool, optional
A boolean to tell whether the msg messages should be printed.
Returns
-------
fext : np.ndarray
The external force vector
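Examples
--------
A hypothetical sketch computing the external force vector at half of the
variable loads (assuming ``cc`` is an already defined ``ConeCyl``
object):
>>> fext = cc.calc_fext(inc=0.5)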
"""
self._rebuild()
if self.k0 is None:
self._calc_linear_matrices()
msg('Calculating external forces...', level=2, silent=silent)
uTM = inc*self.uTM
Nxxtop = inc*self.Nxxtop
thetaTrad = inc*self.thetaTrad
sina = self.sina
cosa = self.cosa
r2 = self.r2
L = self.L
tLArad = self.tLArad
m1 = self.m1
m2 = self.m2
n2 = self.n2
pdC = self.pdC
pdT = self.pdT
model = self.model
model_dict = get_model(model)
i0 = model_dict['i0']
j0 = model_dict['j0']
num0 = model_dict['num0']
num1 = model_dict['num1']
num2 = model_dict['num2']
dofs = model_dict['dofs']
fg = model_dict['commons'].fg
size = self.get_size()
g = np.zeros((dofs, size), dtype=DOUBLE)
fext = np.zeros(size, dtype=DOUBLE)
fext = np.delete(fext, self.excluded_dofs)
# constant punctual forces
for i, force in enumerate(self.forces):
x, theta, fx, ftheta, fz = force
fg(g, m1, m2, n2, r2, x, theta, L, cosa, tLArad)
gu = np.delete(g, self.excluded_dofs, axis=1)
if dofs == 3:
fpt = np.array([[fx, ftheta, fz]])
elif dofs == 5:
fpt = np.array([[fx, ftheta, fz, 0, 0]])
fext += -fpt.dot(gu).ravel()
# incremented punctual forces
for i, force in enumerate(self.forces_inc):
x, theta, fx, ftheta, fz = force
fg(g, m1, m2, n2, r2, x, theta, L, cosa, tLArad)
gu = np.delete(g, self.excluded_dofs, axis=1)
if dofs == 3:
fpt = inc*np.array([[fx, ftheta, fz]])
elif dofs == 5:
fpt = inc*np.array([[fx, ftheta, fz, 0, 0]])
fext += -fpt.dot(gu).ravel()
# axial load
fext_tmp = np.zeros(size, dtype=DOUBLE)
if not 0 in self.excluded_dofs:
fext_tmp[0] += Nxxtop[0]*(2*pi*r2)/cosa
if 'bc2' in model or 'bc4' in model:
for j2 in range(j0, n2+j0):
for i2 in range(i0, m2+i0):
row = (num0 + num1*m1
+ (i2-i0)*num2 + (j2-j0)*num2*m2)
rowNxx = 1+2*(j2-j0)
fext_tmp[row+0]+=(Nxxtop[rowNxx+0]*pi*r2)
fext_tmp[row+1]+=(Nxxtop[rowNxx+1]*pi*r2)
else:
if kuk is None:
kuk_C = self.k0uk[:, 0].ravel()
else:
kuk_C = kuk[:, 0].ravel()
fext += -uTM*kuk_C
if not 2 in self.excluded_dofs:
fext_tmp[2] += Nxxtop[2]*(2*pi*r2)/cosa
# pressure
P = self.P + inc*self.P_inc
if P != 0:
if 'clpt' in model:
for i1 in range(i0, m1+i0):
if i1 == 0:
continue
col = num0 + (i1-i0)*num1
fext_tmp[col+2] += P*(L*2./i1*(r2 - (-1)**i1*(r2 + L*sina)))
elif 'fsdt' in model:
#TODO it might be the same as for the CLPT
raise NotImplementedError(
'Pressure not implemented for static analysis for FSDT')
fext_tmp = np.delete(fext_tmp, self.excluded_dofs)
fext += fext_tmp
# torsion
if pdT:
if kuk is None:
kuk_T = self.k0uk[:, 1].ravel()
else:
kuk_T = kuk[:, 1].ravel()
fext += -thetaTrad*kuk_T
else:
T = self.T + inc*self.T_inc
if T != 0:
fg(g, m1, m2, n2, r2, 0, 0, L, cosa, tLArad)
gu = np.delete(g, self.excluded_dofs, axis=1)
if dofs == 3:
fpt = np.array([[0, T/r2, 0]])
elif dofs == 5:
fpt = np.array([[0, T/r2, 0, 0, 0]])
fext += fpt.dot(gu).ravel()
msg('finished!', level=2, silent=silent)
return fext
def static(self, NLgeom=False, silent=False):
"""Static analysis for cones and cylinders
The analysis can be linear or geometrically non-linear. See
:class:`.Analysis` for further details about the parameters
controlling the non-linear analysis.
Parameters
----------
NLgeom : bool
Flag to indicate whether a linear or a non-linear analysis is to
be performed.
silent : bool, optional
A boolean to tell whether the msg messages should be printed.
Returns
-------
cs : list
A list containing the Ritz constants for each load increment of
the static analysis. The list will have only one entry in case
of a linear analysis.
Notes
-----
The returned ``cs`` is stored in the ``cs`` parameter of the
``ConeCyl`` object. The actual increments used in the non-linear
analysis are stored in the ``increments`` parameter.
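Examples
--------
A hypothetical sketch running a linear analysis and plotting the normal
displacement of the resulting (single) solution (assuming ``cc`` is an
already defined ``ConeCyl`` object):
>>> cs = cc.static(NLgeom=False)
>>> cc.plot(cs[-1], vec='w')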
"""
self.cs = []
self.increments = []
if self.pdC:
text = 'Non-linear analysis with prescribed displacements'
raise NotImplementedError(text)
if NLgeom and not modelDB.db[self.model]['non-linear static']:
msg('________________________________________________',
silent=silent)
msg('', silent=silent)
warn('Model {} cannot be used in non-linear static analysis!'.
format(self.model), silent=silent)
msg('________________________________________________',
silent=silent)
raise ValueError('Model {} cannot be used in non-linear '
'static analysis!'.format(self.model))
elif not NLgeom and not modelDB.db[self.model]['linear static']:
msg('________________________________________________',
level=1, silent=silent)
msg('', level=1, silent=silent)
warn('Model {} cannot be used in linear static analysis!'.
format(self.model), level=1, silent=silent)
msg('________________________________________________',
level=1, silent=silent)
raise ValueError('Model {} cannot be used in linear '
'static analysis!'.format(self.model))
self.analysis.static(NLgeom=NLgeom, silent=silent)
self.cs = self.analysis.cs
self.increments = self.analysis.increments
return self.cs
def plot(self, c, invert_x=False, plot_type=1, vec='w',
deform_u=False, deform_u_sf=100.,
filename='',
ax=None, figsize=(3.5, 2.), save=True,
add_title=True, title='',
colorbar=False, cbar_nticks=2, cbar_format=None,
cbar_title='', cbar_fontsize=10,
aspect='equal', clean=True, dpi=400,
texts=[], xs=None, ts=None, gridx=300, gridt=300,
num_levels=400, inc=1.):
r"""Contour plot for a Ritz constants vector.
Parameters
----------
c : np.ndarray
The Ritz constants that will be used to compute the field contour.
vec : str, optional
Can be one of the components:
- Displacement: ``'u'``, ``'v'``, ``'w'``, ``'phix'``, ``'phit'``,
``'magnitude'``
- Strain: ``'exx'``, ``'ett'``, ``'gxt'``, ``'kxx'``, ``'ktt'``,
``'kxt'``, ``'gtz'``, ``'gxz'``
- Stress: ``'Nxx'``, ``'Ntt'``, ``'Nxt'``, ``'Mxx'``, ``'Mtt'``,
``'Mxt'``, ``'Qt'``, ``'Qx'``
deform_u : bool, optional
If ``True`` the contour plot will look deformed.
deform_u_sf : float, optional
The scaling factor used to deform the contour.
invert_x : bool, optional
Inverts the `x` axis of the plot. It may be used to match
the coordinate system of the finite element models created
using the ``desicos.abaqus`` module.
plot_type : int, optional
For cylinders only ``4`` and ``5`` are valid.
For cones all the following types can be used:
- ``1``: concave up (with ``invert_x=False``) (default)
- ``2``: concave down (with ``invert_x=False``)
- ``3``: stretched closed
- ``4``: stretched opened (`r \times \theta` vs. `H`)
- ``5``: stretched opened (`\theta` vs. `H`)
save : bool, optional
Flag telling whether the contour should be saved to an image file.
dpi : int, optional
Resolution of the saved file in dots per inch.
filename : str, optional
The file name for the generated image file. If no value is given,
the `name` parameter of the ``ConeCyl`` object will be used.
ax : AxesSubplot, optional
When ``ax`` is given, the contour plot will be created inside it.
figsize : tuple, optional
The figure size given by ``(width, height)``.
add_title : bool, optional
If a title should be added to the figure.
title : str, optional
If any string is given ``add_title`` will be ignored and the given
title added to the contour plot.
colorbar : bool, optional
If a colorbar should be added to the contour plot.
cbar_nticks : int, optional
Number of ticks added to the colorbar.
cbar_format : [ None | format string | Formatter object ], optional
See the ``matplotlib.pyplot.colorbar`` documentation.
cbar_fontsize : int, optional
Fontsize of the colorbar labels.
cbar_title : str, optional
Colorbar title. If ``cbar_title == ''`` no title is added.
aspect : str, optional
String that will be passed to the ``AxesSubplot.set_aspect()``
method.
clean : bool, optional
Clean axes ticks, grids, spines etc.
xs : np.ndarray, optional
The `x` positions where to calculate the displacement field.
Default is ``None`` and the method ``_default_field`` is used.
ts : np.ndarray, optional
The ``theta`` positions where to calculate the displacement field.
Default is ``None`` and the method ``_default_field`` is used.
gridx : int, optional
Number of points along the `x` axis where to calculate the
displacement field.
gridt : int, optional
Number of points along the `theta` axis where to calculate the
displacement field.
num_levels : int, optional
Number of contour levels (higher values make the contour smoother).
inc : float, optional
Load increment, necessary to calculate the full set of Ritz
constants using :meth:`calc_full_c`.
Returns
-------
ax : matplotlib.axes.Axes
The Matplotlib object that can be used to modify the current plot
if needed.
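Examples
--------
A hypothetical sketch plotting the normal displacement contour with a
colorbar and without saving to a file (assuming ``cc`` is an already
defined ``ConeCyl`` object and ``c`` a Ritz constants vector):
>>> ax = cc.plot(c, vec='w', colorbar=True, save=False)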
"""
msg('Plotting contour...')
ubkp, vbkp, wbkp, phixbkp, phitbkp = (self.u, self.v, self.w,
self.phix, self.phit)
import matplotlib.pyplot as plt
import matplotlib
from plotutils import get_filename
c = self.calc_full_c(c, inc=inc)
msg('Computing field variables...', level=1)
displs = ['u', 'v', 'w', 'phix', 'phit', 'magnitude', 'test']
strains = ['exx', 'ett', 'gxt', 'kxx', 'ktt', 'kxt', 'gtz', 'gxz']
stresses = ['Nxx', 'Ntt', 'Nxt', 'Mxx', 'Mtt', 'Mxt', 'Qt', 'Qx']
if vec in displs or 'eq_' in vec:
self.uvw(c, xs=xs, ts=ts, gridx=gridx, gridt=gridt, inc=inc)
if vec == 'magnitude':
u = self.u
v = self.v
w = self.w
field = (u**2 + v**2 + w**2)**0.5
elif 'eq_' in vec:
u = self.u
v = self.v
w = self.w
field = eval(vec[3:])
else:
field = getattr(self, vec)
elif vec in strains:
es = self.strain(c, xs=xs, ts=ts,
gridx=gridx, gridt=gridt, inc=inc)
field = es[..., strains.index(vec)]
elif vec in stresses:
Ns = self.stress(c, xs=xs, ts=ts,
gridx=gridx, gridt=gridt, inc=inc)
field = Ns[..., stresses.index(vec)]
else:
raise ValueError(
'{0} is not a valid "vec" parameter value!'.format(vec))
msg('Finished!', level=1)
Xs = self.Xs
Ts = self.Ts
vecmin = field.min()
vecmax = field.max()
levels = np.linspace(vecmin, vecmax, num_levels)
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
else:
if isinstance(ax, matplotlib.axes.Axes):
ax = ax
fig = ax.figure
save = False
else:
raise ValueError('"ax" must be an Axes object')
def r(x):
return self.r2 + x*self.sina
if self.is_cylinder:
plot_type=4
if plot_type == 1:
r_plot = self.r2/self.sina + Xs
r_plot_max = self.r2/self.sina + self.L
y = r_plot_max - r_plot*cos(Ts*self.sina)
x = r_plot*sin(Ts*self.sina)
elif plot_type == 2:
r_plot = self.r2/self.sina + Xs
y = r_plot*cos(Ts*self.sina)
x = r_plot*sin(Ts*self.sina)
elif plot_type == 3:
r_plot = self.r2/self.sina + Xs
r_plot_max = self.r2/self.sina + self.L
y = r_plot_max - r_plot*cos(Ts)
x = r_plot*sin(Ts)
elif plot_type == 4:
x = r(Xs)*Ts
y = self.L-Xs
elif plot_type == 5:
x = Ts
y = Xs
if deform_u:
if vec in displs:
pass
else:
self.uvw(c, xs=xs, ts=ts, gridx=gridx, gridt=gridt, inc=inc)
field_u = self.u
y -= deform_u_sf*field_u
contour = ax.contourf(x, y, field, levels=levels)
if colorbar:
from mpl_toolkits.axes_grid1 import make_axes_locatable
fsize = cbar_fontsize
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
cbarticks = np.linspace(vecmin, vecmax, cbar_nticks)
cbar = plt.colorbar(contour, ticks=cbarticks, format=cbar_format,
cax=cax)
if cbar_title:
cax.text(0.5, 1.05, cbar_title, horizontalalignment='center',
verticalalignment='bottom', fontsize=fsize)
cbar.outline.remove()
cbar.ax.tick_params(labelsize=fsize, pad=0., tick2On=False)
if invert_x:
ax.invert_yaxis()
if title!='':
ax.set_title(str(title))
elif add_title:
if self.analysis.last_analysis == 'static':
ax.set_title(r'$m_1={0}$, $m_2={1}$, $n_2={2}$'.
format(self.m1, self.m2, self.n2))
elif self.analysis.last_analysis == 'lb':
ax.set_title(
r'$m_1={0}$, $m_2={1}$, $n_2={2}$, $\lambda_{{CR}}={3:1.3e}$'.format(
self.m1, self.m2, self.n2, self.eigvals[0]))
fig.tight_layout()
ax.set_aspect(aspect)
if clean:
ax.grid(False)
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.set_frame_on(False)
for kwargs in texts:
ax.text(transform=ax.transAxes, **kwargs)
if save:
if not filename:
filename = get_filename(self)
fig.savefig(filename, transparent=True,
bbox_inches='tight', pad_inches=0.05, dpi=dpi)
plt.close()
if ubkp is not None:
self.u = ubkp
if vbkp is not None:
self.v = vbkp
if wbkp is not None:
self.w = wbkp
if phixbkp is not None:
self.phix = phixbkp
if phitbkp is not None:
self.phit = phitbkp
msg('finished!')
return ax
def plotAbaqus(self, frame, fieldOutputKey, vec, nodes, numel_cir,
elem_type='S4R', ignore=[],
ax=None, figsize=(3.3, 3.3), save=True,
aspect='equal', clean=True, plot_type=1,
outpath='', filename='', npzname='', pyname='',
num_levels=400):
r"""Print a field output for a cylinder/cone model from Abaqus
This function is intended to be used with models created by the
`DESICOS plug-in for Abaqus <http://desicos.github.io/desicos/>`_,
where a **mapped mesh** is used and the models comparable to the
models of :mod:`compmech.conecyl`.
The ``frame`` and ``nodes`` input types are described in
`Abaqus Scripting Reference Manual
<http://abaqus.me.chalmers.se/v6.11/books/ker/default.htm>`_ and
they can be obtained through:
>>> frame = session.odbs['odb_name.odb'].steps['step_name'].frames[0]
>>> nodes = mdb.models['model_name'].parts['part_name'].nodes
Parameters
----------
frame : OdbFrame
The frame from where the field output will be taken from.
fieldOutputKey : str
The field output key to be used. It must be available in
``frame.fieldOutputs.keys()``. This function was tested with
``'UT'`` and ``'U'`` only.
vec : str
The displacement vector to be plotted:
``'u'``, ``'v'`` or ``'w'``.
nodes : MeshNodeArray
The part nodes.
numel_cir : int
The number of elements around the circumference.
elem_type : str, optional
The element type. The elements ``'S4R'`` and ``'S4R5'`` were tested.
ignore : list, optional
A list with the node ids to be ignored. It must contain any nodes
outside the mapped mesh included in ``parts['part_name'].nodes``.
ax : AxesSubplot, optional
When ``ax`` is given, the contour plot will be created inside it.
figsize : tuple, optional
The figure size given by ``(width, height)``.
save : bool, optional
Flag telling whether the contour should be saved to an image file.
aspect : str, optional
String that will be passed to the ``AxesSubplot.set_aspect()``
method.
clean : bool, optional
Clean axes ticks, grids, spines etc.
plot_type : int, optional
See :meth:`plot`.
outpath : str, optional
Output path where the data from Abaqus and the plots are
saved (see notes).
filename : str, optional
The file name for the generated image file.
npzname : str, optional
The file name for the generated npz file.
pyname : str, optional
The file name for the generated Python file.
num_levels : int, optional
Number of contour levels (higher values make the contour smoother).
Returns
-------
out : tuple
Where ``out[0]`` and ``out[1]`` contain the circumferential and
meridional grids of coordinates and ``out[2]`` the corresponding
field output.
Notes
-----
The data is saved using ``np.savez()`` into ``outpath`` as
``abaqus_output.npz`` with an accompanying script for plotting
``abaqus_output_plot.py``, very handy when Matplotlib is not
importable from Abaqus.
"""
workingplt = True
if not npzname:
npzname = 'abaqus_output.npz'
npzname = os.path.join(outpath, npzname)
if not pyname:
pyname = 'abaqus_output_plot.py'
pyname = os.path.join(outpath, pyname)
if not filename:
filename = 'plot_from_abaqus.png'
filename = os.path.join(outpath, filename)
try:
import matplotlib.pyplot as plt
import matplotlib
except Exception:
workingplt = False
try:
if not frame:
frame = utils.get_current_frame()
if not frame:
raise ValueError('A frame must be selected!')
frame_num = int(frame.frameValue)
coords = np.array([n.coordinates for n in nodes
if n.label not in ignore])
#TODO include more outputs like stress etc
field = frame.fieldOutputs[fieldOutputKey]
uvw_rec = np.array([val.data for val in field.values
if getattr(val.instance, 'name', None) == 'INSTANCECYLINDER'])
u_rec = uvw_rec[:,0]
v_rec = uvw_rec[:,1]
w_rec = uvw_rec[:,2]
res_alpha = np.arctan2(v_rec, u_rec)
thetas = np.arctan2(coords[:, 1], coords[:, 0])
sina = sin(self.alpharad)
cosa = cos(self.alpharad)
ucyl = -w_rec
vcyl = v_rec*cos(thetas) - u_rec*sin(thetas)
wcyl = v_rec*sin(thetas) + u_rec*cos(thetas)
u = wcyl*sina + ucyl*cosa
v = vcyl
w = wcyl*cosa - ucyl*sina
displ_vecs = {'u':u, 'v':v, 'w':w}
uvw = displ_vecs[vec]
zs = coords[:, 2]
nt = numel_cir
if 'S8' in elem_type:
raise NotImplementedError('{0} elements!'.format(elem_type))
#nt *= 2
#first sort
asort = zs.argsort()
zs = zs[asort].reshape(-1, nt)
thetas = thetas[asort].reshape(-1, nt)
uvw = uvw[asort].reshape(-1, nt)
#second sort
asort = thetas.argsort(axis=1)
for i, asorti in enumerate(asort):
zs[i,:] = zs[i,:][asorti]
thetas[i,:] = thetas[i,:][asorti]
uvw[i,:] = uvw[i,:][asorti]
H = self.H
r2 = self.r2
r1 = self.r1
L = H/cosa
def fr(z):
return r1 - z*sina/cosa
if self.alpharad == 0.:
plot_type=4
if plot_type == 1:
r_plot = fr(zs)
if self.alpharad == 0.:
r_plot_max = L
else:
r_plot_max = r2/sina + L
y = r_plot_max - r_plot*cos(thetas*sina)
x = r_plot*sin(thetas*sina)
elif plot_type == 2:
r_plot = fr(zs)
y = r_plot*cos(thetas*sina)
x = r_plot*sin(thetas*sina)
elif plot_type == 3:
r_plot = fr(zs)
r_plot_max = r2/sina + L
y = r_plot_max - r_plot*cos(thetas)
x = r_plot*sin(thetas)
elif plot_type == 4:
x = fr(zs)*thetas
y = zs
elif plot_type == 5:
x = thetas
y = zs
cir = x
mer = y
field = uvw
if workingplt:
levels = np.linspace(field.min(), field.max(), num_levels)
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
else:
if isinstance(ax, matplotlib.axes.Axes):
ax = ax
fig = ax.figure
save = False
else:
raise ValueError('"ax" must be an Axes object')
ax.contourf(cir, mer, field, levels=levels)
ax.grid(False)
ax.set_aspect(aspect)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
#lim = self.r2*pi
#ax.xaxis.set_ticks([-lim, 0, lim])
#ax.xaxis.set_ticklabels([r'$-\pi$', '$0$', r'$+\pi$'])
#ax.set_title(
#r'$PL=20 N$, $F_{{C}}=50 kN$, $w_{{PL}}=\beta$, $mm$')
if clean:
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.set_frame_on(False)
if save:
msg('Plot saved at: {0}'.format(filename))
plt.tight_layout()
plt.savefig(filename, transparent=True,
bbox_inches='tight', pad_inches=0.05,
dpi=400)
else:
warn('Matplotlib cannot be imported from Abaqus')
np.savez(npzname, cir=cir, mer=mer, field=field)
with open(pyname, 'w') as f:
f.write("import os\n")
f.write("import numpy as np\n")
f.write("import matplotlib.pyplot as plt\n")
f.write("tmp = np.load('abaqus_output.npz')\n")
f.write("cir = tmp['cir']\n")
f.write("mer = tmp['mer']\n")
f.write("field = tmp['field']\n")
f.write("clean = {0}\n".format(clean))
f.write("filename = '{0}'\n".format(filename))
f.write("plt.figure(figsize={0})\n".format(figsize))
f.write("ax = plt.gca()\n")
f.write("levels = np.linspace(field.min(), field.max(), {0})\n".format(
num_levels))
f.write("ax.contourf(cir, mer, field, levels=levels)\n")
f.write("ax.grid(b=None)\n")
f.write("ax.set_aspect('{0}')\n".format(aspect))
f.write("ax.xaxis.set_ticks_position('bottom')\n")
f.write("ax.yaxis.set_ticks_position('left')\n")
f.write("ax.xaxis.set_ticks([{0}, 0, {1}])\n".format(
-self.r2*pi, self.r2*pi))
f.write("ax.xaxis.set_ticklabels([r'$-\pi$', '$0$', r'$+\pi$'])\n")
f.write("ax.set_title(r'Abaqus, $PL=20 N$, $F_{{C}}=50 kN$, $w_{{PL}}=\beta$, $mm$')\n")
f.write("if clean:\n")
f.write(" ax.xaxis.set_ticks_position('none')\n")
f.write(" ax.yaxis.set_ticks_position('none')\n")
f.write(" ax.xaxis.set_ticklabels([])\n")
f.write(" ax.yaxis.set_ticklabels([])\n")
f.write(" ax.set_frame_on(False)\n")
f.write("if not filename:\n")
f.write(" filename = 'abaqus_result.png'\n")
f.write("plt.savefig(filename, transparent=True,\n")
f.write(" bbox_inches='tight', pad_inches=0.05, dpi=400)\n")
f.write("plt.show()\n")
msg('Output exported to "{0}"'.format(npzname))
msg('Please run the file "{0}" to plot the output'.format(
pyname))
return cir, mer, field
except Exception:
traceback.print_exc()
error('Opened plot could not be generated! :(')
def SPLA(self, PLs, NLgeom=True, plot=False):
"""Runs the Single Perturbation Load Approach (SPLA)
A set of non-linear analyses is performed, one for each perturbation
load given in ``PLs``.
Parameters
----------
PLs : list
The perturbation loads used to build the knock-down curve. It must
be a list of float values.
NLgeom : bool, optional
Flag passed to the ``static()`` method that tells whether a
geometrically non-linear analysis is to be performed.
plot : bool, optional
Whether the last solution of each run should be plotted using
:meth:`plot`.
Returns
-------
curves : list
The sequence of curves, one curve for each perturbation load given
in the input parameter ``PLs``.
Each curve in the list is a ``dict`` object with the keys:
================= ==============================================
Key Description
================= ==============================================
``'wall_time_s'`` The wall time for the non-linear analysis
``'name'`` The name of the curve. Ex: ``'PL = 1. N'``
``'cs'`` A list with a vector of Ritz constants for
each load increment needed
``'increments'`` A list with the values of increments needed
``'wPLs'`` A list with the normal displacement at the
perturbation load application point for each
load increment
``'uTMs'`` A list containing the axial displacement for
each load increment
``'Fcs'`` A list containing the axial reaction force
for each load increment
================= ==============================================
Notes
-----
The curves are stored in the ``ConeCyl`` parameter
``outputs['SPLA_curves']``.
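Examples
--------
A hypothetical sketch building a knock-down curve from three
perturbation load levels (assuming ``cc`` is an already defined
``ConeCyl`` object; the load values are arbitrary):
>>> curves = cc.SPLA([1., 5., 10.], NLgeom=True)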
"""
curves = []
for PLi, PL in enumerate(PLs):
curve = {}
self.forces = []
self.add_SPL(PL)
time1 = time.clock()
cs = self.static(NLgeom=NLgeom)
if plot:
self.plot(cs[-1])
curve['wall_time_s'] = time.clock() - time1
curve['name'] = 'PL = {} N'.format(PL)
curve['cs'] = cs
curve['increments'] = self.increments
curve['wPLs'] = []
curve['uTMs'] = []
curve['Fcs'] = []
for i, c in enumerate(self.cs):
inc = self.increments[i]
self.uvw(c, xs=self.L/2, ts=0)
curve['wPLs'].append(self.w[0])
if self.pdC:
ts = np.linspace(0, pi*2, 1000, endpoint=False)
xs = np.zeros_like(ts)
es = self.strain(c=c, xs=xs, ts=ts, inc=inc)
fvec = self.F.dot(es.T)
Fc = -fvec[0,:].mean()*(2*self.r2*pi)
curve['Fcs'].append(Fc/1000)
curve['uTMs'].append(inc*self.uTM)
else:
curve['Fcs'].append(inc*self.Fc/1000)
curve['uTMs'].append(c[0])
curves.append(curve)
self.outputs['SPLA_curves'] = curves
return curves
def apply_shim(self, thetadeg, width, thick, ncpts=10000):
r"""Distributes the axial load in order to simulate a shim
The axial load distribution `{N_{xx}}_{top}` will be adjusted such
that the resulting displacement `u` at `x=0` (top edge) will look
similar to a case where a shim is applied.
Parameters
----------
thetadeg : float
Position in degrees of the center of the shim.
width : float
Circumferential width of the shim.
thick : float
Thickness of the shim.
ncpts : int, optional
Number of control points used in the least-squares routine.
Returns
-------
ts : np.ndarray
Positions `\theta` of the control points.
us : np.ndarray
Displacements `u` of the control points.
Notes
-----
This function changes the ``Nxxtop`` parameter of the current
``ConeCyl`` object. Returning ``ts`` and ``us`` is useful for post
processing purposes only.
Examples
--------
>>> ts, us = cc.apply_shim(0., 25.4, 0.1)
"""
ts = np.linspace(-np.pi, np.pi, ncpts)
xs = np.zeros_like(ts)
us = np.zeros_like(ts)
self.static(NLgeom=False)
thetashim = width/self.r2
thetarad = deg2rad(thetadeg)
theta1 = thetarad - thetashim
theta2 = thetarad + thetashim
uTM = self.cs[0][0]
utop = uTM/self.cosa
us += uTM
shim_region = (ts >= theta1) & (ts <= theta2)
us[shim_region] += thick
self.fit_Nxxtop(ts, us)
return ts, us
def fit_Nxxtop(self, ts, us, update_Nxxtop=True):
r"""Adjusts the axial load distribution for a desired top edge
displacement
Parameters
----------
ts : np.ndarray
Coordinates `\theta` of each control point.
us : np.ndarray
Desired displacement `u` for each control point.
update_Nxxtop : bool, optional
Tells whether ``self.Nxxtop`` should be updated.
Returns
-------
Nxxtop : np.ndarray
The coefficients for the `{N_{xx}}_{top}` function.
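Examples
--------
A hypothetical sketch prescribing a uniform top-edge displacement at
``1000`` control points (assuming ``cc`` is an already defined
``ConeCyl`` object; the displacement value is arbitrary):
>>> ts = np.linspace(-np.pi, np.pi, 1000)
>>> us = 0.1*np.ones_like(ts)
>>> Nxxtop = cc.fit_Nxxtop(ts, us)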
"""
from scipy.sparse.linalg import inv as sparseinv
assert ts.ndim == 1 and us.ndim == 1
assert ts.shape[0] == us.shape[0]
xs = np.zeros_like(ts)
if not update_Nxxtop:
Nxxtop_backup = self.Nxxtop.copy()
k0uuinv = sparseinv(csc_matrix(self.k0uu))
Nxxtop_new = self.Nxxtop.copy()
def fit_Nxxtop_residual(Nxxtop_new):
self.Nxxtop = Nxxtop_new.copy()
fext = self.calc_fext(silent=True)
c = k0uuinv*fext
self.uvw(c, xs=xs, ts=ts)
res = (self.u - us)**2
return res
popt, pcov = leastsq(fit_Nxxtop_residual, x0=Nxxtop_new, maxfev=10000)
if not update_Nxxtop:
self.Nxxtop = Nxxtop_backup
else:
self.Nxxtop = popt
return popt
def save(self):
"""Save the ``ConeCyl`` object using ``cPickle``
Notes
-----
The pickled file will have the name stored in ``ConeCyl.name``
followed by a ``'.ConeCyl'`` extension.
"""
name = self.name + '.ConeCyl'
msg('Saving ConeCyl to {}'.format(name))
self.analysis.calc_fext = None
self.analysis.calc_k0 = None
self.analysis.calc_fint = None
self.analysis.calc_kT = None
self._clear_matrices()
with open(name, 'wb') as f:
cPickle.dump(self, f, protocol=cPickle.HIGHEST_PROTOCOL)
| bsd-3-clause |
hsuantien/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
pkreissl/espresso | samples/visualization_ljliquid.py | 2 | 5850 | #
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Visualize a Lennard-Jones liquid with live plotting via matplotlib.
"""
import numpy as np
from matplotlib import pyplot
from threading import Thread
import espressomd
from espressomd import visualization
import argparse
required_features = ["LENNARD_JONES"]
espressomd.assert_features(required_features)
parser = argparse.ArgumentParser(epilog=__doc__)
group = parser.add_mutually_exclusive_group()
group.add_argument("--opengl", action="store_const", dest="visualizer",
const="opengl", help="OpenGL visualizer", default="opengl")
group.add_argument("--mayavi", action="store_const", dest="visualizer",
const="mayavi", help="MayaVi visualizer")
args = parser.parse_args()
print("""
=======================================================
= lj_liquid.py =
=======================================================
""")
# System parameters
#############################################################
# 10 000 Particles
box_l = 10.7437
density = 0.7
# Interaction parameters (repulsive Lennard-Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l] * 3)
np.random.seed(seed=42)
system.time_step = 0.001
system.cell_system.skin = 0.4
# warmup integration (steepest descent)
warm_steps = 20
warm_n_times = 30
# convergence criterion (particles are separated by at least 90% sigma)
min_dist = 0.9 * lj_sig
# integration
int_steps = 10
int_n_times = 50000
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())
# Particle setup
#############################################################
volume = box_l**3
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
print("Simulate {} particles in a cubic box {} at density {}."
.format(n_part, box_l, density).strip())
print("Interactions:\n")
act_min_dist = system.analysis.min_dist()
print("Start with minimal distance {}".format(act_min_dist))
# Select visualizer
if args.visualizer == "mayavi":
visualizer = visualization.mayaviLive(system)
else:
visualizer = visualization.openGLLive(system)
#############################################################
# Warmup Integration #
#############################################################
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
print(system.non_bonded_inter[0, 0].lennard_jones)
# minimize energy using min_dist as the convergence criterion
system.integrator.set_steepest_descent(f_max=0, gamma=1e-3,
max_displacement=lj_sig / 100)
i = 0
while i < warm_n_times and system.analysis.min_dist() < min_dist:
print("minimization: {:+.2e}".format(system.analysis.energy()["total"]))
system.integrator.run(warm_steps)
i += 1
visualizer.update()
print("minimization: {:+.2e}".format(system.analysis.energy()["total"]))
print()
system.integrator.set_vv()
# activate thermostat
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
#############################################################
# Integration #
#############################################################
print("\nStart integration: run %d times %d steps" % (int_n_times, int_steps))
# print initial energies
energies = system.analysis.energy()
print(energies)
plot, = pyplot.plot([0], [energies['total']], label="total")
pyplot.xlabel("Time")
pyplot.ylabel("Energy")
pyplot.legend()
pyplot.show(block=False)
def main_loop():
global energies
print("run at time={:.2f}".format(system.time))
system.integrator.run(int_steps)
visualizer.update()
energies = system.analysis.energy()
plot.set_xdata(np.append(plot.get_xdata(), system.time))
plot.set_ydata(np.append(plot.get_ydata(), energies['total']))
def main_thread():
for _ in range(int_n_times):
main_loop()
last_plotted = 0
def update_plot():
global last_plotted
current_time = plot.get_xdata()[-1]
if last_plotted == current_time:
return
last_plotted = current_time
pyplot.xlim(0, plot.get_xdata()[-1])
pyplot.ylim(plot.get_ydata().min(), plot.get_ydata().max())
pyplot.draw()
pyplot.pause(0.01)
t = Thread(target=main_thread)
t.daemon = True
t.start()
visualizer.register_callback(update_plot, interval=1000)
visualizer.start()
# terminate program
print("\nFinished.")
| gpl-3.0 |
andreww/theia_tools | terraheat.py | 1 | 2132 | #!/usr/bin/env python
import numpy as np
def read_terra_heat(file):
"""Read an optionally compressed heat file from TERRA
Returns five numpy arrays of summary data from a
TERRA heat file. The arrays contain the time (years),
power exiting via the surface (W), entering via
bottom heating (W), added via the decay of radioisotopes
(W) and the total power (W), respectively.
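Example (hypothetical file name):
>>> time, htop, hbot, hrad, heat = read_terra_heat('run1_heat.dat.gz')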
"""
# We can use the numpy loadtxt to read this file format, which is
# nice - and allows file to be a file object or file name, and the
# file can be .gz or .bz2 compressed.
time, htop, hbot, hrad, heat = np.loadtxt(file, unpack=True)
return time, htop, hbot, hrad, heat
def plot_heat(time, htop, hbot, hrad, heat, filename=None):
"""Create a graph of data from a TERRA heat file
Creates a line graph of the contributions to the power
of the system using data that can be read from a
TERRA heat file. Data must be presented as numpy
arrays (or a format that can be converted into a numpy
array). Setting the optional filename argument will result
in the graph being written to a file.
"""
import matplotlib
if filename is not None:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(time, htop, 'g-', label='htop')
ax.plot(time, hbot, 'b-', label='hbot')
ax.plot(time, hrad, 'r-', label='hrad')
ax.plot(time, heat, 'k-', label='heat')
ax.legend()
ax.set_xlabel('Time (yr)')
ax.set_ylabel('Heat (W)')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description=
'Generate a graph from a TERRA heat file.')
parser.add_argument('histfile', help='TERRA heat file')
parser.add_argument('-o', '--outfile', help='Output graph to a file')
args = parser.parse_args()
time, htop, hbot, hrad, heat = read_terra_heat(args.histfile)
plot_heat(time, htop, hbot, hrad, heat, filename=args.outfile)
| mit |
kaichogami/scikit-learn | examples/svm/plot_rbf_parameters.py | 44 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores results from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
tri2sing/OpenStack | correlate_host_load_vm_count.py | 1 | 3907 | #!/usr/bin/env python
from datetime import datetime as dt, timedelta
import logging as lg
import pandas as pd
import pandas.tools as tl
import pymongo
import sys
import csv
import os  # os.environ is read below to get MONGO_URL
today = dt.now()
today = today.replace (hour=0, minute=0, second=0, microsecond=0)
yesterday = today + timedelta (days=-1)
MONGO_URL = os.environ['MONGO_URL']
try:
conn = pymongo.Connection (MONGO_URL, safe=True)
except pymongo.errors.ConnectionFailure as e:
print 'Error: check your MongoDB connectivity'
print 'Error:', e
sys.exit()
# Databases to use
db1 = conn.inventory
db2 = conn.collectd
# Collections to use
hy = db1.hypervisors
vms = db1.instances
caph = db1.hostcapacity
ld = db2.load
qry1 = {'timestamp': today}
sltr1 = {'_id': 0}
sltr2 = {'_id': 0}
sltr3 = {'_id': 0}
try:
# Get the list of hypervisors
hvsrs = hy.find (qry1, sltr1)
# print 'timestamp', 'host_name', 'vms_count', 'host_ram', 'vms_ram_alloc', 'ratio_ram', 'host_cpus', 'vms_vcpu_alloc', 'ratio_cpu', 'host_load_total', 'host_load_per_cpu'
for h in hvsrs:
info = {}
# Get the cpu load data which is of the form [short term, medium term, long term]
# We focus on the long-term as the barometer of host compute capacity.
qry2 = {"time" : { "$gte" : yesterday, "$lte" : today }, "host": h['name']}
lddocs = ld.find (qry2, sltr2)
ldlist = list(lddocs)
if ldlist:
ldvals = [item["values"][2] for item in ldlist]
lddf = pd.DataFrame(ldvals)
ldmean = round(lddf.mean().tolist()[0], 4)
else:
ldmean = -1.0 * (float(h['host_os_cpus']))
print str(today) + ': No load data for ' + h['name']
info['timestamp'] = today
info['host_name'] = h['name']
info['host_load_total'] = ldmean
info['vms_count'] = h['vms']
info['host_ram'] = h['ram_mb']
info['host_cpus'] = h['host_os_cpus']
info['host_load_per_cpu'] = round(ldmean/ float(h['host_os_cpus']), 4)
if h['vms'] > 0:
qry3 = {"timestamp" : today, "hypervisor": h['name']}
vmdocs = vms.find (qry3, sltr3)
vmlist = list(vmdocs)
vmvals = [{"vcpus": item["flavor"]["vcpus"], "ram": item["flavor"]["ram"]} for item in vmlist]
vmdf = pd.DataFrame(vmvals)
totals = vmdf.sum()
# We have to cast the values stored in totals[] to types that MongoDB can understand.
# For example totals['vms_ram_alloc'] is of type numpy.int64 which we cannot insert into MongoDB.
info['vms_ram_alloc'] = long(totals['ram'])
info['vms_vcpu_alloc'] = long(totals['vcpus'])
info['ratio_ram'] = round(float(totals['ram'])/ float(h['ram_mb']), 4)
info['ratio_cpu'] = round(float(totals['vcpus'])/ float(h['host_os_cpus']), 4)
else:
info['vms_ram_alloc'] = 0
info['vms_vcpu_alloc'] = 0
info['ratio_ram'] = 0.0
info['ratio_cpu'] = 0.0
if info['host_load_per_cpu'] >= 0.75 or info['vms_count'] >= 70:
info['state'] = 'RED'
elif info['host_load_per_cpu'] > 0.5 or info['vms_count'] >= 60:
info['state'] = 'YELLOW'
else:
info['state'] = 'GREEN'
# To debug why MongoDB insert was failing printed the data types of values.
#for k,v in info.items(): print k, type(v)
caph.update({'host_name':h['name'], 'timestamp':today}, info, upsert=True)
# print info['timestamp'], info['host_name'], info['vms_count'], info['host_ram'], info['vms_ram_alloc'], info['ratio_ram'], info['host_cpus'], info['vms_vcpu_alloc'], info['ratio_cpu'], info['host_load_total'], info['host_load_per_cpu']
except pymongo.errors.PyMongoError as e:
print str(today) + ': Error: unable to query'
print str(today) + ': Error:', e
| apache-2.0 |
dieterich-lab/rp-bp | rpbp/translation_prediction/estimate_orf_bayes_factors.py | 1 | 17952 | #! /usr/bin/env python3
import argparse
import pickle
import logging
import sys
import ctypes
import multiprocessing
import numpy as np
import pandas as pd
import scipy.io
import scipy.sparse
import scipy.stats
import pbio.utils.bed_utils as bed_utils
import pbio.misc.logging_utils as logging_utils
import pbio.misc.parallel as parallel
import pbio.misc.slurm as slurm
import pbio.misc.utils as utils
from pbio.misc.suppress_stdout_stderr import suppress_stdout_stderr
import pbio.ribo.ribo_utils as ribo_utils
from rpbp.defaults import default_num_cpus, default_num_groups, translation_options
logger = logging.getLogger(__name__)
# we will use global variables to share the (read-only) scipy.sparse.csr_matrix
# across the child processes.
# see: http://stackoverflow.com/questions/1675766/
# how-to-combine-pool-map-with-array-shared-memory-in-python-multiprocessing
profiles_data = 0
profiles_indices = 0
profiles_indptr = 0
profiles_shape = 0
translated_models = 0
untranslated_models = 0
args = 0
# Not passed as arguments, unlikely to be required
default_orf_num_field = 'orf_num'
default_orf_type_field = 'orf_type'
# --num-orfs is not used in the Rp-Bp pipeline
def get_bayes_factor(profile, translated_models, untranslated_models, args):
""" This function calculates the Bayes' factor for a single ORF profile.
Args:
profile (np.array): the (dense) profile for this ORF
translated_models (list of pystan.StanModel): the models which explain translation
untranslated_models (list of pystan.StanModel): the models which account for background
args (namespace): a namespace (presumably from argparse) which includes the following:
seed (int): random seed for initializing MCMC
chains (int): the number of MCMC chains
iterations (int): the number of iterations for each chain
Returns:
pd.Series: a series containing:
the mean and variance for each of the following estimated values:
bayes_factor
p_translated
p_background
translated_location
translated_scale
background_location
background_scale
the chi-square p-value
"""
profile_sum = sum(profile)
# split the signal based on frame
x_1 = profile[0::3]
x_2 = profile[1::3]
x_3 = profile[2::3]
T = len(x_1)
x_1_sum = sum(x_1)
x_2_sum = sum(x_2)
x_3_sum = sum(x_3)
ret = {
"p_translated_mean": float('-inf'),
"p_translated_var": float('-inf'),
"p_background_mean": float('-inf'),
"p_background_var": float('-inf'),
"translated_location_mean": float('-inf'),
"translated_location_var": float('-inf'),
"translated_scale_mean": float('-inf'),
"translated_scale_var": float('-inf'),
"background_location_mean": float('-inf'),
"background_location_var": float('-inf'),
"background_scale_mean": float('-inf'),
"background_scale_var": float('-inf'),
"bayes_factor_mean": float('-inf'),
"bayes_factor_var": float('-inf'),
"chi_square_p": float('-inf'),
"x_1_sum": x_1_sum,
"x_2_sum": x_2_sum,
"x_3_sum": x_3_sum,
"profile_sum": profile_sum
}
ret = pd.Series(ret)
# check if something odd happens with the length
# this should already be checked before calling the function.
if (T != len(x_2)) or (T != len(x_3)):
return ret
# and make sure we have more reads in x_1 than each of the others
if (x_1_sum < x_2_sum) or (x_1_sum < x_3_sum):
return ret
# chi-square values
f_obs = [x_1_sum, x_2_sum, x_3_sum]
chisq, chi_square_p = scipy.stats.chisquare(f_obs)
ret['chi_square_p'] = chi_square_p
# check if we only wanted the chi square value
if args.chi_square_only:
return ret
# now, smooth the signals
smoothed_profile = ribo_utils.smooth_profile(profile,
reweighting_iterations=args.reweighting_iterations,
fraction=args.fraction)
# split the signal based on frame
x_1 = smoothed_profile[0::3]
x_2 = smoothed_profile[1::3]
x_3 = smoothed_profile[2::3]
nonzero_x_1 = np.count_nonzero(x_1)
# construct the input for Stan
data = {
"x_1": x_1,
"x_2": x_2,
"x_3": x_3,
"T": T,
"nonzero_x_1": nonzero_x_1
}
m_translated = [tm.sampling(data=data, iter=args.iterations, chains=args.chains, n_jobs=1,
seed=args.seed, refresh=0) for tm in translated_models]
m_background = [bm.sampling(data=data, iter=args.iterations, chains=args.chains, n_jobs=1,
seed=args.seed, refresh=0) for bm in untranslated_models]
# extract the parameters of interest
m_translated_ex = [m.extract(pars=['lp__', 'background_location', 'background_scale'])
for m in m_translated]
m_background_ex = [m.extract(pars=['lp__', 'background_location', 'background_scale'])
for m in m_background]
# now, choose the best model of each class, based on mean likelihood
m_translated_means = [np.mean(m_ex['lp__']) for m_ex in m_translated_ex]
m_background_means = [np.mean(m_ex['lp__']) for m_ex in m_background_ex]
max_translated_mean = np.argmax(m_translated_means)
max_background_mean = np.argmax(m_background_means)
# select the best sampling results
m_translated_ex = m_translated_ex[max_translated_mean]
m_background_ex = m_background_ex[max_background_mean]
# extract the relevant means and variances
ret['p_translated_mean'] = np.mean(m_translated_ex['lp__'])
ret['p_translated_var'] = np.var(m_translated_ex['lp__'])
ret['p_background_mean'] = np.mean(m_background_ex['lp__'])
ret['p_background_var'] = np.var(m_background_ex['lp__'])
ret['translated_location_mean'] = np.mean(m_translated_ex['background_location'])
ret['translated_location_var'] = np.var(m_translated_ex['background_location'])
ret['translated_scale_mean'] = np.mean(m_translated_ex['background_scale'])
ret['translated_scale_var'] = np.var(m_translated_ex['background_scale'])
ret['background_location_mean'] = np.mean(m_background_ex['background_location'])
ret['background_location_var'] = np.var(m_background_ex['background_location'])
ret['background_scale_mean'] = np.mean(m_background_ex['background_scale'])
ret['background_scale_var'] = np.var(m_background_ex['background_scale'])
# the (log of) the Bayes factor is the difference between two normals:
# (the best translated model) - (the best background model)
#
# thus, it is also a normal whose mean is the difference of the two means
# and whose variance is the sum of the two variances
ret['bayes_factor_mean'] = ret['p_translated_mean'] - ret['p_background_mean']
ret['bayes_factor_var'] = ret['p_translated_var'] + ret['p_background_var']
return ret
def get_all_bayes_factors(orfs, args):
""" This function calculates the Bayes' factor term for each region in regions. See the
description of the script for the Bayes' factor calculations.
Args:
orfs (pd.DataFrame) : a set of orfs. The columns must include:
orf_num
exon_lengths
args (namespace) : a namespace containing the models and profiles filenames
Returns:
pandas.Series: the Bayes' factors (and other estimated quantities) for each region
"""
# read in the signals and sequences
logger.debug("Reading profiles")
profiles = scipy.io.mmread(args.profiles).tocsr()
logger.debug("Reading models")
translated_models = [pickle.load(open(tm, 'rb')) for tm in args.translated_models]
untranslated_models = [pickle.load(open(bm, 'rb')) for bm in args.untranslated_models]
logger.debug("Applying on regions")
bfs = []
for idx, row in orfs.iterrows():
orf_num = row[args.orf_num_field]
orf_len = row['orf_len']
        # skip ORFs whose length is not a multiple of 3; the frame-based
        # splitting of the profile assumes codon-aligned lengths
if orf_len % 3 != 0:
msg = "Found an ORF whose length was not 0 mod 3. Skipping. orf_id: {}".format(row['id'])
logger.warning(msg)
continue
profile = utils.to_dense(profiles, orf_num, float, length=orf_len)
row_bf = get_bayes_factor(profile, translated_models, untranslated_models, args)
row = row.append(row_bf)
bfs.append(row)
bfs = pd.DataFrame(bfs)
return bfs
def get_all_bayes_factors_args(orfs):
""" This function calculates the Bayes' factor term for each region in regions. See the
description of the script for the Bayes' factor calculations.
Args:
orfs (pd.DataFrame) : a set of orfs. The columns must include:
orf_num
exon_lengths
args (namespace) : a namespace containing the models and profiles filenames
Returns:
pandas.Series: the Bayes' factors (and other estimated quantities) for each region
"""
# read in the signals and sequences
# logger.debug("Reading profiles")
# profiles = scipy.io.mmread(args.profiles).tocsr()
# logger.debug("Reading models")
# translated_models = [pickle.load(open(tm, 'rb')) for tm in args.translated_models]
# untranslated_models = [pickle.load(open(bm, 'rb')) for bm in args.untranslated_models]
    # rebuild the sparse matrix from its internal arrays (here, the shared RawArrays);
    # a csc matrix can be constructed the same way, e.g.:
    # b = scipy.sparse.csc_matrix((a.data, a.indices, a.indptr), shape=a.shape, copy=False)
profiles = scipy.sparse.csr_matrix((profiles_data, profiles_indices, profiles_indptr),
shape=profiles_shape, copy=False)
logger.debug("Applying on regions")
bfs = []
for idx, row in orfs.iterrows():
orf_num = row[args.orf_num_field]
orf_len = row['orf_len']
        # skip ORFs whose length is not a multiple of 3; the frame-based
        # splitting of the profile assumes codon-aligned lengths
if orf_len % 3 != 0:
msg = "Found an ORF whose length was not 0 mod 3. Skipping. orf_id: {}".format(row['id'])
logger.warning(msg)
continue
profile = utils.to_dense(profiles, orf_num, float, length=orf_len)
row_bf = get_bayes_factor(profile, translated_models, untranslated_models, args)
row = row.append(row_bf)
bfs.append(row)
bfs = pd.DataFrame(bfs)
return bfs
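# A standalone sketch (illustrative only; the demo function and its toy matrix are not
# part of the pipeline) of the shared-memory pattern used above: main() copies the csr
# matrix's internal arrays into multiprocessing.RawArrays, and each worker rebuilds a
# csr matrix from those buffers instead of unpickling the full matrix.
def _demo_shared_csr_round_trip():
    import ctypes
    import multiprocessing
    import numpy as np
    import scipy.sparse
    original = scipy.sparse.random(5, 4, density=0.5, format='csr')
    data = multiprocessing.RawArray(ctypes.c_double, original.data)
    indices = multiprocessing.RawArray(ctypes.c_int, original.indices)
    indptr = multiprocessing.RawArray(ctypes.c_int, original.indptr)
    shape = multiprocessing.RawArray(ctypes.c_int, original.shape)
    rebuilt = scipy.sparse.csr_matrix((data, indices, indptr),
                                      shape=tuple(shape), copy=False)
    assert np.allclose(original.toarray(), rebuilt.toarray())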
def main():
global profiles_data, profiles_indices, profiles_indptr, profiles_shape
global translated_models, untranslated_models
global args
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""This script uses Hamiltonian MCMC with Stan
to estimate translation parameters for a set of regions (presumably ORFs). Roughly, it takes
as input: (1) a set of regions (ORFs) and their corresponding profiles
(2) a "translated" model which gives the probability that a region is translated
(3) an "untranslated" model which gives the probability that a region is not translated.
        The script first smooths the profiles using LOWESS. It then calculates both the Bayes' factor
        (using the smoothed profile) and the chi-square value (using the raw counts) for each ORF.""")
parser.add_argument('profiles', help="The ORF profiles (counts) (mtx)")
parser.add_argument('regions', help="The regions (ORFs) for which predictions will be made (BED12+)")
parser.add_argument('out', help="The output file for the Bayes' factors (BED12+)")
parser.add_argument('--chi-square-only', help="""If this flag is present, then only the chi
square test will be performed for each ORF. This can also be a way to get the counts within
each of the ORFs.""", action='store_true')
parser.add_argument('--translated-models', help="The models to use as H_t (pkl)", nargs='+')
parser.add_argument('--untranslated-models', help="The models to use as H_u (pkl)", nargs='+')
# filtering options
parser.add_argument('--orf-types', help="If values are given, then only orfs with those types are processed.",
nargs='*', default=translation_options['orf_types'])
parser.add_argument('--orf-type-field', default=default_orf_type_field)
parser.add_argument('--min-length', help="ORFs with length less than this value will not be processed",
type=int, default=translation_options['orf_min_length_pre'])
parser.add_argument('--max-length', help="ORFs with length greater than this value will not be processed",
type=int, default=translation_options['orf_max_length_pre'])
parser.add_argument('--min-profile', help="""ORFs with profile sum (i.e., number of reads) less than this
value will not be processed.""", type=float, default=translation_options['orf_min_profile_count_pre'])
# smoothing options
parser.add_argument('--fraction', help="The fraction of signal to use in LOWESS",
type=float, default=translation_options['smoothing_fraction'])
parser.add_argument('--reweighting-iterations', help="The number of reweighting "
"iterations to use in LOWESS. "
"Please see the statsmodels documentation for a "
"detailed description of this parameter.",
type=int, default=translation_options['smoothing_reweighting_iterations'])
# MCMC options
parser.add_argument('-s', '--seed', help="The random seeds to use for inference",
type=int, default=translation_options['seed'])
parser.add_argument('-c', '--chains', help="The number of MCMC chains to use", type=int,
default=translation_options['chains'])
parser.add_argument('-i', '--iterations', help="The number of MCMC iterations to use for each chain",
type=int, default=translation_options['translation_iterations'])
# behavior options
parser.add_argument('--num-orfs', help="If n>0, then only this many ORFs will be processed",
type=int, default=0)
parser.add_argument('--orf-num-field', default=default_orf_num_field)
parser.add_argument('--do-not-compress', help="Unless otherwise specified, the output will "
"be written in GZip format", action='store_true')
parser.add_argument('-g', '--num-groups', help="The number of groups into which to split "
"the ORFs. More groups means the progress bar is "
"updated more frequently but incurs more overhead "
"because of the parallel calls.",
type=int, default=default_num_groups)
slurm.add_sbatch_options(parser)
logging_utils.add_logging_options(parser)
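    # these helpers are assumed to add the cluster- and logging-related options used
    # below (e.g., --use-slurm and --num-cpus, plus the logging verbosity flags)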
args = parser.parse_args()
logging_utils.update_logging(args)
if args.use_slurm:
cmd = ' '.join(sys.argv)
slurm.check_sbatch(cmd, args=args)
return
# read in the regions and apply the filters
msg = "Reading and filtering ORFs"
logger.info(msg)
regions = bed_utils.read_bed(args.regions)
# by default, keep everything
m_filters = np.array([True] * len(regions))
if len(args.orf_types) > 0:
m_orf_type = regions[args.orf_type_field].isin(args.orf_types)
m_filters = m_orf_type & m_filters
# min length
if args.min_length > 0:
m_min_length = regions['orf_len'] >= args.min_length
m_filters = m_min_length & m_filters
# max length
if args.max_length > 0:
m_max_length = regions['orf_len'] <= args.max_length
m_filters = m_max_length & m_filters
# min profile
profiles = scipy.io.mmread(args.profiles).tocsr()
profiles_sums = profiles.sum(axis=1)
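    # profiles has one row per ORF (indexed by orf_num); each row sum is the total
    # number of reads in that ORF, which is compared against --min-profile below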
good_orf_nums = np.where(profiles_sums >= args.min_profile)
good_orf_nums = set(good_orf_nums[0])
m_profile = regions['orf_num'].isin(good_orf_nums)
m_filters = m_profile & m_filters
regions = regions[m_filters]
if args.num_orfs > 0:
regions = regions.head(args.num_orfs)
regions = regions.reset_index(drop=True)
msg = "Number of regions after filtering: {}".format(len(regions))
logger.info(msg)
logger.debug("Reading models")
translated_models = [pickle.load(open(tm, 'rb')) for tm in args.translated_models]
untranslated_models = [pickle.load(open(bm, 'rb')) for bm in args.untranslated_models]
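    # copy the sparse matrix's internal arrays into shared-memory RawArrays so the
    # multiprocessing workers can rebuild the profiles in get_all_bayes_factors_args
    # without pickling the full matrix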
profiles_data = multiprocessing.RawArray(ctypes.c_double, profiles.data.flat)
profiles_indices = multiprocessing.RawArray(ctypes.c_int, profiles.indices)
profiles_indptr = multiprocessing.RawArray(ctypes.c_int, profiles.indptr)
profiles_shape = multiprocessing.RawArray(ctypes.c_int, profiles.shape)
with suppress_stdout_stderr():
bfs_l = parallel.apply_parallel_split(
regions,
args.num_cpus,
get_all_bayes_factors_args,
num_groups=args.num_groups,
progress_bar=True,
backend='multiprocessing'
)
bfs = pd.concat(bfs_l)
# write the results as a bed12+ file
bed_utils.write_bed(bfs, args.out)
if __name__ == '__main__':
main()
| mit |
ycaihua/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes the data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
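# fit_inverse_transform=True (set above) makes KernelPCA learn an approximate
# pre-image map during fit; without it, inverse_transform is unavailable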
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
DaRasch/spiceminer | spiceminer/simple/__init__.py | 1 | 2206 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import numpy
import matplotlib.pyplot as plt
from . import views
from . import animation
from .. import kernel
from ..time_ import Time
from ..bodies import Body
import pdb
class BodyData(object):
def __init__(self, body, args, times):
self.body = Body(body)
self.plotargs = args
self.data = self.body.position(times)
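        # position(times) is assumed to return an array whose first row is the sampled
        # times and whose remaining rows are the spatial coordinates; the animation
        # code indexes it that way (data[0] for times, data[1:] for positions)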
class SpiceAnimation(animation.PlaybackController):
def __init__(self, path='.', bodies=None, times=None, title=None, **animargs):
kernel.load(path, force_reload=True)
if times is None:
            times = numpy.arange(Time.now() - 56 * Time.WEEK, Time.now(), Time.DAY, dtype=float)
if title is None:
title = 'From {}\nto {}'.format(str(Time.fromposix(times[0])), str(Time.fromposix(times[-1])))
self.bodydata = [BodyData(body, args, times) for body, args in bodies.items()]
limit = max(item.data[1:3].max() for item in self.bodydata) * 1.1
frame_count = max(len(item.data[0]) for item in self.bodydata)
self._view = views.QuadView(title, limit)
self._view.widgets['play'].on_clicked(self._play_cb)
self._view.widgets['slider'].on_changed(self._slider_cb)
for item in self.bodydata:
self._view.add(item.data[1:,:1], item.plotargs)
super(self.__class__, self).__init__(self._view.figure, frame_count, **animargs)
def _play_cb(self, event):
super(self.__class__, self)._pause()
def _slider_cb(self, pos):
self.current_frame = int(pos * self.frame_count)
def _draw_frame(self, frame):
self._view.update(Time.fromposix(
self.bodydata[0].data[0, frame - 1]),
[item.data[1:, :frame] for item in self.bodydata]
)
slider = self._view.widgets['slider']
slider.eventson = False
slider.set_val(frame / float(self.frame_count))
slider.eventson = True
def show(self):
try:
plt.show(self._view.figure)
finally:
plt.close(self._view.figure)
def close(self):
try:
plt.close(self._view.figure)
except Exception:
pass
| mit |
robbymeals/scikit-learn | examples/ensemble/plot_forest_importances.py | 241 | 1761 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
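# the standard deviation of the importances across the individual trees is the
# inter-tree variability shown as the error bars (yerr) in the plot below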
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
| bsd-3-clause |
zoyahav/incubator-airflow | setup.py | 5 | 9813 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import logging
import os
import pip
import sys
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
version = imp.load_source(
'airflow.version', os.path.join('airflow', 'version.py')).version
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
def git_version(version):
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not : it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted changes
are present.
"""
repo = None
try:
import git
repo = git.Repo('.git')
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
except Exception as e:
logger.warning('Git repo not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
# is it release of `version` ?
try:
tag = repo.git.describe(
match='[0-9]*', exact_match=True,
tags=True, dirty=True)
assert tag == version, (tag, version)
return '.release:{version}+{sha}'.format(version=version,
sha=sha)
except git.GitCommandError:
return '.dev0+{sha}'.format(sha=sha)
else:
return 'no_git_version'
def write_version(filename=os.path.join(*['airflow',
'git_version'])):
text = "{}".format(git_version(version))
with open(filename, 'w') as a:
a.write(text)
def check_previous():
installed_packages = ([package.project_name for package
in pip.get_installed_distributions()])
if 'airflow' in installed_packages:
print("An earlier non-apache version of Airflow was installed, "
"please uninstall it first. Then reinstall.")
sys.exit(1)
async = [
'greenlet>=0.4.9',
'eventlet>= 0.9.7',
'gevent>=0.13'
]
azure = ['azure-storage>=0.34.0']
celery = [
'celery>=3.1.17',
'flower>=0.7.3'
]
cgroups = [
'cgroupspy>=0.1.4',
]
crypto = ['cryptography>=0.9.3']
dask = [
'distributed>=1.15.2, <2'
]
databricks = ['requests>=2.5.1, <3']
datadog = ['datadog>=0.14.0']
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker-py>=1.6.0']
druid = ['pydruid>=0.2.1']
emr = ['boto3>=1.0.0']
gcp_api = [
'httplib2',
'google-api-python-client>=1.5.0, <1.6.0',
'oauth2client>=2.0.2, <2.1.0',
'PyOpenSSL',
'pandas-gbq'
]
hdfs = ['snakebite>=2.7.8']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
jira = ['JIRA>1.0.7']
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'impyla>=0.13.3',
'unicodecsv>=0.14.1'
]
jdbc = ['jaydebeapi>=0.2.0']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.14.1']
mysql = ['mysqlclient>=1.3.6']
rabbitmq = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.7.1']
salesforce = ['simple-salesforce>=0.72']
s3 = [
'boto>=2.36.0',
'filechunkio>=1.6',
]
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=1.0.0']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
ldap = ['ldap3>=0.9.9.1']
kerberos = ['pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
'snakebite[kerberos]>=2.7.8',
'kerberos>=1.2.5']
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
qds = ['qds-sdk>=1.9.0']
cloudant = ['cloudant>=0.5.9,<2.0'] # major update coming soon, clamp to 0.x
redis = ['redis>=2.10.5']
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant
devel = [
'click',
'freezegun',
'jira',
'lxml>=3.3.4',
'mock',
'moto',
'nose',
'nose-ignore-docstring==0.2',
'nose-timer',
'parameterized',
'rednose'
]
devel_minreq = devel + mysql + doc + password + s3 + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = devel + all_dbs + doc + samba + s3 + slack + crypto + oracle + docker
def do_setup():
check_previous()
write_version()
setup(
name='apache-airflow',
description='Programmatically author, schedule and monitor data pipelines',
license='Apache License 2.0',
version=version,
packages=find_packages(),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.3, <0.9',
'bleach==2.0.0',
'configparser>=3.5.0, <3.6.0',
'croniter>=0.3.8, <0.4',
'dill>=0.2.2, <0.3',
'flask>=0.11, <0.12',
'flask-admin==1.4.1',
'flask-cache>=0.13.1, <0.14',
'flask-login==0.2.11',
'flask-swagger==0.2.13',
'flask-wtf==0.14',
'funcsigs==1.0.0',
'future>=0.16.0, <0.17',
'gitpython>=2.0.2',
'gunicorn>=19.3.0, <19.4.0', # 19.4.? seemed to have issues
'jinja2>=2.7.3, <2.9.0',
'lxml>=3.6.0, <4.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'psutil>=4.2.0, <5.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'python-nvd3==0.14.2',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=0.9.8',
'tabulate>=0.7.5, <0.8.0',
'thrift>=0.9.2, <0.10',
'zope.deprecation>=4.0, <5.0',
],
extras_require={
'all': devel_all,
'all_dbs': all_dbs,
'async': async,
'azure': azure,
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'crypto': crypto,
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid,
'emr': emr,
'gcp_api': gcp_api,
'github_enterprise': github_enterprise,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'kerberos': kerberos,
'ldap': ldap,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
's3': s3,
'salesforce': salesforce,
'samba': samba,
'slack': slack,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
'jira': jira,
'redis': redis,
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='[email protected]',
url='http://airflow.incubator.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release/incubator/airflow/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
},
)
if __name__ == "__main__":
do_setup()
| apache-2.0 |
mwv/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
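# taking 1 + log of the nonzero counts mimics the sublinear term-frequency
# scaling commonly used in tf-idf weighting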
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
pratapvardhan/pandas | pandas/tests/dtypes/test_generic.py | 3 | 3780 | # -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.core.dtypes import generic as gt
from pandas.util import testing as tm
class TestABCClasses(object):
tuples = [[1, 2, 2], ['red', 'blue', 'red']]
multi_index = pd.MultiIndex.from_arrays(tuples, names=('number', 'color'))
datetime_index = pd.to_datetime(['2000/1/1', '2010/1/1'])
timedelta_index = pd.to_timedelta(np.arange(5), unit='s')
period_index = pd.period_range('2000/1/1', '2010/1/1/', freq='M')
categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1])
categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical)
df = pd.DataFrame({'names': ['a', 'b', 'c']}, index=multi_index)
sparse_series = pd.Series([1, 2, 3]).to_sparse()
sparse_array = pd.SparseArray(np.random.randn(10))
sparse_frame = pd.SparseDataFrame({'a': [1, -1, None]})
def test_abc_types(self):
assert isinstance(pd.Index(['a', 'b', 'c']), gt.ABCIndex)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCInt64Index)
assert isinstance(pd.UInt64Index([1, 2, 3]), gt.ABCUInt64Index)
assert isinstance(pd.Float64Index([1, 2, 3]), gt.ABCFloat64Index)
assert isinstance(self.multi_index, gt.ABCMultiIndex)
assert isinstance(self.datetime_index, gt.ABCDatetimeIndex)
assert isinstance(self.timedelta_index, gt.ABCTimedeltaIndex)
assert isinstance(self.period_index, gt.ABCPeriodIndex)
assert isinstance(self.categorical_df.index, gt.ABCCategoricalIndex)
assert isinstance(pd.Index(['a', 'b', 'c']), gt.ABCIndexClass)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass)
assert isinstance(pd.Series([1, 2, 3]), gt.ABCSeries)
assert isinstance(self.df, gt.ABCDataFrame)
with catch_warnings(record=True):
assert isinstance(self.df.to_panel(), gt.ABCPanel)
assert isinstance(self.sparse_series, gt.ABCSparseSeries)
assert isinstance(self.sparse_array, gt.ABCSparseArray)
assert isinstance(self.sparse_frame, gt.ABCSparseDataFrame)
assert isinstance(self.categorical, gt.ABCCategorical)
assert isinstance(pd.Period('2012', freq='A-DEC'), gt.ABCPeriod)
assert isinstance(pd.DateOffset(), gt.ABCDateOffset)
assert isinstance(pd.Period('2012', freq='A-DEC').freq,
gt.ABCDateOffset)
assert not isinstance(pd.Period('2012', freq='A-DEC'),
gt.ABCDateOffset)
assert isinstance(pd.Interval(0, 1.5), gt.ABCInterval)
assert not isinstance(pd.Period('2012', freq='A-DEC'), gt.ABCInterval)
def test_setattr_warnings():
# GH7175 - GOTCHA: You can't use dot notation to add a column...
d = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
with catch_warnings(record=True) as w:
# successfully add new column
# this should not raise a warning
df['three'] = df.two + 1
assert len(w) == 0
assert df.three.sum() > df.two.sum()
with catch_warnings(record=True) as w:
# successfully modify column in place
# this should not raise a warning
df.one += 1
assert len(w) == 0
assert df.one.iloc[0] == 2
with catch_warnings(record=True) as w:
# successfully add an attribute to a series
# this should not raise a warning
df.two.not_an_index = [1, 2]
assert len(w) == 0
with tm.assert_produces_warning(UserWarning):
# warn when setting column to nonexistent name
df.four = df.two + 2
assert df.four.sum() > df.two.sum()
| bsd-3-clause |
pv/scikit-learn | sklearn/utils/__init__.py | 132 | 14185 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
    The last slice may contain fewer than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |