repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---
rishikksh20/scikit-learn
|
sklearn/neighbors/__init__.py
|
71
|
1025
|
"""
The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
algorithm.
"""
from .ball_tree import BallTree
from .kd_tree import KDTree
from .dist_metrics import DistanceMetric
from .graph import kneighbors_graph, radius_neighbors_graph
from .unsupervised import NearestNeighbors
from .classification import KNeighborsClassifier, RadiusNeighborsClassifier
from .regression import KNeighborsRegressor, RadiusNeighborsRegressor
from .nearest_centroid import NearestCentroid
from .kde import KernelDensity
from .approximate import LSHForest
from .lof import LocalOutlierFactor
__all__ = ['BallTree',
'DistanceMetric',
'KDTree',
'KNeighborsClassifier',
'KNeighborsRegressor',
'NearestCentroid',
'NearestNeighbors',
'RadiusNeighborsClassifier',
'RadiusNeighborsRegressor',
'kneighbors_graph',
'radius_neighbors_graph',
'KernelDensity',
'LSHForest',
'LocalOutlierFactor']
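# A minimal usage sketch of the k-nearest-neighbors classifier exposed by this module
# (illustration only, assuming scikit-learn is installed); the __main__ guard keeps it
# from running on package import.
if __name__ == "__main__":
    import numpy as np
    from sklearn.neighbors import KNeighborsClassifier

    # Four 1-D points, two per class; a new point is classified by majority vote of its 3 neighbors.
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0, 0, 1, 1])
    clf = KNeighborsClassifier(n_neighbors=3).fit(X, y)
    print(clf.predict([[1.1]]))  # nearest neighbors are 1.0, 0.0, 2.0 -> predicted class 0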
|
bsd-3-clause
|
gundramleifert/exp_tf
|
models/lp/bdlstm_lp_v6.py
|
1
|
13704
|
'''
Author: Tobi and Gundram
'''
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops.rnn import bidirectional_rnn
from util.LoaderUtil import read_image_list, get_list_vals
from random import shuffle
from util.STR2CTC import get_charmap_lp, get_charmap_lp_inv
import os
import time
import numpy as np
import matplotlib.pyplot as plt
# Goes down to 10%
INPUT_PATH_TRAIN = './private/lists/lp_only_train.lst'
INPUT_PATH_VAL = './private/lists/lp_only_val.lst'
cm, nClasses = get_charmap_lp()
# Additional NaC Channel
nClasses += 1
nEpochs = 15
batchSize = 4
learningRate = 0.001
momentum = 0.9
# It is assumed that the TextLines are ALL saved with a consistent height of imgH
imgH = 48
# Depending on the size the image is cropped or zero padded
imgW = 256
channels = 1
nHiddenLSTM1 = 256
os.chdir("../..")
trainList = read_image_list(INPUT_PATH_TRAIN)
stepsPerEpocheTrain = len(trainList) / batchSize
valList = read_image_list(INPUT_PATH_VAL)
stepsPerEpocheVal = len(valList) / batchSize
def inference(images, seqLen):
with tf.variable_scope('conv1') as scope:
kernel = tf.Variable(tf.truncated_normal([6, 5, channels, 32], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(images, kernel, [1, 4, 3, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[32]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
# _activation_summary(conv1)
norm1 = tf.nn.local_response_normalization(conv1, name='norm1')
seqFloat = tf.to_float(seqLen)
seqL2 = tf.ceil(seqFloat * 0.33)
with tf.variable_scope('conv2') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[64]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
# _activation_summary(conv2)
# norm2
# norm2 = tf.nn.local_response_normalization(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm2')
pool2 = tf.nn.max_pool(conv2, ksize=[1, 4, 2, 1], strides=[1, 4, 2, 1], padding='SAME', name='pool2')
seqL3 = tf.ceil(seqL2 * 0.5)
with tf.variable_scope('conv3') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 3, 64, 128], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[128]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(pre_activation, name=scope.name)
pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 1, 1], strides=[1, 3, 1, 1], padding='SAME', name='pool3')
# NO POOLING HERE -> CTC needs an appropriate length.
seqLenAfterConv = tf.to_int32(seqL3)
with tf.variable_scope('RNN_Prep') as scope:
# (#batch Y X Z) --> (X #batch Y Z)
rnnIn = tf.transpose(pool3, [2, 0, 1, 3])
# (X #batch Y Z) --> (X #batch Y*Z)
shape = rnnIn.get_shape()
steps = shape[0]
rnnIn = tf.reshape(rnnIn, tf.pack([shape[0], shape[1], -1]))
# (X #batch Y*Z) --> (X*#batch Y*Z)
shape = rnnIn.get_shape()
rnnIn = tf.reshape(rnnIn, tf.pack([-1, shape[2]]))
# (X*#batch Y*Z) --> list of X tensors of shape (#batch, Y*Z)
rnnIn = tf.split(0, steps, rnnIn)
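# Shape trace for the defaults above (batchSize=4, imgH=48, imgW=256, channels=1, SAME padding):
# conv1 (strides 4x3) -> (4, 12, 86, 32); pool2 (4x2) -> (4, 3, 43, 64); conv3/pool3 (3x1) -> (4, 1, 43, 128).
# After the transpose/reshape/split above, rnnIn is a list of 43 time steps, each a (4, 128) tensor;
# for a full-width input (seqLen = 256) seqLenAfterConv = ceil(ceil(256 * 0.33) * 0.5) = 43, matching that count.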
with tf.variable_scope('BLSTM1') as scope:
forwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
backwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
outputs, _, _ = bidirectional_rnn(forwardH1, backwardH1, rnnIn, dtype=tf.float32)
fbH1rs = [tf.reshape(t, [batchSize, 2, nHiddenLSTM1]) for t in outputs]
# outH1 = [tf.reduce_sum(tf.mul(t, weightsOutH1), reduction_indices=1) + biasesOutH1 for t in fbH1rs]
outH1 = [tf.reduce_sum(t, reduction_indices=1) for t in fbH1rs]
with tf.variable_scope('LOGIT') as scope:
weightsClasses = tf.Variable(tf.truncated_normal([nHiddenLSTM1, nClasses],
stddev=np.sqrt(2.0 / nHiddenLSTM1)))
biasesClasses = tf.Variable(tf.zeros([nClasses]))
logitsFin = [tf.matmul(t, weightsClasses) + biasesClasses for t in outH1]
logits3d = tf.pack(logitsFin)
return logits3d, seqLenAfterConv
def loss(logits3d, tgt, seqLenAfterConv):
loss = tf.reduce_mean(ctc.ctc_loss(logits3d, tgt, seqLenAfterConv))
return loss
print('Defining graph')
graph = tf.Graph()
with graph.as_default():
####Graph input
inputX = tf.placeholder(tf.float32, shape=(batchSize, imgH, imgW, channels))
targetIxs = tf.placeholder(tf.int64)
targetVals = tf.placeholder(tf.int32)
targetShape = tf.placeholder(tf.int64)
targetY = tf.SparseTensor(targetIxs, targetVals, targetShape)
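# The CTC targets are fed as a sparse tensor. Hypothetical example for a batch of two label
# sequences [3, 1, 4] and [2, 7]: targetIxs = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]],
# targetVals = [3, 1, 4, 2, 7], targetShape = [2, 3].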
seqLengths = tf.placeholder(tf.int32, shape=(batchSize))
logits3d, seqAfterConv = inference(inputX, seqLengths)
loss = loss(logits3d, targetY, seqAfterConv)
optimizer = tf.train.MomentumOptimizer(learningRate, momentum).minimize(loss)
# pred = tf.to_int32(ctc.ctc_beam_search_decoder(logits3d, seqAfterConv, merge_repeated=False)[0][0])
pred = tf.to_int32(ctc.ctc_greedy_decoder(logits3d, seqAfterConv)[0][0])
edist = tf.edit_distance(pred, targetY, normalize=False)
tgtLens = tf.to_float(tf.size(targetY.values))
err = tf.reduce_sum(edist) / tgtLens
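# The edit distance is summed over the batch and divided by the total number of target characters,
# so err approximates the character error rate (CER) reported in the logs below.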
saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
# writer = tf.train.SummaryWriter('./log', session.graph)
print('Initializing')
tf.global_variables_initializer().run()
# ckpt = tf.train.get_checkpoint_state("./private/models/lp2/")
# if ckpt and ckpt.model_checkpoint_path:
# saver.restore(session, ckpt.model_checkpoint_path)
# print(ckpt)
# workList = valList[:]
# errV = 0
# lossV = 0
# timeVS = time.time()
# cmInv = get_charmap_lp_inv()
# for bStep in range(stepsPerEpocheVal):
# bList, workList = workList[:batchSize], workList[batchSize:]
# batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
# imgW,
# mvn=True)
# feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
# targetShape: batchTargetShape, seqLengths: batchSeqLengths}
# lossB, aErr, p = session.run([loss, err, pred], feed_dict=feedDict)
# print(aErr)
# res = []
# for idx in p.values:
# res.append(cmInv[idx])
# print(res)
# # print(p)
# plt.imshow(batchInputs[0,:,:,0], cmap=plt.cm.gray)
# plt.show()
#
# lossV += lossB
# errV += aErr
# print('Val: CTC-loss ', lossV)
# errVal = errV / stepsPerEpocheVal
# print('Val: CER ', errVal)
# print('Val time ', time.time() - timeVS)
for epoch in range(nEpochs):
workList = trainList[:]
shuffle(workList)
print('Epoch', epoch + 1, '...')
lossT = 0
errT = 0
timeTS = time.time()
for bStep in range(stepsPerEpocheTrain):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgW,
mvn=True)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: batchSeqLengths}
_, lossB, aErr = session.run([optimizer, loss, err], feed_dict=feedDict)
# _, lossB, aErr, sET, sLT = session.run([optimizer, loss, err, err_train, loss_train], feed_dict=feedDict)
lossT += lossB
# writer.add_summary(sET, epoch * stepsPerEpocheTrain + bStep)
# writer.add_summary(sLT, epoch * stepsPerEpocheTrain + bStep)
errT += aErr
print('Train: CTC-loss ', lossT)
cerT = errT / stepsPerEpocheTrain
print('Train: CER ', cerT)
print('Train time ', time.time() - timeTS)
workList = valList[:]
errV = 0
lossV = 0
timeVS = time.time()
for bStep in range(stepsPerEpocheVal):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgW,
mvn=True)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: batchSeqLengths}
lossB, aErr = session.run([loss, err], feed_dict=feedDict)
# lossB, aErr, sE, sL = session.run([loss, err, err_val, loss_val], feed_dict=feedDict)
# writer.add_summary(sE, epoch*stepsPerEpocheVal + bStep)
# writer.add_summary(sL, epoch * stepsPerEpocheVal + bStep)
lossV += lossB
errV += aErr
print('Val: CTC-loss ', lossV)
errVal = errV / stepsPerEpocheVal
print('Val: CER ', errVal)
print('Val time ', time.time() - timeVS)
# Write a checkpoint.
checkpoint_file = os.path.join('./private/models/lp6/', 'checkpoint')
saver.save(session, checkpoint_file, global_step=epoch)
# Defining graph
# Initializing
# Epoch 1 ...
# Train: CTC-loss 129009.017706
# Train: CER 0.635904513293
# Train time 4908.49444389
# Val: CTC-loss 1641.79976816
# Val: CER 0.0801813207567
# Val time 244.049314976
# Epoch 2 ...
# Train: CTC-loss 16020.608585
# Train: CER 0.0717145665077
# Train time 7330.24510384
# Val: CTC-loss 1204.36847229
# Val: CER 0.0566576011727
# Val time 245.118979931
# Epoch 3 ...
# Train: CTC-loss 12435.9589674
# Train: CER 0.0558677665295
# Train time 7285.28540993
# Val: CTC-loss 1003.13010596
# Val: CER 0.0471066227357
# Val time 242.016130924
# Epoch 4 ...
# Train: CTC-loss 11060.2886085
# Train: CER 0.0499579166048
# Train time 7326.90888286
# Val: CTC-loss 969.390615069
# Val: CER 0.0463109914263
# Val time 245.883394003
# Epoch 5 ...
# Train: CTC-loss 10113.6315179
# Train: CER 0.0457048515265
# Train time 7260.16503906
# Val: CTC-loss 964.054605111
# Val: CER 0.0448569302758
# Val time 245.195471048
# Epoch 6 ...
# Train: CTC-loss 9361.70014321
# Train: CER 0.042607394019
# Train time 7276.95676613
# Val: CTC-loss 942.684666969
# Val: CER 0.0438320938696
# Val time 239.865092039
# Epoch 7 ...
# Train: CTC-loss 8693.04606334
# Train: CER 0.0398572982518
# Train time 6516.15737796
# Val: CTC-loss 930.6919411
# Val: CER 0.0422663276643
# Val time 220.383415222
# Epoch 8 ...
# Train: CTC-loss 8161.14864806
# Train: CER 0.0377375896172
# Train time 6433.12666297
# Val: CTC-loss 933.970610965
# Val: CER 0.0425528454781
# Val time 192.637362957
# Epoch 9 ...
# Train: CTC-loss 7658.31400694
# Train: CER 0.0357702803461
# Train time 5611.53865314
# Val: CTC-loss 944.544853458
# Val: CER 0.0428228211651
# Val time 152.010342121
# Epoch 10 ...
# Train: CTC-loss 7171.84027007
# Train: CER 0.0337837695306
# Train time 5177.82906294
# Val: CTC-loss 941.78110862
# Val: CER 0.0423581593285
# Val time 184.70659399
# Epoch 11 ...
# Train: CTC-loss 6820.79927806
# Train: CER 0.0323584240315
# Train time 5460.32187796
# Val: CTC-loss 987.756852884
# Val: CER 0.044123320813
# Val time 153.327903986
# Epoch 12 ...
# Train: CTC-loss 6330.18515219
# Train: CER 0.030294881605
# Train time 5040.84565091
# Val: CTC-loss 971.562253463
# Val: CER 0.0413985775958
# Val time 167.768498898
# Epoch 13 ...
# Train: CTC-loss 5951.8420738
# Train: CER 0.0285477739336
# Train time 5047.84928107
# Val: CTC-loss 1012.34960045
# Val: CER 0.0429205714911
# Val time 167.878767014
# Epoch 14 ...
# Train: CTC-loss 5679.90946481
# Train: CER 0.0276907928977
# Train time 5026.46480107
# Val: CTC-loss 1040.27236869
# Val: CER 0.0416939370632
# Val time 166.396095991
# Epoch 15 ...
# Train: CTC-loss 5316.91454479
# Train: CER 0.0258935857246
# Train time 5059.73199415
# Val: CTC-loss 1048.74418164
# Val: CER 0.0438409063319
# Val time 166.044019938
|
apache-2.0
|
davidgbe/scikit-learn
|
examples/neighbors/plot_approximate_nearest_neighbors_scalability.py
|
225
|
5719
|
"""
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
for LSHForest. Query time is compared with the brute force method of exact
nearest neighbor search for the same index sizes. Brute force queries scale
very predictably and linearly with the index size (full scan), whereas the
LSHForest index has a sub-linear scalability profile but can be slower for
small datasets.
The second plot shows the speedup of approximate queries over brute force
exact queries. The speedup tends to increase with the dataset size and
typically reaches a plateau when querying datasets with millions of samples
and a few hundred dimensions. Higher-dimensional datasets tend to benefit
more from LSHForest indexing.
The break-even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and on the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The rate of decrease depends mostly on the LSHForest parameters and the
dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
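# With the defaults above this evaluates to roughly [1000, 2511, 6309, 15848, 39810, 100000].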
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
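# np.in1d above gives the fraction of the 10 approximate neighbors that also appear among
# the 10 exact neighbors, i.e. precision@10 for this single query.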
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
|
bsd-3-clause
|
ChanderG/scikit-learn
|
sklearn/datasets/__init__.py
|
176
|
3671
|
"""
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
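# A minimal usage sketch of this module: loading a bundled dataset and generating synthetic
# blobs (illustration only, assuming scikit-learn is installed); the __main__ guard keeps it
# from running on package import.
if __name__ == "__main__":
    from sklearn.datasets import load_iris, make_blobs

    # Bundled reference dataset: 150 iris samples with 4 features each.
    iris = load_iris()
    print(iris.data.shape)   # (150, 4)

    # Artificial data generator: 100 2-D points drawn around 3 cluster centers.
    X, y = make_blobs(n_samples=100, n_features=2, centers=3, random_state=0)
    print(X.shape)           # (100, 2)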
|
bsd-3-clause
|
BioinfUD/K-mersCL
|
src/utils/extract_metrics.py
|
1
|
3825
|
import pandas as pd
import numpy as np
from sys import argv
import re
def extract_nvidia_max_memory(merged_file):
return np.nanmax(merged_file['GPU'])
def extract_anytool_max_memory(merged_file):
return (np.nanmax(merged_file['MEM']) - np.nanmin(merged_file['MEM']))/1000.0
def extract_kmc_time(log_file):
time = 0.0
for line in log_file:
if "Time read processing : " in line:
line = line.split(",")[1]
content = line.replace("*", "").replace("Time read processing : ", "").replace("ns.", "").strip()
time += float(content)
return time/(10**9)
def extract_mspk_time(log_file):
for line in log_file:
if "Total algorithm time" in line:
line = line.split(",")[1]
time = line.replace("Total algorithm time:","").replace("s", "").strip()
if "min" in time:
time = float(time.replace("min", "").strip())
return time * 60
return float(time)
def extract_kmercl_signature_time(log_file):
for line in log_file:
if "Total algorithm time" in line:
line = line.split(",")[1]
time = line.replace("Total algorithm time:","").replace("s", "").strip()
return float(time)
def extract_kmercl_time(log_file):
for line in log_file:
if "Kernel execution took " in line:
line = line.split(",")[1]
return float(line.replace("Kernel execution took ", "").strip())
def extract_transfer_time(log_file):
total = 0.0
for line in log_file:
if "Copying data took " in line:
line = line.split(",")[1]
total += float(line.replace(" Copying data took ", "").replace("seconds","").strip())
return total
def extract_metrics(path):
merged_metrics_path = "{}/metrics/merged_metrics.xlsx".format(path)
tool_log_path = "{}/metrics/tool_log.csv".format(path)
log_file = open(tool_log_path)
merged_file = pd.read_excel(merged_metrics_path)
transfer_time = None
if "kmerscl-" in path:
transfer_time = extract_transfer_time(log_file)
log_file.seek(0)
time = extract_kmercl_time(log_file)
memory = extract_nvidia_max_memory(merged_file)
log_file = open(tool_log_path)
elif "kmerscl_signature" in path:
transfer_time = extract_transfer_time(log_file)
log_file.seek(0)
time = extract_kmercl_time(log_file)
memory = extract_nvidia_max_memory(merged_file)
log_file = open(tool_log_path)
elif "kmc" in path:
time = extract_kmc_time(log_file)
memory = extract_anytool_max_memory(merged_file)
elif "mspk" in path:
time = extract_mspk_time(log_file)
memory = extract_anytool_max_memory(merged_file)
else:
print "Not valid tool"
exit()
print path
m = re.match(r".*/(?P<tool>\w+)-k(?P<kmer>\w+)-m(?P<mmer>\w+)-r(?P<read_lenght>\w+)-(?P<seq>\w+)_(?P<read_millions>\w+)m_.*", path)
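# Hypothetical example of the expected path layout: ".../kmc-k28-m9-r100-illumina_10m_run1/..."
# would yield {'tool': 'kmc', 'kmer': '28', 'mmer': '9', 'read_lenght': '100',
# 'seq': 'illumina', 'read_millions': '10'}.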
row = m.groupdict()
row['mem'] = memory
row['time'] = time
print "Tool and params: {}".format(path)
print "Used memory {} mb".format(memory)
print "Time {} seconds".format(time)
if transfer_time:
row['tt'] = transfer_time
print "Transfer time {} seconds".format(transfer_time)
return values_to_list(row)
def values_to_list(d):
nd = {}
for k, v in d.iteritems():
nd[k] = [v]
return nd
# df = pd.DataFrame(columns=['tool', 'kmer', 'mmer', 'read_lenght', 'seq', 'time', 'mem','read_millions'])
writer = pd.ExcelWriter('output.xlsx')
paths = argv[1:]
# print df
print extract_metrics(paths[0])
frames = []
for path in paths[:]:
df2 = pd.DataFrame.from_dict(extract_metrics(path))
frames.append(df2)
df = pd.concat(frames)
df.to_excel(writer,'KmerCL_benchmark')
writer.save()
|
mit
|
f3r/scikit-learn
|
examples/svm/plot_separating_hyperplane.py
|
294
|
1273
|
"""
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum-margin separating hyperplane for a two-class
separable dataset using a Support Vector Machine classifier with a
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
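# The decision boundary satisfies w[0]*x + w[1]*y + intercept = 0, so its slope is
# a = -w[0]/w[1] and its y-intercept is -intercept/w[1], as computed above.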
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
ArtsiomCh/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/linear_test.py
|
58
|
71789
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import server_lib
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = base.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = base.Dataset(data=iris.data[ids], target=iris.target[ids])
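# Only iris classes 0 and 1 are kept, so the resulting dataset has 100 examples with binary labels.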
return iris
class LinearClassifierTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearClassifier(
n_classes=3, feature_columns=cont_features),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age':
sparse_tensor.SparseTensor(
values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.sparse_column_with_hash_bucket('age', 2)
classifier = linear.LinearClassifier(
_joint_weight=True, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = feature_column_lib.real_valued_column('', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = linear.LinearClassifier(
n_classes=3,
feature_columns=[language_column],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100, 1], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [language_column, fc_core.numeric_column('age')]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column_lib.real_valued_column('', dimension=4)]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('linear/feature/weight', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertEqual(
4, len(classifier.get_variable_value('linear/feature/weight')))
self.assertEqual(
3, len(classifier.get_variable_value('linear/bias_weight')))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
def _optimizer():
return ftrl.FtrlOptimizer(learning_rate=0.1)
classifier = linear.LinearClassifier(
n_classes=3, optimizer=_optimizer, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, optimizer='Ftrl', feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32)
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = linear.LinearClassifier(
feature_columns=[feature_column_lib.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Tests the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([[1], [2]]), num_epochs=num_epochs),
}, constant_op.constant(
[[.7], [0]], dtype=dtypes.float32)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=sparse_features, config=config)
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([1]), num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out1_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
del classifier
classifier2 = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
out2_class = list(
classifier2.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out2_proba = list(
classifier2.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = linear.LinearClassifier(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_label_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean label should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': constant_op.constant([[20], [20], [20]]),
'weights': constant_op.constant([[100], [1], [1]]),
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = linear.LinearClassifier(
feature_columns=[age], weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertIn('linear/binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier_no_reg = linear.LinearClassifier(feature_columns=[language])
classifier_with_reg = linear.LinearClassifier(
feature_columns=[language],
optimizer=ftrl.FtrlOptimizer(
learning_rate=1.0, l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(input_fn=input_fn,
steps=100).evaluate(
input_fn=input_fn,
steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[1], [1], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier = linear.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2']),
'maintenance_cost': constant_op.constant([[500.0], [200.0]]),
'sq_footage': constant_op.constant([[800.0], [600.0]]),
'weights': constant_op.constant([[1.0], [1.0]])
}, constant_op.constant([[0], [1]])
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
# input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures
# where 2 1-dimensional dense features have been replaced by 1 2-dimensional
# feature.
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2']),
'dense_feature':
constant_op.constant([[500.0, 800.0], [200.0, 600.0]])
}, constant_op.constant([[0], [1]])
dense_feature = feature_column_lib.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'price': constant_op.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
'weights': constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
classifier = linear.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.4, 0.6, 0.3]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClassifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'language':
sparse_tensor.SparseTensor(
values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'country':
sparse_tensor.SparseTensor(
values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[0], [0], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = feature_column_lib.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_language], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age':
constant_op.constant([[1], [2]]),
'language':
sparse_tensor.SparseTensor(
values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}, constant_op.constant([[1], [0]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearRegressor(feature_columns=cont_features),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearRegressor(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
regressor = linear.LinearRegressor(
feature_columns=cont_features,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = regressor.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = list(
regressor.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(
regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = linear.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = linear.LinearRegressor(
model_dir=model_dir, feature_columns=feature_columns)
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7),
feature_column_lib.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=feature_columns, config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
feature_columns = estimator.infer_real_valued_columns_from_input(x)
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
self.assertIn('linear//weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear//weight')
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor_weights.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'x': constant_op.constant(x),
'weights': constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant(y)
x_column = feature_column_lib.real_valued_column('x', dimension=3)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertIn('linear/x/weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear/x/weight')
self.assertAllClose(
[w[0] for w in weights], regressor_weights.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant([[1.4], [-0.8], [2.6]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
variable_names = regressor.get_variable_names()
self.assertIn('linear/price/weight', variable_names)
self.assertIn('linear/country/weights', variable_names)
no_l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Regressor with L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
      All of the instances in this input only have the bias feature, and
      1/4 of the labels are positive. This means that the expected weight for
      the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
          # place_holder is an empty column which is always 0 (absent), because
          # the linear model requires at least one column.
'place_holder':
constant_op.constant([[0.0]] * num_examples),
}, constant_op.constant(
          [[1 if i % 4 == 0 else 0] for i in range(num_examples)])
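    # Labels are 1 for every fourth example, i.e. 10 of the 40 examples, so
    # the mean label (and hence the expected bias weight) is 10 / 40 = 0.25.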
place_holder = feature_column_lib.real_valued_column('place_holder')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[place_holder], optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
Until b/29339026 is resolved, the bias gets regularized with the same
global value for the other columns, and so the expected weights get
shifted and are:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant(
[[x]
for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])
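    # Worked out from the label patterns above: P(y=1 | a) = 4/10 = 0.4 and
    # P(y=1 | b) = 2/10 = 0.2, so a centered solution would have
    # bias = (0.4 + 0.2) / 2 = 0.3, a = 0.4 - 0.3 = 0.1, b = 0.2 - 0.3 = -0.1,
    # matching the values quoted in the docstring of input_fn above.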
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], 0.0, err=0.05)
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
[[-1 if x % 10 == 0 else 0] for x in range(half)])
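    # From the labels above, the mean label is 0.1 for the 'a' half and -0.1
    # for the 'b' half, so the symmetric solution is bias = 0.0, a = 0.1 and
    # b = -0.1, which is exactly what the assertions below check.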
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.1, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], -0.1, err=0.05)
class LinearEstimatorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearEstimator(feature_columns=cont_features,
head=head_lib.regression_head()),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearEstimator)
def testLinearRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(feature_columns=[age, language],
head=head_lib.regression_head())
linear_estimator.fit(input_fn=input_fn, steps=100)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=400)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testPoissonRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(
feature_columns=[age, language],
head=head_lib.poisson_regression_head())
linear_estimator.fit(input_fn=input_fn, steps=10)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=100)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
# Here loss of 2.1 implies a prediction of ~9.9998
self.assertLess(loss2, 2.1)
def testSDCANotSupported(self):
"""Tests that we detect error for SDCA."""
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
with self.assertRaises(ValueError):
linear.LinearEstimator(
head=head_lib.regression_head(label_dimension=1),
feature_columns=[maintenance_cost, sq_footage],
optimizer=sdca_optimizer,
_joint_weights=True)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = linear.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
stinebuu/nest-simulator
|
pynest/examples/spatial/grid_iaf_oc.py
|
12
|
1781
|
# -*- coding: utf-8 -*-
#
# grid_iaf_oc.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Create three populations of iaf_psc_alpha neurons on a 4x3 grid, each with a different center
-----------------------------------------------------------------------------------------------
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
import numpy as np
for ctr in [(0.0, 0.0), (-2.0, 2.0), (0.5, 1.0)]:
plt.figure()
nest.ResetKernel()
l1 = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[4, 3], extent=[2., 1.5],
center=ctr))
nest.PlotLayer(l1, nodesize=50, fig=plt.gcf())
# beautify
plt.axis([-3, 3, -3, 3])
    plt.gca().set_aspect('equal', 'box')
    plt.gca().set_xticks(np.arange(-3.0, 3.1, 1.0))
    plt.gca().set_yticks(np.arange(-3.0, 3.1, 1.0))
    plt.grid(True)
    plt.xlabel('4 Columns, Extent: 2.0, Center: %.1f' % ctr[0])
    plt.ylabel('3 Rows, Extent: 1.5, Center: %.1f' % ctr[1])
plt.show()
# plt.savefig('grid_iaf_oc_{}_{}.png'.format(ctr[0], ctr[1]))
|
gpl-2.0
|
amolkahat/pandas
|
doc/source/conf.py
|
5
|
21946
|
# -*- coding: utf-8 -*-
#
# pandas documentation build configuration file, created by
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
import inspect
import importlib
import logging
import warnings
from sphinx.ext.autosummary import _import_by_name
logger = logging.getLogger(__name__)
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
# https://github.com/sphinx-doc/sphinx/pull/2325/files
# Workaround for sphinx-build recursion limit overflow:
# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# RuntimeError: maximum recursion depth exceeded while pickling an object
#
# Python's default allowed recursion depth is 1000.
sys.setrecursionlimit(5000)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.extend([
# numpy standard doc extensions
os.path.join(os.path.dirname(__file__),
'..', '../..',
'sphinxext')
])
# numpydoc is available in the sphinxext directory, and can't be imported
# until sphinxext is available in the Python path
from numpydoc.docscrape import NumpyDocString
# -- General configuration -----------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# sphinxext.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.todo',
'numpydoc',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.linkcode',
'nbsphinx',
]
try:
import sphinxcontrib.spelling # noqa
except ImportError as err:
logger.warn(('sphinxcontrib.spelling failed to import with error "{}". '
'`spellcheck` command is not available.'.format(err)))
else:
extensions.append('sphinxcontrib.spelling')
exclude_patterns = ['**.ipynb_checkpoints']
spelling_word_list_filename = ['spelling_wordlist.txt', 'names_wordlist.txt']
spelling_ignore_pypi_package_names = True
with open("index.rst") as f:
index_rst_lines = f.readlines()
# only include the slow autosummary feature if we're building the API section
# of the docs
# JP: added from sphinxdocs
autosummary_generate = False
if any(re.match("\s*api\s*", l) for l in index_rst_lines):
autosummary_generate = True
# numpydoc
# for now use old parameter listing (styling + **kwargs problem)
numpydoc_use_blockquotes = True
# use member listing for attributes
numpydoc_attributes_as_param_list = False
# matplotlib plot directive
plot_include_source = True
plot_formats = [("png", 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
plot_pre_code = """import numpy as np
import pandas as pd"""
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pandas'
copyright = u'2008-2014, the pandas development team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import pandas
# version = '%s r%s' % (pandas.__version__, svn_version())
version = str(pandas.__version__)
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all
# documents. default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature_with_gtoc'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'statsmodels.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = os.path.join(html_static_path[0], 'favicon.ico')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# Add redirect for previously existing API pages
# each item is like `(from_old, to_new)`
# To redirect a class and all its methods, see below
# https://github.com/pandas-dev/pandas/issues/16186
moved_api_pages = [
('pandas.core.common.isnull', 'pandas.isna'),
('pandas.core.common.notnull', 'pandas.notna'),
('pandas.core.reshape.get_dummies', 'pandas.get_dummies'),
('pandas.tools.merge.concat', 'pandas.concat'),
('pandas.tools.merge.merge', 'pandas.merge'),
('pandas.tools.pivot.pivot_table', 'pandas.pivot_table'),
('pandas.tseries.tools.to_datetime', 'pandas.to_datetime'),
('pandas.io.clipboard.read_clipboard', 'pandas.read_clipboard'),
('pandas.io.excel.ExcelFile.parse', 'pandas.ExcelFile.parse'),
('pandas.io.excel.read_excel', 'pandas.read_excel'),
('pandas.io.gbq.read_gbq', 'pandas.read_gbq'),
('pandas.io.html.read_html', 'pandas.read_html'),
('pandas.io.json.read_json', 'pandas.read_json'),
('pandas.io.parsers.read_csv', 'pandas.read_csv'),
('pandas.io.parsers.read_fwf', 'pandas.read_fwf'),
('pandas.io.parsers.read_table', 'pandas.read_table'),
('pandas.io.pickle.read_pickle', 'pandas.read_pickle'),
('pandas.io.pytables.HDFStore.append', 'pandas.HDFStore.append'),
('pandas.io.pytables.HDFStore.get', 'pandas.HDFStore.get'),
('pandas.io.pytables.HDFStore.put', 'pandas.HDFStore.put'),
('pandas.io.pytables.HDFStore.select', 'pandas.HDFStore.select'),
('pandas.io.pytables.read_hdf', 'pandas.read_hdf'),
('pandas.io.sql.read_sql', 'pandas.read_sql'),
('pandas.io.sql.read_frame', 'pandas.read_frame'),
('pandas.io.sql.write_frame', 'pandas.write_frame'),
('pandas.io.stata.read_stata', 'pandas.read_stata'),
]
# Again, tuples of (from_old, to_new)
moved_classes = [
('pandas.tseries.resample.Resampler', 'pandas.core.resample.Resampler'),
('pandas.formats.style.Styler', 'pandas.io.formats.style.Styler'),
]
for old, new in moved_classes:
# the class itself...
moved_api_pages.append((old, new))
mod, classname = new.rsplit('.', 1)
klass = getattr(importlib.import_module(mod), classname)
methods = [x for x in dir(klass)
if not x.startswith('_') or x in ('__iter__', '__array__')]
for method in methods:
# ... and each of its public methods
moved_api_pages.append(
("{old}.{method}".format(old=old, method=method),
"{new}.{method}".format(new=new, method=method))
)
html_additional_pages = {
'generated/' + page[0]: 'api_redirect.html'
for page in moved_api_pages
}
html_context = {
'redirects': {old: new for old, new in moved_api_pages}
}
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'pandas'
# -- Options for nbsphinx ------------------------------------------------
nbsphinx_allow_errors = True
# -- Options for LaTeX output --------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples (source start
# file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pandas.tex',
u'pandas: powerful Python data analysis toolkit',
u'Wes McKinney\n\& PyData Development Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
intersphinx_mapping = {
'statsmodels': ('http://www.statsmodels.org/devel/', None),
'matplotlib': ('http://matplotlib.org/', None),
'pandas-gbq': ('https://pandas-gbq.readthedocs.io/en/latest/', None),
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'py': ('https://pylib.readthedocs.io/en/latest/', None)
}
import glob
autosummary_generate = glob.glob("*.rst")
# extlinks alias
extlinks = {'issue': ('https://github.com/pandas-dev/pandas/issues/%s',
'GH'),
'wiki': ('https://github.com/pandas-dev/pandas/wiki/%s',
'wiki ')}
# ignore all deprecation warnings from Panel during doc build
# (to avoid the need to add :okwarning: in many places)
warnings.filterwarnings("ignore", message="\nPanel is deprecated",
category=FutureWarning)
ipython_exec_lines = [
'import numpy as np',
'import pandas as pd',
# This ensures correct rendering on system with console encoding != utf8
# (windows). It forces pandas to encode its output reprs using utf8
# wherever the docs are built. The docs' target is the browser, not
# the console, so this is fine.
'pd.options.display.encoding="utf8"'
]
# Add custom Documenter to handle attributes/methods of an AccessorProperty
# eg pandas.Series.str and pandas.Series.dt (see GH9322)
import sphinx
from sphinx.util import rpartition
from sphinx.ext.autodoc import (
Documenter, MethodDocumenter, AttributeDocumenter)
from sphinx.ext.autosummary import Autosummary
class AccessorDocumenter(MethodDocumenter):
"""
Specialized Documenter subclass for accessors.
"""
objtype = 'accessor'
directivetype = 'method'
# lower than MethodDocumenter so this is not chosen for normal methods
priority = 0.6
def format_signature(self):
# this method gives an error/warning for the accessors, therefore
# overriding it (accessor has no arguments)
return ''
class AccessorLevelDocumenter(Documenter):
"""
Specialized Documenter subclass for objects on accessor level (methods,
attributes).
"""
# This is the simple straightforward version
# modname is None, base the last elements (eg 'hour')
# and path the part before (eg 'Series.dt')
# def resolve_name(self, modname, parents, path, base):
# modname = 'pandas'
# mod_cls = path.rstrip('.')
# mod_cls = mod_cls.split('.')
#
# return modname, mod_cls + [base]
def resolve_name(self, modname, parents, path, base):
if modname is None:
if path:
mod_cls = path.rstrip('.')
else:
mod_cls = None
# if documenting a class-level object without path,
# there must be a current class, either from a parent
# auto directive ...
mod_cls = self.env.temp_data.get('autodoc:class')
# ... or from a class directive
if mod_cls is None:
mod_cls = self.env.temp_data.get('py:class')
# ... if still None, there's no way to know
if mod_cls is None:
return None, []
# HACK: this is added in comparison to ClassLevelDocumenter
# mod_cls still exists of class.accessor, so an extra
# rpartition is needed
modname, accessor = rpartition(mod_cls, '.')
modname, cls = rpartition(modname, '.')
parents = [cls, accessor]
# if the module name is still missing, get it like above
if not modname:
modname = self.env.temp_data.get('autodoc:module')
if not modname:
if sphinx.__version__ > '1.3':
modname = self.env.ref_context.get('py:module')
else:
modname = self.env.temp_data.get('py:module')
# ... else, it stays None, which means invalid
return modname, parents + [base]
class AccessorAttributeDocumenter(AccessorLevelDocumenter,
AttributeDocumenter):
objtype = 'accessorattribute'
directivetype = 'attribute'
# lower than AttributeDocumenter so this is not chosen for normal
# attributes
priority = 0.6
class AccessorMethodDocumenter(AccessorLevelDocumenter, MethodDocumenter):
objtype = 'accessormethod'
directivetype = 'method'
# lower than MethodDocumenter so this is not chosen for normal methods
priority = 0.6
class AccessorCallableDocumenter(AccessorLevelDocumenter, MethodDocumenter):
"""
This documenter lets us removes .__call__ from the method signature for
callable accessors like Series.plot
"""
objtype = 'accessorcallable'
directivetype = 'method'
# lower than MethodDocumenter; otherwise the doc build prints warnings
priority = 0.5
def format_name(self):
return MethodDocumenter.format_name(self).rstrip('.__call__')
class PandasAutosummary(Autosummary):
"""
This alternative autosummary class lets us override the table summary for
Series.plot and DataFrame.plot in the API docs.
"""
def _replace_pandas_items(self, display_name, sig, summary, real_name):
        # this is a hack: ideally we should extract the signature from the
# .__call__ method instead of hard coding this
if display_name == 'DataFrame.plot':
sig = '([x, y, kind, ax, ....])'
summary = 'DataFrame plotting accessor and method'
elif display_name == 'Series.plot':
sig = '([kind, ax, figsize, ....])'
summary = 'Series plotting accessor and method'
return (display_name, sig, summary, real_name)
@staticmethod
def _is_deprecated(real_name):
try:
obj, parent, modname = _import_by_name(real_name)
except ImportError:
return False
doc = NumpyDocString(obj.__doc__ or '')
summary = ''.join(doc['Summary'] + doc['Extended Summary'])
return '.. deprecated::' in summary
def _add_deprecation_prefixes(self, items):
for item in items:
display_name, sig, summary, real_name = item
if self._is_deprecated(real_name):
summary = '(DEPRECATED) %s' % summary
yield display_name, sig, summary, real_name
def get_items(self, names):
items = Autosummary.get_items(self, names)
items = [self._replace_pandas_items(*item) for item in items]
items = list(self._add_deprecation_prefixes(items))
return items
# based on numpy doc/source/conf.py
def linkcode_resolve(domain, info):
"""
    Determine the URL corresponding to a given Python object.
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except:
lineno = None
if lineno:
linespec = "#L{:d}-L{:d}".format(lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__))
if '+' in pandas.__version__:
return ("http://github.com/pandas-dev/pandas/blob/master/pandas/"
"{}{}".format(fn, linespec))
else:
return ("http://github.com/pandas-dev/pandas/blob/"
"v{}/pandas/{}{}".format(pandas.__version__, fn, linespec))
# remove the docstring of the flags attribute (inherited from numpy ndarray)
# because these give doc build errors (see GH issue 5331)
def remove_flags_docstring(app, what, name, obj, options, lines):
if what == "attribute" and name.endswith(".flags"):
del lines[:]
def process_class_docstrings(app, what, name, obj, options, lines):
"""
For those classes for which we use ::
:template: autosummary/class_without_autosummary.rst
the documented attributes/methods have to be listed in the class
docstring. However, if one of those lists is empty, we use 'None',
which then generates warnings in sphinx / ugly html output.
This "autodoc-process-docstring" event connector removes that part
from the processed docstring.
"""
if what == "class":
joined = '\n'.join(lines)
templates = [
""".. rubric:: Attributes
.. autosummary::
:toctree:
None
""",
""".. rubric:: Methods
.. autosummary::
:toctree:
None
"""
]
for template in templates:
if template in joined:
joined = joined.replace(template, '')
lines[:] = joined.split('\n')
suppress_warnings = [
# We "overwrite" autosummary with our PandasAutosummary, but
# still want the regular autosummary setup to run. So we just
# suppress this warning.
'app.add_directive'
]
def setup(app):
app.connect("autodoc-process-docstring", remove_flags_docstring)
app.connect("autodoc-process-docstring", process_class_docstrings)
app.add_autodocumenter(AccessorDocumenter)
app.add_autodocumenter(AccessorAttributeDocumenter)
app.add_autodocumenter(AccessorMethodDocumenter)
app.add_autodocumenter(AccessorCallableDocumenter)
app.add_directive('autosummary', PandasAutosummary)
|
bsd-3-clause
|
tardis-sn/tardis
|
tardis/plasma/properties/ion_population.py
|
1
|
19794
|
import logging
import warnings
import sys
import numpy as np
import pandas as pd
import numexpr as ne
from scipy import interpolate
from tardis.plasma.properties.base import ProcessingPlasmaProperty
from tardis.plasma.properties.continuum_processes import get_ion_multi_index
from tardis.plasma.exceptions import PlasmaIonizationError
logger = logging.getLogger(__name__)
__all__ = [
"PhiSahaNebular",
"PhiSahaLTE",
"RadiationFieldCorrection",
"IonNumberDensity",
"IonNumberDensityHeNLTE",
"SahaFactor",
"ThermalPhiSahaLTE",
]
def calculate_block_ids_from_dataframe(dataframe):
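    # For example, if the first index level of the DataFrame reads
    # [1, 1, 2, 2, 2], this returns [0, 2, 5]: one contiguous block of rows
    # per atomic number.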
block_start_id = (
np.where(np.diff(dataframe.index.get_level_values(0)) != 0.0)[0] + 1
)
return np.hstack(([0], block_start_id, [len(dataframe)]))
class PhiSahaLTE(ProcessingPlasmaProperty):
"""
Attributes
----------
phi : pandas.DataFrame, dtype float
Used for LTE ionization (at the radiation temperature).
Indexed by atomic number, ion number. Columns are zones.
"""
outputs = ("phi",)
latex_name = (r"\Phi",)
latex_formula = (
r"\dfrac{2Z_{i,j+1}}{Z_{i,j}}\Big(\
dfrac{2\pi m_{e}/\beta_{\textrm{rad}}}{h^2}\
Big)^{3/2}e^{\dfrac{-\chi_{i,j}}{kT_{\textrm{rad}}}}",
)
broadcast_ionization_energy = None
@staticmethod
def calculate(g_electron, beta_rad, partition_function, ionization_data):
phis = np.empty(
(
partition_function.shape[0]
- partition_function.index.get_level_values(0).unique().size,
partition_function.shape[1],
)
)
block_ids = calculate_block_ids_from_dataframe(partition_function)
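        # partition_function rows are grouped into one block per atomic number;
        # the ratio of successive ion partition functions within a block gives
        # one phi per ionization stage, so every block loses one row here.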
for i, start_id in enumerate(block_ids[:-1]):
end_id = block_ids[i + 1]
current_block = partition_function.values[start_id:end_id]
current_phis = current_block[1:] / current_block[:-1]
phis[start_id - i : end_id - i - 1] = current_phis
broadcast_ionization_energy = ionization_data[
partition_function.index
].dropna()
phi_index = broadcast_ionization_energy.index
broadcast_ionization_energy = broadcast_ionization_energy.values
phi_coefficient = (
2
* g_electron
* np.exp(np.outer(broadcast_ionization_energy, -beta_rad))
)
return pd.DataFrame(phis * phi_coefficient, index=phi_index)
@staticmethod
def _calculate_block_ids(partition_function):
partition_function.index.get_level_values(0).unique()
class ThermalPhiSahaLTE(PhiSahaLTE):
"""
Attributes
----------
phi : pandas.DataFrame, dtype float
Used for LTE ionization (at the electron temperature).
Indexed by atomic number, ion number. Columns are zones.
"""
outputs = ("thermal_phi_lte",)
latex_name = (r"\Phi^{*}(T_\mathrm{e})",)
latex_formula = (
r"\dfrac{2Z_{i,j+1}}{Z_{i,j}}\Big(\
dfrac{2\pi m_{e}/\beta_{\textrm{electron}}}{h^2}\
Big)^{3/2}e^{\dfrac{-\chi_{i,j}}{kT_{\textrm{electron}}}}",
)
@staticmethod
def calculate(
thermal_g_electron,
beta_electron,
thermal_lte_partition_function,
ionization_data,
):
return super(ThermalPhiSahaLTE, ThermalPhiSahaLTE).calculate(
thermal_g_electron,
beta_electron,
thermal_lte_partition_function,
ionization_data,
)
class PhiSahaNebular(ProcessingPlasmaProperty):
"""
Attributes
----------
phi : pandas.DataFrame, dtype float
Used for nebular ionization. Indexed by atomic number, ion number.
Columns are zones.
"""
outputs = ("phi",)
latex_name = (r"\Phi",)
latex_formula = (
r"W(\delta\zeta_{i,j}+W(1-\zeta_{i,j}))\left(\
dfrac{T_{\textrm{electron}}}{T_{\textrm{rad}}}\right)^{1/2}",
)
@staticmethod
def calculate(
t_rad,
w,
zeta_data,
t_electrons,
delta,
g_electron,
beta_rad,
partition_function,
ionization_data,
):
phi_lte = PhiSahaLTE.calculate(
g_electron, beta_rad, partition_function, ionization_data
)
zeta = PhiSahaNebular.get_zeta_values(zeta_data, phi_lte.index, t_rad)
phis = (
phi_lte
* w
* ((zeta * delta) + w * (1 - zeta))
* (t_electrons / t_rad) ** 0.5
)
return phis
@staticmethod
def get_zeta_values(zeta_data, ion_index, t_rad):
zeta_t_rad = zeta_data.columns.values.astype(np.float64)
zeta_values = zeta_data.loc[ion_index].values.astype(np.float64)
zeta = interpolate.interp1d(
zeta_t_rad, zeta_values, bounds_error=False, fill_value=np.nan
)(t_rad)
zeta = zeta.astype(float)
if np.any(np.isnan(zeta)):
warnings.warn(
f"t_rads outside of zeta factor interpolation"
f" zeta_min={zeta_data.columns.values.min():.2f} zeta_max={zeta_data.columns.values.max():.2f} "
f"- replacing with {t_rad}"
)
zeta[np.isnan(zeta)] = 1.0
return zeta
class RadiationFieldCorrection(ProcessingPlasmaProperty):
"""
Attributes
----------
delta : pandas.DataFrame, dtype float
Calculates the radiation field correction (see Mazzali & Lucy, 1993) if
        not given as input in the config file. The default chi_0_species is
Ca II, which is good for type Ia supernovae. For type II supernovae,
(1, 1) should be used. Indexed by atomic number, ion number. The columns are zones.
"""
outputs = ("delta",)
latex_name = (r"\delta",)
def __init__(
self,
plasma_parent=None,
departure_coefficient=None,
chi_0_species=(20, 2),
delta_treatment=None,
):
super(RadiationFieldCorrection, self).__init__(plasma_parent)
self.departure_coefficient = departure_coefficient
self.delta_treatment = delta_treatment
self.chi_0_species = chi_0_species
def _set_chi_0(self, ionization_data):
if self.chi_0_species == (20, 2):
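            # Hard-coded ionization energy of Ca II (roughly 11.87 eV expressed
            # in erg), which skips the atomic-data lookup for the default species.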
self.chi_0 = 1.9020591570241798e-11
else:
self.chi_0 = ionization_data.loc[self.chi_0_species]
def calculate(
self, w, ionization_data, beta_rad, t_electrons, t_rad, beta_electron
):
if getattr(self, "chi_0", None) is None:
self._set_chi_0(ionization_data)
if self.delta_treatment is None:
if self.departure_coefficient is None:
departure_coefficient = 1.0 / w
else:
departure_coefficient = self.departure_coefficient
radiation_field_correction = -np.ones(
(len(ionization_data), len(beta_rad))
)
less_than_chi_0 = (ionization_data < self.chi_0).values
factor_a = t_electrons / (departure_coefficient * w * t_rad)
radiation_field_correction[~less_than_chi_0] = factor_a * np.exp(
np.outer(
ionization_data.values[~less_than_chi_0],
beta_rad - beta_electron,
)
)
radiation_field_correction[less_than_chi_0] = 1 - np.exp(
np.outer(ionization_data.values[less_than_chi_0], beta_rad)
- beta_rad * self.chi_0
)
radiation_field_correction[less_than_chi_0] += factor_a * np.exp(
np.outer(ionization_data.values[less_than_chi_0], beta_rad)
- self.chi_0 * beta_electron
)
else:
radiation_field_correction = (
np.ones((len(ionization_data), len(beta_rad)))
* self.delta_treatment
)
delta = pd.DataFrame(
radiation_field_correction,
columns=np.arange(len(t_rad)),
index=ionization_data.index,
)
return delta
class IonNumberDensity(ProcessingPlasmaProperty):
"""
Convergence process to find the correct solution. A trial value for
the electron density is initiated in a particular zone. The ion
number densities are then calculated using the Saha equation. The
electron density is then re-calculated by using the ion number
densities to sum over the number of free electrons. If the two values
for the electron densities are not similar to within the threshold
value, a new guess for the value of the electron density is chosen
and the process is repeated.
Attributes
----------
ion_number_density : pandas.DataFrame, dtype float
Index atom number, ion number. Columns zones.
electron_densities : numpy.ndarray, dtype float
"""
outputs = ("ion_number_density", "electron_densities")
latex_name = (
"N_{i,j}",
"n_{e}",
)
def __init__(
self, plasma_parent, ion_zero_threshold=1e-20, electron_densities=None
):
super(IonNumberDensity, self).__init__(plasma_parent)
self.ion_zero_threshold = ion_zero_threshold
self.block_ids = None
self._electron_densities = electron_densities
@staticmethod
def calculate_with_n_electron(
phi,
partition_function,
number_density,
n_electron,
block_ids,
ion_zero_threshold,
):
if block_ids is None:
block_ids = IonNumberDensity._calculate_block_ids(phi)
ion_populations = np.empty_like(partition_function.values)
phi_electron = np.nan_to_num(phi.values / n_electron.values)
for i, start_id in enumerate(block_ids[:-1]):
end_id = block_ids[i + 1]
current_phis = phi_electron[start_id:end_id]
phis_product = np.cumprod(current_phis, 0)
tmp_ion_populations = np.empty(
(current_phis.shape[0] + 1, current_phis.shape[1])
)
tmp_ion_populations[0] = number_density.values[i] / (
1 + np.sum(phis_product, axis=0)
)
tmp_ion_populations[1:] = tmp_ion_populations[0] * phis_product
ion_populations[start_id + i : end_id + 1 + i] = tmp_ion_populations
ion_populations[ion_populations < ion_zero_threshold] = 0.0
return (
pd.DataFrame(data=ion_populations, index=partition_function.index),
block_ids,
)
@staticmethod
def _calculate_block_ids(phi):
return calculate_block_ids_from_dataframe(phi)
def calculate(self, phi, partition_function, number_density):
if self._electron_densities is None:
n_e_convergence_threshold = 0.05
n_electron = number_density.sum(axis=0)
n_electron_iterations = 0
while True:
(
ion_number_density,
self.block_ids,
) = self.calculate_with_n_electron(
phi,
partition_function,
number_density,
n_electron,
self.block_ids,
self.ion_zero_threshold,
)
ion_numbers = ion_number_density.index.get_level_values(
1
).values
ion_numbers = ion_numbers.reshape((ion_numbers.shape[0], 1))
new_n_electron = (ion_number_density.values * ion_numbers).sum(
axis=0
)
if np.any(np.isnan(new_n_electron)):
raise PlasmaIonizationError(
'n_electron just turned "nan" -' " aborting"
)
n_electron_iterations += 1
if n_electron_iterations > 100:
logger.warn(
f"n_electron iterations above 100 ({n_electron_iterations}) -"
f" something is probably wrong"
)
if np.all(
np.abs(new_n_electron - n_electron) / n_electron
< n_e_convergence_threshold
):
break
n_electron = 0.5 * (new_n_electron + n_electron)
else:
n_electron = self._electron_densities
ion_number_density, self.block_ids = self.calculate_with_n_electron(
phi,
partition_function,
number_density,
n_electron,
self.block_ids,
self.ion_zero_threshold,
)
return ion_number_density, n_electron
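# Minimal standalone sketch (hypothetical names, scalar values) of the damped
# fixed-point iteration used in IonNumberDensity.calculate above; `recompute_n_e`
# stands in for the Saha-based re-evaluation of the electron density.
def _n_electron_iteration_sketch(recompute_n_e, n_e_guess, threshold=0.05, max_iterations=100):
    for _ in range(max_iterations):
        n_e_new = recompute_n_e(n_e_guess)
        if abs(n_e_new - n_e_guess) / n_e_guess < threshold:
            return n_e_new
        n_e_guess = 0.5 * (n_e_new + n_e_guess)  # damped update, as in the loop above
    return n_e_guess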
class IonNumberDensityHeNLTE(ProcessingPlasmaProperty):
"""
Convergence process to find the correct solution. A trial value for
the electron density is initiated in a particular zone. The ion
number densities are then calculated using the Saha equation. The
electron density is then re-calculated by using the ion number
densities to sum over the number of free electrons. If the two values
for the electron densities are not similar to within the threshold
value, a new guess for the value of the electron density is chosen
and the process is repeated.
Attributes
----------
ion_number_density : pandas.DataFrame, dtype float
Index atom number, ion number. Columns zones.
electron_densities : numpy.ndarray, dtype float
"""
outputs = (
"ion_number_density",
"electron_densities",
"helium_population_updated",
)
latex_name = (
"N_{i,j}",
"n_{e}",
)
def __init__(
self, plasma_parent, ion_zero_threshold=1e-20, electron_densities=None
):
super(IonNumberDensityHeNLTE, self).__init__(plasma_parent)
self.ion_zero_threshold = ion_zero_threshold
self.block_ids = None
self._electron_densities = electron_densities
def update_he_population(
self, helium_population, n_electron, number_density
):
helium_population_updated = helium_population.copy()
he_one_population = helium_population_updated.loc[0].mul(n_electron)
he_three_population = helium_population_updated.loc[2].mul(
1.0 / n_electron
)
helium_population_updated.loc[0].update(he_one_population)
helium_population_updated.loc[2].update(he_three_population)
unnormalised = helium_population_updated.sum()
normalised = helium_population_updated.mul(
number_density.loc[2] / unnormalised
)
helium_population_updated.update(normalised)
return helium_population_updated
def calculate(
self, phi, partition_function, number_density, helium_population
):
if self._electron_densities is None:
n_e_convergence_threshold = 0.05
n_electron = number_density.sum(axis=0)
n_electron_iterations = 0
while True:
(
ion_number_density,
self.block_ids,
) = IonNumberDensity.calculate_with_n_electron(
phi,
partition_function,
number_density,
n_electron,
self.block_ids,
self.ion_zero_threshold,
)
helium_population_updated = self.update_he_population(
helium_population, n_electron, number_density
)
ion_number_density.loc[2, 0].update(
helium_population_updated.loc[0].sum(axis=0)
)
ion_number_density.loc[2, 1].update(
helium_population_updated.loc[1].sum(axis=0)
)
ion_number_density.loc[2, 2].update(
helium_population_updated.loc[2, 0]
)
ion_numbers = ion_number_density.index.get_level_values(
1
).values
ion_numbers = ion_numbers.reshape((ion_numbers.shape[0], 1))
new_n_electron = (ion_number_density.values * ion_numbers).sum(
axis=0
)
if np.any(np.isnan(new_n_electron)):
raise PlasmaIonizationError(
'n_electron just turned "nan" -' " aborting"
)
n_electron_iterations += 1
if n_electron_iterations > 100:
logger.warn(
f"n_electron iterations above 100 ({n_electron_iterations}) -"
f" something is probably wrong"
)
if np.all(
np.abs(new_n_electron - n_electron) / n_electron
< n_e_convergence_threshold
):
break
n_electron = 0.5 * (new_n_electron + n_electron)
else:
n_electron = self._electron_densities
(
ion_number_density,
self.block_ids,
) = IonNumberDensity.calculate_with_n_electron(
phi,
partition_function,
number_density,
n_electron,
self.block_ids,
self.ion_zero_threshold,
)
helium_population_updated = self.update_he_population(
helium_population, n_electron, number_density
)
ion_number_density.loc[2, 0].update(
helium_population_updated.loc[0].sum(axis=0)
)
ion_number_density.loc[2, 1].update(
helium_population_updated.loc[1].sum(axis=0)
)
ion_number_density.loc[2, 2].update(
helium_population_updated.loc[2, 0]
)
return ion_number_density, n_electron, helium_population_updated
class SahaFactor(ProcessingPlasmaProperty):
"""
Calculates the 'Saha factor' Phi_ik = n_i* / (n_k* n_e), i.e.,
the ratio of the LTE level population n_i*, and the product of
the LTE ion density n_k* and the actual electron density n_e.
Attributes
----------
phi_ik : pandas.DataFrame, dtype float
Indexed by atom number, ion number, level number.
Columns are zones.
"""
outputs = ("phi_ik",)
latex_name = (r"\Phi_{i,\kappa}",)
def calculate(
self,
thermal_phi_lte,
thermal_lte_level_boltzmann_factor,
thermal_lte_partition_function,
):
boltzmann_factor = self._prepare_boltzmann_factor(
thermal_lte_level_boltzmann_factor
)
phi_saha_index = get_ion_multi_index(boltzmann_factor.index)
partition_function_index = get_ion_multi_index(
boltzmann_factor.index, next_higher=False
)
phi_saha = thermal_phi_lte.loc[phi_saha_index].values
# Replace zero values in phi_saha to avoid zero division in Saha factor
phi_saha[phi_saha == 0.0] = sys.float_info.min
partition_function = thermal_lte_partition_function.loc[
partition_function_index
].values
return boltzmann_factor / (phi_saha * partition_function)
@staticmethod
def _prepare_boltzmann_factor(boltzmann_factor):
atomic_number = boltzmann_factor.index.get_level_values(0)
ion_number = boltzmann_factor.index.get_level_values(1)
selected_ions_mask = atomic_number != ion_number
return boltzmann_factor[selected_ions_mask]
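# By the definition in the SahaFactor docstring above, phi_ik rearranges to
# n_i* = phi_ik * n_k* * n_e, i.e. multiplying the Saha factor by the LTE ion
# density and the actual electron density recovers the LTE level population.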
|
bsd-3-clause
|
alexeyum/scikit-learn
|
examples/ensemble/plot_feature_transformation.py
|
115
|
4327
|
"""
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the logistic regression model to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
y_train,
test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator,
random_state=0)
rt_lm = LogisticRegression()
pipeline = make_pipeline(rt, rt_lm)
pipeline.fit(X_train, y_train)
y_pred_rt = pipeline.predict_proba(X_test)[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show()
|
bsd-3-clause
|
aewhatley/scikit-learn
|
examples/text/document_classification_20newsgroups.py
|
222
|
10500
|
"""
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
|
bsd-3-clause
|
benjaminpope/whisky
|
pysco/kpi.py
|
2
|
22629
|
''' --------------------------------------------------------------------
PYSCO: PYthon Self Calibrating Observables
--------------------------------------------------------------------
---
pysco is a python module to create, and extract Kernel-phase data
structures, using the theory of Martinache, 2010, ApJ, 724, 464.
----
This file contains the definition of the kpi class:
--------------------------------------------------
an object that contains the linear model for the optical system
of interest. Properties of this model are:
--> name : name of the model (HST, Keck, Annulus_19, ...)
--> mask : array of coordinates for pupil sample points
--> uv : matching array of coordinates in uv plane (baselines)
--> RED : vector coding the redundancy of these baselines
--> TFM : transfer matrix, linking pupil-phase to uv-phase
--> KerPhi : array storing the kernel-phase relations
--> uvrel : matrix storing the relations between sampling points and uv-points
-------------------------------------------------------------------- '''
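# Typical usage, with hypothetical file names (see __init__ and from_coord_file
# below):
#   kp = kpi(file='golay9.kpi.gz')       # load a pre-made, gzipped kpi structure
#   kp = kpi(file='golay9_coords.txt')   # or build one from pupil (x, y) coordinates
#   kp.plot_pupil_and_uv(xymax=8.0)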
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import copy
import pickle
import os
import sys
import gzip
import time
from scipy.sparse.linalg import svds
from core import *
from scipy.io.idl import readsav
class kpi(object):
''' Fundamental kernel-phase relations
-----------------------------------------------------------------------
This object condenses all the knowledge about a given instrument pupil
geometry into a series of arrays useful for kernel-phase analysis as
well as for other purposes, such as wavefront sensing.
----------------------------------------------------------------------- '''
name = "" # default array name. Should be descriptive of the array geometry
# =========================================================================
# =========================================================================
def __init__(self, file=None, maskname=None,bsp_mat=None,verbose=False,Ns=3):
''' Default instantiation of a KerPhase_Relation object:
-------------------------------------------------------------------
Default instantiation of this KerPhase_Relation class is achieved
by loading a pre-made file, containing all the relevant information
-------------------------------------------------------------------'''
try:
# -------------------------------
# load the pickled data structure
# -------------------------------
myf = gzip.GzipFile(file, "r")
data = pickle.load(myf)
myf.close()
# -------------------------------
# restore the variables for this
# session of Ker-phase use!
# -------------------------------
#print "TEST:", data['toto']
try: self.name = data['name']
except: self.name = "UNKNOWN"
self.uv = data['uv']
self.mask = data['mask']
self.RED = data['RED']
self.KerPhi = data['KerPhi']
self.TFM = data['TFM']
try:
self.uv_to_bsp = data['uv_to_bsp']
self.bsp_s = data['bsp_s']
self.nbsp = self.uv_to_bsp.shape[0]
except:
print 'No bispec'
self.nbh = self.mask.shape[0]
self.nbuv = self.uv.shape[0]
self.nkphi = self.KerPhi.shape[0]
try : self.uvrel = data['uvrel']
except: self.uvrel = np.array([])
except:
print("File %s isn't a valid Ker-phase data structure" % (file))
# try:
if maskname == None:
print 'Creating from coordinate file'
self.from_coord_file(file,bsp_mat = bsp_mat,verbose=verbose,Ns=Ns)
else:
print 'Creating from mfdata file'
self.from_mf(file,maskname)
# except:
# print("Failed.")
# return None
# =========================================================================
# =========================================================================
def from_mf(self, file, maskname, array_name=""):
''' Creation of the KerPhase_Relation object from a matched filter file.
----------------------------------------------------------------
This duplicates the functionality of from_coord_file for masking data.
Input is a matched filter idlvar file.
---------------------------------------------------------------- '''
mfdata = readsav(file)
maskdata = readsav(maskname)
self.mask = maskdata['xy_coords']
self.nbh = mfdata.n_holes # number of sub-Ap
print 'nbuv = ', mfdata.n_baselines
self.nbuv = mfdata.n_baselines
ndgt = 6 # number of digits of precision for rounding
prec = 10**(-ndgt)
# ================================================
# Create a kpi representation of the closure phase
# operator
# ================================================
self.uv = np.zeros((self.nbuv,2))
self.uv[:,0] = mfdata.u
self.uv[:,1] = mfdata.v
print self.uv.shape
# 2. Calculate the transfer matrix and the redundancy vector
# --------------------------------------------------------------
self.RED = np.ones(self.nbuv, dtype=float) # Redundancy
self.nkphi = mfdata.n_bispect # number of Ker-phases
self.KerPhi = np.zeros((self.nkphi, self.nbuv)) # allocate the array
self.TFM = self.KerPhi # assuming a non-redundant array!
for k in range(0,self.nkphi):
yes = mfdata.bs2bl_ix[k,:]
self.KerPhi[k,yes] = 1
# =========================================================================
# =========================================================================
def from_coord_file(self, file, array_name="", Ns=3,verbose=False, bsp_mat='sparse'):
''' Creation of the KerPhase_Relation object from a pupil mask file:
----------------------------------------------------------------
This is the core function of this class, really...
Input is a pupil coordinates file, containing one set of (x,y)
coordinates per line. Coordinates are in meters. From this, all
the intermediate products that lead to the kernel-phase matrix
KerPhi are calculated.
Set Ns < 2 for undersampled data [AL, 20.02.2014]
---------------------------------------------------------------- '''
self.mask = 1.0 * np.loadtxt(file) # sub-Ap. coordinate files
self.nbh = self.mask.shape[0] # number of sub-Ap
ndgt = 6 # number of digits of precision for rounding
prec = 10**(-ndgt)
# ================================================
# Determine all the baselines in the array.
# ================================================
# 1. Start by doing all the possible combinations of coordinates
# --------------------------------------------------------------
# in the array to calculate the baselines. The intent here, is
# to work with redundant arrays of course, so there will be plenty
# of duplicates.
nbh = self.nbh # local representation of the class variable
uvx = np.zeros(nbh * (nbh-1)) # prepare empty arrays to store
uvy = np.zeros(nbh * (nbh-1)) # the baselines
k = 0 # index for possible combinations (k = f(i,j))
uvi = np.zeros(nbh * (nbh-1), dtype=int) # arrays to store the possible
uvj = np.zeros(nbh * (nbh-1), dtype=int) # combinations k=f(i,j) !!
for i in range(nbh): # do all the possible combinations of
for j in range(nbh): # sub-apertures
if i != j:
uvx[k] = self.mask[i,0] - self.mask[j,0]
uvy[k] = self.mask[i,1] - self.mask[j,1]
# ---
uvi[k], uvj[k] = i, j
k+=1
a = np.unique(np.round(uvx, ndgt)) # distinct u-component of baselines
nbx = a.shape[0] # number of distinct u-components
uv_sel = np.zeros((0,2)) # array for "selected" baselines
for i in range(nbx): # identify distinct v-coords and fill uv_sel
b = np.where(np.abs(uvx - a[i]) <= prec)
c = np.unique(np.round(uvy[b], ndgt))
nby = np.shape(c)[0] # number of distinct v-compoments
app = np.ones(nby)*a[i]
uv_sel = np.append(uv_sel, np.array([app,c]).T, axis=0)
self.nbuv = np.shape(uv_sel)[0]/2 # actual number of distinct uv points
self.uv = uv_sel[:self.nbuv,:] # discard second half (symmetric)
print "%d distinct baselines were identified" % (self.nbuv,)
# 1.5. Special case for undersampled data
# ---------------------------------------
if (Ns < 2):
uv_sampl = self.uv.copy() # copy previously identified baselines
uvm = np.abs(self.uv).max() # max baseline length
keep = (np.abs(uv_sampl[:,0]) < (uvm*Ns/2.)) * \
(np.abs(uv_sampl[:,1]) < (uvm*Ns/2.))
self.uv = uv_sampl[keep]
self.nbuv = (self.uv.shape)[0]
print "%d baselines were kept (undersampled data)" % (self.nbuv,)
# 2. Calculate the transfer matrix and the redundancy vector
# [AL, 2014.05.22] keeping relations between uv points and sampling points
# --------------------------------------------------------------
self.TFM = np.zeros((self.nbuv, self.nbh), dtype=float) # matrix
self.RED = np.zeros(self.nbuv, dtype=float) # Redundancy
# relations matrix (-1 = not connected. NB: only positive baselines are saved)
self.uvrel=-np.ones((nbh,nbh),dtype='int')
for i in range(self.nbuv):
a=np.where((np.abs(self.uv[i,0]-uvx) <= prec) *
(np.abs(self.uv[i,1]-uvy) <= prec))
for k in range(len(a[0])) :
self.uvrel[uvi[a][k],uvj[a][k]]=i
#self.uvrel[uvj[a][k],uvi[a][k]]=i
self.TFM[i, uvi[a]] += 1.0
self.TFM[i, uvj[a]] -= 1.0
self.RED[i] = np.size(a)
# converting to relations matrix
# 3. Determine the kernel-phase relations
# ----------------------------------------
# One sub-aperture is taken as reference: the corresponding
# column of the transfer matrix is discarded. TFM is now a
# (nbuv) x (nbh - 1) array.
# The choice is up to the user... but the simplest is to
# discard the first column, that is, use the first aperture
# as a reference?
self.TFM = self.TFM[:,1:] # cf. explanation
self.TFM = np.dot(np.diag(1./self.RED), self.TFM) # experiment #[Al, 2014.05.12] Frantz's version
U, S, Vh = np.linalg.svd(self.TFM.T, full_matrices=1)
S1 = np.zeros(self.nbuv)
S1[0:nbh-1] = S
self.nkphi = np.size(np.where(abs(S1) < 1e-3)) # number of Ker-phases
KPhiCol = np.where(abs(S1) < 1e-3)[0]
self.KerPhi = np.zeros((self.nkphi, self.nbuv)) # allocate the array
for i in range(self.nkphi):
self.KerPhi[i,:] = (Vh)[KPhiCol[i],:]
if verbose:
print '-------------------------------'
print 'Singular values for this array:\n', np.round(S, ndgt)
print '\nRedundancy Vector:\n', self.RED
else:
print '%d Kernel Phases identified.' % self.nkphi
self.name = array_name
if bsp_mat is not None:
print 'Now calculating bispectrum'
self.generate_bispectrum_matrix2(bsp_mat = bsp_mat)
# =========================================================================
# =========================================================================
def plot_pupil_and_uv(self, xymax = 8.0):
''' Nice plot of the pupil sampling and matching uv plane.
--------------------------------------------------------------------
xymax just specifies the size of the region represented in the plot,
expressed in meters. Should typically be slightly larger than the
largest baseline in the array.
--------------------------------------------------------------------'''
plt.clf()
f0 = plt.subplot(121)
f0.plot(self.mask[:,0], self.mask[:,1], 'bo')
f0.axis([-xymax, xymax, -xymax, xymax], aspect='equal')
plt.title(self.name+' pupil')
f1 = plt.subplot(122)
        f1.plot(self.uv[:,0], self.uv[:,1], 'bo') # plot baselines + symmetric
f1.plot(-self.uv[:,0], -self.uv[:,1], 'ro') # for a "complete" feel
plt.title(self.name+' uv coverage')
f1.axis([-2*xymax, 2*xymax, -2*xymax, 2*xymax], aspect='equal')
# complete previous plot with redundancy of the baseline
# -------------------------------------------------------
dy = 0.1*abs(self.uv[0,1]-self.uv[1,1]) # to offset text in the plot.
for i in range(self.nbuv):
f1.text(self.uv[i,0]+dy, self.uv[i,1]+dy,
int(self.RED[i]), ha='center')
f0.axis('equal')
f1.axis('equal')
#plt.draw()
# =========================================================================
# =========================================================================
def save_to_file(self, file):
''' Export the KerPhase_Relation data structure into a pickle
----------------------------------------------------------------
To save on disk space, this procedure uses the gzip module.
While there is no requirement for a specific extension for the
file, I would recommend that one uses ".kpi.gz", so as to make
it obvious that the file is a gzipped kpi data structure.
---------------------------------------------------------------- '''
try:
try:
data = {'name' : self.name,
'mask' : self.mask,
'uv' : self.uv,
'TFM' : self.TFM,
'KerPhi' : self.KerPhi,
'RED' : self.RED,
'uvrel' : self.uvrel,
# 'uv_samp_rev': self.uv_samp_rev,
'uv_to_bsp': self.uv_to_bsp,
'bsp_s': self.bsp_s}
print 'KerPhase_Relation data structure was saved.'
except:
data = {'name' : self.name,
'mask' : self.mask,
'uv' : self.uv,
'TFM' : self.TFM,
'KerPhi' : self.KerPhi,
# 'uv_samp_rev': self.uv_samp_rev,
'RED' : self.RED,
'uvrel' : self.uvrel}
print 'KerPhase_Relation data structure was saved. No bispectrum!'
except:
print("KerPhase_Relation data structure is incomplete")
print("File %s wasn't saved!" % (file,))
return None
# -------------
try: myf = gzip.GzipFile(file, "wb")
except:
print("File %s cannot be created."+
" KerPhase_Relation data structure wasn't saved." % (file,))
return None
# -------------
pickle.dump(data, myf, -1)
myf.close()
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
def generate_bispectrum_matrix2(self,n=5,n_guess_bsp=1e6,verbose=False,bsp_mat='sparse'):
''' Calculates the matrix to convert from uv phases to bispectra.
This version iterates through the sampling points in a vectorized way.
It saves all of the triangles, then removes the duplicates every 'n'
iterations. Reduce the number to save on ram but make it much slower.
n_guess_bsp: guess the number of bispectra and pre-allocate the memory
to save millions of 'append' calls (which was the slowest part). It must
be large enough to contain all of the bispectra, or you will get an error.
'''
nbsp=self.nbuv*(self.nbuv-1)*(self.nbuv-2) / 6
uv_to_bsp = np.zeros((n_guess_bsp,self.nbuv),dtype=np.long)
bsp_u = np.zeros((n_guess_bsp,3)) # the u points of each bispectrum point
bsp_v = np.zeros((n_guess_bsp,3)) # the v points of each bispectrum point
already_done = np.zeros((n_guess_bsp),dtype=np.longlong) # to track the sets of uv points that have already been counted
bsp_ix=0
uvrel=self.uvrel+np.transpose(self.uvrel)+1
nbits=np.longlong(np.ceil(np.log(self.nbuv)/np.log(10)))
print 'Calculating bispectrum matrix. Will take a few minutes.'
# Loop over the first pupil sampling point
tstart=time.time()
for ix1 in range(self.nbh):
# Loop over the second pupil sampling point
for ix2 in range(ix1+1,self.nbh):
# Rather than a for loop, vectorize it!
ix3s=np.arange(ix2+1,self.nbh)
n_ix3s=ix3s.size
if (bsp_ix+n_ix3s) > n_guess_bsp:
raise IndexError('Number of calculated bispectra exceeds the initial guess for the matrix size!')
# Find the baseline indices
b1_ix=uvrel[ix1,ix2]
b2_ixs=uvrel[ix2,ix3s]
b3_ixs=uvrel[ix1,ix3s] # we actually want the negative of this baseline
b1_ixs=np.repeat(b1_ix,n_ix3s)
# What uv points are these?
uv1=self.uv[b1_ixs,:]
uv2=self.uv[b2_ixs,:]
uv3=self.uv[b3_ixs,:]
# Are they already in the array? (any permutation of these baselines is the same)
# Convert to a single number to find out.
bl_ixs=np.array([b1_ixs,b2_ixs,b3_ixs])
bl_ixs=np.sort(bl_ixs,axis=0)
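                # Sorting each baseline triplet and packing it into a single
                # base-10**nbits integer gives a permutation-invariant key, so a
                # triangle already counted in another order is caught by the
                # duplicate removal below.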
these_triplet_nums=(10**(2*nbits))*bl_ixs[2,:]+ (10**nbits)*bl_ixs[1,:]+bl_ixs[0,:]
# Just add them all and remove the duplicates later.
already_done[bsp_ix:bsp_ix+n_ix3s]=these_triplet_nums
# add to all the arrays
uv_to_bsp_line=np.zeros((n_ix3s,self.nbuv))
diag=np.arange(n_ix3s)
uv_to_bsp_line[diag,b1_ixs]+=1
uv_to_bsp_line[diag,b2_ixs]+=1
uv_to_bsp_line[diag,b3_ixs]+=-1
uv_to_bsp[bsp_ix:bsp_ix+n_ix3s,:]=uv_to_bsp_line
bsp_u[bsp_ix:bsp_ix+n_ix3s,:]=np.transpose(np.array([uv1[:,0],uv2[:,0],uv3[:,0]]))
bsp_v[bsp_ix:bsp_ix+n_ix3s,:]=np.transpose(np.array([uv1[:,1],uv2[:,1],uv3[:,1]]))
bsp_ix+=n_ix3s
# remove the duplicates every n loops
if (ix1 % n) == ((self.nbh-1) % n):
# the (nbh-1 mod n) ensures we do this on the last iteration as well
dummy,unique_ix=np.unique(already_done[0:bsp_ix+n_ix3s],return_index=True)
bsp_ix=len(unique_ix)
already_done[0:bsp_ix]=already_done[unique_ix]
already_done[bsp_ix:]=0
uv_to_bsp[0:bsp_ix]=uv_to_bsp[unique_ix]
bsp_u[0:bsp_ix]=bsp_u[unique_ix]
bsp_v[0:bsp_ix]=bsp_v[unique_ix]
# Only print the status every 5*n iterations
if (ix1 % (5*n)) == ((self.nbh-1) % n):
print 'Done',ix1,'of',self.nbh,'. ',bsp_ix,' bispectra found. Time taken:',np.round(time.time()-tstart,decimals=1),'sec'
print 'Done. Total time taken:',np.round((time.time()-tstart)/60.,decimals=1),'mins'
# Remove the excess parts of each array and attach them to the kpi.
nbsp=bsp_ix
self.already_done=already_done
self.nbsp=bsp_ix
self.uv_to_bsp=uv_to_bsp[0:bsp_ix]
self.bsp_u=bsp_u[0:bsp_ix]
self.bsp_v=bsp_v[0:bsp_ix]
print 'Found',nbsp,'bispectra'
t_start2 = time.time()
tol = 1e-5
try:
if bsp_mat == 'sparse':
print 'Doing sparse svd'
rank = np.linalg.matrix_rank(uv_to_bsp.astype('double'), tol = tol)
print 'Matrix rank:',rank
u, s, vt = svds(uv_to_bsp.astype('double').T, k=rank)
elif bsp_mat == 'full':
print 'Attempting full svd'
u, s, vt = np.linalg.svd(uv_to_bsp.astype('double').T,full_matrices=False)
rank = np.sum(s>tol)
sys.stdout.flush()
self.uv_to_bsp_raw = np.copy(uv_to_bsp)
self.uv_to_bsp = u.T
self.nbsp = rank
self.bsp_s = s
print 'Reduced-rank bispectrum matrix calculated.'
print 'Matrix shape',self.uv_to_bsp.shape
print 'Time taken:',np.round((time.time()-t_start2)/60.,decimals=1),'mins'
if verbose:
print np.log(s)
return s
except:
print 'SVD failed. Using raw matrix.'
self.uv_to_bsp = uv_to_bsp
self.nbsp = nbsp
sys.stdout.flush()
|
gpl-3.0
|
Adai0808/scikit-learn
|
doc/sphinxext/numpy_ext/docscrape_sphinx.py
|
408
|
8061
|
import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
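# Illustrative dispatch (hypothetical inputs): get_doc_object(np.mean) returns a
# SphinxFunctionDoc, get_doc_object(np.ndarray) a SphinxClassDoc, and any
# non-callable, non-class object falls through to SphinxObjDoc; str() of each
# yields the reST rendering produced by SphinxDocString.__str__.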
|
bsd-3-clause
|
python-control/python-control
|
examples/pvtol-nested.py
|
2
|
4551
|
# pvtol-nested.py - inner/outer design for vectored thrust aircraft
# RMM, 5 Sep 09
#
# This file works through a fairly complicated control design and
# analysis, corresponding to the planar vertical takeoff and landing
# (PVTOL) aircraft in Astrom and Murray, Chapter 11. It is intended
# to demonstrate the basic functionality of the python-control
# package.
#
from __future__ import print_function
import os
import matplotlib.pyplot as plt # MATLAB plotting functions
from control.matlab import * # MATLAB-like functions
import numpy as np
# System parameters
m = 4 # mass of aircraft
J = 0.0475 # inertia around pitch axis
r = 0.25 # distance to center of force
g = 9.8 # gravitational constant
c = 0.05 # damping factor (estimated)
# Transfer functions for dynamics
Pi = tf([r], [J, 0, 0]) # inner loop (roll)
Po = tf([1], [m, c, 0]) # outer loop (position)
#
# Inner loop control design
#
# This is the controller for the pitch dynamics. Goal is to have
# fast response for the pitch dynamics so that we can use this as a
# control for the lateral dynamics
#
# Design a simple lead controller for the system
k, a, b = 200, 2, 50
Ci = k*tf([1, a], [1, b]) # lead compensator
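# With these numbers the lead compensator is Ci(s) = 200*(s + 2)/(s + 50),
# adding phase lead between the 2 and 50 rad/s corner frequencies.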
Li = Pi*Ci
# Bode plot for the open loop process
plt.figure(1)
bode(Pi)
# Bode plot for the loop transfer function, with margins
plt.figure(2)
bode(Li)
# Compute out the gain and phase margins
#! Not implemented
# gm, pm, wcg, wcp = margin(Li)
# Compute the sensitivity and complementary sensitivity functions
Si = feedback(1, Li)
Ti = Li*Si
# Check to make sure that the specification is met
plt.figure(3)
gangof4(Pi, Ci)
# Compute out the actual transfer function from u1 to v1 (see L8.2 notes)
# Hi = Ci*(1-m*g*Pi)/(1+Ci*Pi)
Hi = parallel(feedback(Ci, Pi), -m*g*feedback(Ci*Pi, 1))
plt.figure(4)
plt.clf()
plt.subplot(221)
bode(Hi)
# Now design the lateral control system
a, b, K = 0.02, 5, 2
Co = -K*tf([1, 0.3], [1, 10]) # another lead compensator
Lo = -m*g*Po*Co
plt.figure(5)
bode(Lo) # margin(Lo)
# Finally compute the real outer-loop loop gain + responses
L = Co*Hi*Po
S = feedback(1, L)
T = feedback(L, 1)
# Compute stability margins
gm, pm, wgc, wpc = margin(L)
print("Gain margin: %g at %g" % (gm, wgc))
print("Phase margin: %g at %g" % (pm, wpc))
plt.figure(6)
plt.clf()
bode(L, np.logspace(-4, 3))
# Add crossover line to the magnitude plot
#
# Note: in matplotlib before v2.1, the following code worked:
#
# plt.subplot(211); hold(True);
# loglog([1e-4, 1e3], [1, 1], 'k-')
#
# In later versions of matplotlib the call to plt.subplot will clear the
# axes and so we have to extract the axes that we want to use by hand.
# In addition, hold() is deprecated so we no longer require it.
#
for ax in plt.gcf().axes:
if ax.get_label() == 'control-bode-magnitude':
break
ax.semilogx([1e-4, 1e3], 20*np.log10([1, 1]), 'k-')
#
# Replot phase starting at -90 degrees
#
# Get the phase plot axes
for ax in plt.gcf().axes:
if ax.get_label() == 'control-bode-phase':
break
# Recreate the frequency response and shift the phase
mag, phase, w = freqresp(L, np.logspace(-4, 3))
phase = phase - 360
# Replot the phase by hand
ax.semilogx([1e-4, 1e3], [-180, -180], 'k-')
ax.semilogx(w, np.squeeze(phase), 'b-')
ax.axis([1e-4, 1e3, -360, 0])
plt.xlabel('Frequency [rad/s]')
plt.ylabel('Phase [deg]')
# plt.set(gca, 'YTick', [-360, -270, -180, -90, 0])
# plt.set(gca, 'XTick', [10^-4, 10^-2, 1, 100])
#
# Nyquist plot for complete design
#
plt.figure(7)
plt.clf()
nyquist(L, (0.0001, 1000))
# Add a box in the region we are going to expand
plt.plot([-2, -2, 1, 1, -2], [-4, 4, 4, -4, -4], 'r-')
# Expanded region
plt.figure(8)
plt.clf()
nyquist(L)
plt.axis([-2, 1, -4, 4])
# set up the color
color = 'b'
# Add arrows to the plot
# H1 = L.evalfr(0.4); H2 = L.evalfr(0.41);
# arrow([real(H1), imag(H1)], [real(H2), imag(H2)], AM_normal_arrowsize, \
# 'EdgeColor', color, 'FaceColor', color);
# H1 = freqresp(L, 0.35); H2 = freqresp(L, 0.36);
# arrow([real(H2), -imag(H2)], [real(H1), -imag(H1)], AM_normal_arrowsize, \
# 'EdgeColor', color, 'FaceColor', color);
plt.figure(9)
Yvec, Tvec = step(T, np.linspace(0, 20))
plt.plot(Tvec.T, Yvec.T)
Yvec, Tvec = step(Co*S, np.linspace(0, 20))
plt.plot(Tvec.T, Yvec.T)
plt.figure(10)
plt.clf()
P, Z = pzmap(T, plot=True, grid=True)
print("Closed loop poles and zeros: ", P, Z)
# Gang of Four
plt.figure(11)
plt.clf()
gangof4(Hi*Po, Co)
if 'PYCONTROL_TEST_EXAMPLES' not in os.environ:
plt.show()
|
bsd-3-clause
|
hoenirvili/distributions
|
distributions/geometric.py
|
1
|
2749
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import geom
from .distribution import Distribution
__all__ = ['Geometric']
class Geometric(Distribution):
"""
The geometric distribution is the distribution
of the number of trials needed to get the first
    success in repeated Bernoulli trials.
Parameters
----------
r : int
trials needed to get the first success
p : int or float
Probability of a trial to be successful
"""
def __init__(self, r, p):
if type(r) != int or r < 0 or r is None:
raise ValueError(
"Invalid number of trials needed to get the first success")
if (type(p) != int and type(p) != float or
p > 1 or p < 0 or p is None):
raise ValueError("Invalid probability number")
self.__p = p
self.__r = r
self.__not_p = (1 - p)
self.__all_r = np.arange(0, r + 1)
def mean(self):
"""
Compute the mean of the distribution
Returns:
--------
mean : float
"""
return geom.mean(self.__p)
def variance(self):
"""
Compute the variance of the distribution
Returns:
--------
variance : float
"""
return geom.var(self.__p)
def pmf(self):
"""
Compute the probability mass function of the distribution
Returns:
--------
pmf : float
"""
return geom.pmf(self.__r, self.__p)
def std(self):
"""
Compute the standard deviation of the distribution.
Returns:
--------
std : float
"""
return geom.std(self.__p)
def cdf(self):
"""
Compute the cumulative distribution function.
Returns:
--------
cdf : float
"""
return geom.cdf(self.__r, self.__p)
def pmfs(self):
"""
Compute the probability mass function of the distribution of all trials
needed to get the first success
Returns:
--------
pmf : numpy.narray
"""
return geom.pmf(self.__all_r, self.__p)
def plot(self):
"""
        Plot all pmf values ranging from zero to the
        number of trials needed to get the first success
"""
pmfs = self.pmfs()
fix, ax = plt.subplots()
x = np.arange(0, self.__r + 1)
plt.bar(x, pmfs, color="blue")
ax.set_xticks(np.arange(1, len(x)))
ax.set_title('Geometric distribution')
ax.set_xlabel('Trials needed to get the first success')
ax.set_ylabel('Probability')
plt.show()
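# Quick sanity check of the wrapper (values follow scipy.stats.geom):
#   >>> g = Geometric(3, 0.5)
#   >>> round(g.pmf(), 3)   # P(first success on trial 3) = (1 - 0.5)**2 * 0.5
#   0.125
#   >>> g.mean()            # 1 / p
#   2.0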
|
mit
|
joescape/DeembedProject
|
DeembeddingTool.py
|
1
|
2376
|
# importing wx files
import wx
import skrf as rf
import matplotlib.pylab as plt
import numpy as np
import DeembedGUI
import DeembedFiles
class DeembedWiz(DeembedGUI.DeembedWiz):
# constructor
def __init__(self,parent):
# type: (object) -> object
# initialize parent class
DeembedGUI.DeembedWiz.__init__(self,parent)
def deembedChoiceChange(self,event):
self.deembedchoice = self.m_deembedChoice.GetSelection()
# print deembedchoice
def tempChoiceChange(self,event):
self.tempchoice = self.m_tempChoice.GetSelection()
if self.tempchoice:
self.m_DeembedFilePicker2.Disable()
self.m_DeembedFilePicker3.Disable()
self.m_DeembedFilePicker2.SetPath('')
self.m_DeembedFilePicker3.SetPath('')
else:
self.m_DeembedFilePicker2.Enable()
self.m_DeembedFilePicker3.Enable()
# print tempchoice
def Close(self,event):
event.Skip()
def RawDirChange(self, event):
self.rawdir = self.m_RawDirPicker.GetPath()
# print rawdir
def DeembedDirChange(self, event):
self.deembeddir = self.m_DeembedDirPicker.GetPath()
# print deembeddir
def DeembedFile1Change(self, event):
self.deembedfilename1 = self.m_DeembedFilePicker1.GetPath()
print self.deembedfilename1
def DeembedFile2Change(self, event):
self.deembedfilename2 = self.m_DeembedFilePicker2.GetPath()
# print self.deembedfilename2
def DeembedFile3Change(self, event):
self.deembedfilename3 = self.m_DeembedFilePicker3.GetPath()
# print self.deembedfilename3
def startDeembed(self, event):
DeembedFiles.deembed(self.deembedchoice,self.tempchoice,self.rawdir,self.deembeddir,self.deembedfilename1,
self.deembedfilename2,self.deembedfilename3)
# mandatory in wx, create an app; False stands for not redirecting stdin/stdout
# refer manual for details
app = wx.App(False)
# create an object of DeembedWiz
mywiz = DeembedWiz(None)
# configure properties of wizard
mywiz.FitToPage(mywiz.m_pages[1])
mywiz.RunWizard(mywiz.m_pages[0])
mywiz.Destroy()
# start the applications
app.MainLoop()
|
gpl-3.0
|
harme199497/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
|
Chapter2_MorePyMC/separation_plot.py
|
86
|
1494
|
# separation plot
# Author: Cameron Davidson-Pilon,2013
# see http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
import matplotlib.pyplot as plt
import numpy as np
def separation_plot( p, y, **kwargs ):
"""
This function creates a separation plot for logistic and probit classification.
See http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
p: The proportions/probabilities, can be a nxM matrix which represents M models.
y: the 0-1 response variables.
"""
assert p.shape[0] == y.shape[0], "p.shape[0] != y.shape[0]"
n = p.shape[0]
try:
M = p.shape[1]
except:
p = p.reshape( n, 1 )
M = p.shape[1]
#colors = np.array( ["#fdf2db", "#e44a32"] )
colors_bmh = np.array( ["#eeeeee", "#348ABD"] )
fig = plt.figure( )#figsize = (8, 1.3*M) )
for i in range(M):
ax = fig.add_subplot(M, 1, i+1)
ix = np.argsort( p[:,i] )
#plot the different bars
bars = ax.bar( np.arange(n), np.ones(n), width=1.,
color = colors_bmh[ y[ix].astype(int) ],
edgecolor = 'none')
ax.plot( np.arange(n+1), np.append(p[ix,i], p[ix,i][-1]), "k",
linewidth = 1.,drawstyle="steps-post" )
#create expected value bar.
ax.vlines( [(1-p[ix,i]).sum()], [0], [1] )
#ax.grid(False)
#ax.axis('off')
plt.xlim( 0, n)
plt.tight_layout()
return
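# Example call with synthetic data (purely illustrative):
#   p = np.random.uniform(size=500)
#   y = (np.random.uniform(size=500) < p).astype(int)
#   separation_plot(p, y)
#   plt.show()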
|
mit
|
saimn/astropy
|
astropy/timeseries/sampled.py
|
3
|
16089
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
from astropy.table import groups, QTable, Table
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.units import Quantity, UnitsError
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.timeseries.core import BaseTimeSeries, autocheck_required_columns
__all__ = ['TimeSeries']
@autocheck_required_columns
class TimeSeries(BaseTimeSeries):
"""
A class to represent time series data in tabular form.
`~astropy.timeseries.TimeSeries` provides a class for representing time
series as a collection of values of different quantities measured at specific
points in time (for time series with finite time bins, see the
`~astropy.timeseries.BinnedTimeSeries` class).
`~astropy.timeseries.TimeSeries` is a sub-class of `~astropy.table.QTable`
    and thus provides all the standard table manipulation methods available to
tables, but it also provides additional conveniences for dealing with time
series, such as a flexible initializer for setting up the times, a method
for folding time series, and a ``time`` attribute for easy access to the
time values.
See also: https://docs.astropy.org/en/stable/timeseries/
Parameters
----------
data : numpy ndarray, dict, list, `~astropy.table.Table`, or table-like object, optional
Data to initialize time series. This does not need to contain the times,
which can be provided separately, but if it does contain the times they
should be in a column called ``'time'`` to be automatically recognized.
time : `~astropy.time.Time`, `~astropy.time.TimeDelta` or iterable
The times at which the values are sampled - this can be either given
directly as a `~astropy.time.Time` or `~astropy.time.TimeDelta` array
or as any iterable that initializes the `~astropy.time.Time` class. If
this is given, then the remaining time-related arguments should not be used.
time_start : `~astropy.time.Time` or str
The time of the first sample in the time series. This is an alternative
to providing ``time`` and requires that ``time_delta`` is also provided.
time_delta : `~astropy.time.TimeDelta` or `~astropy.units.Quantity`
The step size in time for the series. This can either be a scalar if
the time series is evenly sampled, or an array of values if it is not.
n_samples : int
The number of time samples for the series. This is only used if both
``time_start`` and ``time_delta`` are provided and are scalar values.
**kwargs : dict, optional
Additional keyword arguments are passed to `~astropy.table.QTable`.
"""
_required_columns = ['time']
def __init__(self, data=None, *, time=None, time_start=None,
time_delta=None, n_samples=None, **kwargs):
super().__init__(data=data, **kwargs)
# For some operations, an empty time series needs to be created, then
# columns added one by one. We should check that when columns are added
# manually, time is added first and is of the right type.
if data is None and time is None and time_start is None and time_delta is None:
self._required_columns_relax = True
return
# First if time has been given in the table data, we should extract it
# and treat it as if it had been passed as a keyword argument.
if data is not None:
if n_samples is not None:
if n_samples != len(self):
                    raise TypeError("'n_samples' was given explicitly but does not "
                                    "match the length of the input data.")
else:
n_samples = len(self)
if 'time' in self.colnames:
if time is None:
time = self.columns['time']
else:
raise TypeError("'time' has been given both in the table and as a keyword argument")
if time is None and time_start is None:
raise TypeError("Either 'time' or 'time_start' should be specified")
elif time is not None and time_start is not None:
raise TypeError("Cannot specify both 'time' and 'time_start'")
if time is not None and not isinstance(time, (Time, TimeDelta)):
time = Time(time)
if time_start is not None and not isinstance(time_start, (Time, TimeDelta)):
time_start = Time(time_start)
if time_delta is not None and not isinstance(time_delta, (Quantity, TimeDelta)):
raise TypeError("'time_delta' should be a Quantity or a TimeDelta")
if isinstance(time_delta, TimeDelta):
time_delta = time_delta.sec * u.s
if time_start is not None:
# We interpret this as meaning that time is that of the first
# sample and that the interval is given by time_delta.
if time_delta is None:
raise TypeError("'time' is scalar, so 'time_delta' is required")
if time_delta.isscalar:
time_delta = np.repeat(time_delta, n_samples)
time_delta = np.cumsum(time_delta)
time_delta = np.roll(time_delta, 1)
time_delta[0] = 0. * u.s
time = time_start + time_delta
elif len(self.colnames) > 0 and len(time) != len(self):
raise ValueError("Length of 'time' ({}) should match "
"data length ({})".format(len(time), n_samples))
elif time_delta is not None:
raise TypeError("'time_delta' should not be specified since "
"'time' is an array")
with self._delay_required_column_checks():
if 'time' in self.colnames:
self.remove_column('time')
self.add_column(time, index=0, name='time')
@property
def time(self):
"""
The time values.
"""
return self['time']
@deprecated_renamed_argument('midpoint_epoch', 'epoch_time', '4.0')
def fold(self, period=None, epoch_time=None, epoch_phase=0,
wrap_phase=None, normalize_phase=False):
"""
Return a new `~astropy.timeseries.TimeSeries` folded with a period and
epoch.
Parameters
----------
period : `~astropy.units.Quantity`
The period to use for folding
epoch_time : `~astropy.time.Time`
The time to use as the reference epoch, at which the relative time
offset / phase will be ``epoch_phase``. Defaults to the first time
in the time series.
epoch_phase : float or `~astropy.units.Quantity`
Phase of ``epoch_time``. If ``normalize_phase`` is `True`, this
should be a dimensionless value, while if ``normalize_phase`` is
``False``, this should be a `~astropy.units.Quantity` with time
units. Defaults to 0.
wrap_phase : float or `~astropy.units.Quantity`
The value of the phase above which values are wrapped back by one
period. If ``normalize_phase`` is `True`, this should be a
dimensionless value, while if ``normalize_phase`` is ``False``,
this should be a `~astropy.units.Quantity` with time units.
Defaults to half the period, so that the resulting time series goes
from ``-period / 2`` to ``period / 2`` (if ``normalize_phase`` is
`False`) or -0.5 to 0.5 (if ``normalize_phase`` is `True`).
normalize_phase : bool
If `False` phase is returned as `~astropy.time.TimeDelta`,
otherwise as a dimensionless `~astropy.units.Quantity`.
Returns
-------
folded_timeseries : `~astropy.timeseries.TimeSeries`
The folded time series object with phase as the ``time`` column.
"""
if not isinstance(period, Quantity) or period.unit.physical_type != 'time':
raise UnitsError('period should be a Quantity in units of time')
folded = self.copy()
if epoch_time is None:
epoch_time = self.time[0]
else:
epoch_time = Time(epoch_time)
period_sec = period.to_value(u.s)
if normalize_phase:
if isinstance(epoch_phase, Quantity) and epoch_phase.unit.physical_type != 'dimensionless':
raise UnitsError('epoch_phase should be a dimensionless Quantity '
'or a float when normalize_phase=True')
epoch_phase_sec = epoch_phase * period_sec
else:
if epoch_phase == 0:
epoch_phase_sec = 0.
else:
if not isinstance(epoch_phase, Quantity) or epoch_phase.unit.physical_type != 'time':
raise UnitsError('epoch_phase should be a Quantity in units '
'of time when normalize_phase=False')
epoch_phase_sec = epoch_phase.to_value(u.s)
if wrap_phase is None:
wrap_phase = period_sec / 2
else:
if normalize_phase:
if isinstance(wrap_phase, Quantity) and not wrap_phase.unit.is_equivalent(u.one):
raise UnitsError('wrap_phase should be dimensionless when '
'normalize_phase=True')
else:
if wrap_phase < 0 or wrap_phase > 1:
raise ValueError('wrap_phase should be between 0 and 1')
else:
wrap_phase = wrap_phase * period_sec
else:
if isinstance(wrap_phase, Quantity) and wrap_phase.unit.physical_type == 'time':
if wrap_phase < 0 or wrap_phase > period:
raise ValueError('wrap_phase should be between 0 and the period')
else:
wrap_phase = wrap_phase.to_value(u.s)
else:
raise UnitsError('wrap_phase should be a Quantity in units '
'of time when normalize_phase=False')
relative_time_sec = (((self.time - epoch_time).sec
+ epoch_phase_sec
+ (period_sec - wrap_phase)) % period_sec
- (period_sec - wrap_phase))
folded_time = TimeDelta(relative_time_sec * u.s)
if normalize_phase:
folded_time = (folded_time / period).decompose()
period = period_sec = 1
with folded._delay_required_column_checks():
folded.remove_column('time')
folded.add_column(folded_time, name='time', index=0)
return folded
def __getitem__(self, item):
if self._is_list_or_tuple_of_str(item):
if 'time' not in item:
out = QTable([self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
return out
return super().__getitem__(item)
def add_column(self, *args, **kwargs):
"""
See :meth:`~astropy.table.Table.add_column`.
"""
# Note that the docstring is inherited from QTable
result = super().add_column(*args, **kwargs)
if len(self.indices) == 0 and 'time' in self.colnames:
self.add_index('time')
return result
def add_columns(self, *args, **kwargs):
"""
See :meth:`~astropy.table.Table.add_columns`.
"""
# Note that the docstring is inherited from QTable
result = super().add_columns(*args, **kwargs)
if len(self.indices) == 0 and 'time' in self.colnames:
self.add_index('time')
return result
@classmethod
def from_pandas(self, df, time_scale='utc'):
"""
Convert a :class:`~pandas.DataFrame` to a
:class:`astropy.timeseries.TimeSeries`.
Parameters
----------
df : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance.
time_scale : str
The time scale to pass into `astropy.time.Time`.
Defaults to ``UTC``.
"""
from pandas import DataFrame, DatetimeIndex
if not isinstance(df, DataFrame):
raise TypeError("Input should be a pandas DataFrame")
if not isinstance(df.index, DatetimeIndex):
raise TypeError("DataFrame does not have a DatetimeIndex")
time = Time(df.index, scale=time_scale)
table = Table.from_pandas(df)
return TimeSeries(time=time, data=table)
def to_pandas(self):
"""
Convert this :class:`~astropy.timeseries.TimeSeries` to a
:class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex` index.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
"""
return Table(self).to_pandas(index='time')
@classmethod
def read(self, filename, time_column=None, time_format=None, time_scale=None, format=None, *args, **kwargs):
"""
Read and parse a file and returns a `astropy.timeseries.TimeSeries`.
This method uses the unified I/O infrastructure in Astropy which makes
it easy to define readers/writers for various classes
(https://docs.astropy.org/en/stable/io/unified.html). By default, this
method will try and use readers defined specifically for the
`astropy.timeseries.TimeSeries` class - however, it is also
possible to use the ``format`` keyword to specify formats defined for
the `astropy.table.Table` class - in this case, you will need to also
        provide the name of the column containing the times, as well as any
        other relevant column names (see the Parameters section below
        for details)::
>>> from astropy.timeseries import TimeSeries
>>> ts = TimeSeries.read('sampled.dat', format='ascii.ecsv',
... time_column='date') # doctest: +SKIP
Parameters
----------
filename : str
File to parse.
format : str
File format specifier.
time_column : str, optional
The name of the time column.
time_format : str, optional
The time format for the time column.
time_scale : str, optional
The time scale for the time column.
*args : tuple, optional
Positional arguments passed through to the data reader.
**kwargs : dict, optional
Keyword arguments passed through to the data reader.
Returns
-------
out : `astropy.timeseries.sampled.TimeSeries`
TimeSeries corresponding to file contents.
Notes
-----
"""
try:
            # First we try the readers defined for the TimeSeries class
return super().read(filename, format=format, *args, **kwargs)
except TypeError:
# Otherwise we fall back to the default Table readers
if time_column is None:
raise ValueError("``time_column`` should be provided since the default Table readers are being used.")
table = Table.read(filename, format=format, *args, **kwargs)
if time_column in table.colnames:
time = Time(table.columns[time_column], scale=time_scale, format=time_format)
table.remove_column(time_column)
else:
raise ValueError(f"Time column '{time_column}' not found in the input data.")
return TimeSeries(time=time, data=table)
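# Minimal usage sketch of the class above (values are illustrative only): build an
# evenly sampled series from a start time and a fixed step, then fold it on a trial period.
if __name__ == "__main__":
    ts = TimeSeries(time_start='2016-03-22T12:30:31', time_delta=3 * u.s,
                    data={'flux': [1., 4., 5., 6., 4.] * u.mJy})
    print(ts)
    print(ts.fold(period=6 * u.s, normalize_phase=True)['time'])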
|
bsd-3-clause
|
drammock/mne-python
|
tutorials/inverse/20_dipole_fit.py
|
5
|
5301
|
# -*- coding: utf-8 -*-
"""
============================================================
Source localization with equivalent current dipole (ECD) fit
============================================================
This shows how to fit a dipole :footcite:`Sarvas1987` using mne-python.
For a comparison of fits between MNE-C and mne-python, see
`this gist <https://gist.github.com/larsoner/ca55f791200fe1dc3dd2>`__.
"""
from os import path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.forward import make_forward_dipole
from mne.evoked import combine_evoked
from mne.label import find_pos_in_annot
from mne.simulation import simulate_evoked
from nilearn.plotting import plot_anat
from nilearn.datasets import load_mni152_template
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_ave = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem', 'sample-5120-bem-sol.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
fname_surf_lh = op.join(subjects_dir, 'sample', 'surf', 'lh.white')
###############################################################################
# Let's localize the N100m (using MEG only)
evoked = mne.read_evokeds(fname_ave, condition='Right Auditory',
baseline=(None, 0))
evoked.pick_types(meg=True, eeg=False)
evoked_full = evoked.copy()
evoked.crop(0.07, 0.08)
# Fit a dipole
dip = mne.fit_dipole(evoked, fname_cov, fname_bem, fname_trans)[0]
# Plot the result in 3D brain with the MRI image.
dip.plot_locations(fname_trans, 'sample', subjects_dir, mode='orthoview')
###############################################################################
# Plot the result in 3D brain with the MRI image using Nilearn
# In MRI coordinates and in MNI coordinates (template brain)
trans = mne.read_trans(fname_trans)
subject = 'sample'
mni_pos = mne.head_to_mni(dip.pos, mri_head_t=trans,
subject=subject, subjects_dir=subjects_dir)
mri_pos = mne.head_to_mri(dip.pos, mri_head_t=trans,
subject=subject, subjects_dir=subjects_dir)
# In the meantime let's find an anatomical label for the best fitted dipole
best_dip_id = dip.gof.argmax()
best_dip_mri_pos = mri_pos[best_dip_id]
label = find_pos_in_annot(best_dip_mri_pos, subject=subject,
subjects_dir=subjects_dir,
annot='aparc.a2009s+aseg')
# Draw dipole position on MRI scan and add anatomical label from parcellation
t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
fig_T1 = plot_anat(t1_fname, cut_coords=mri_pos[0],
title=f'Dipole location: {label}')
template = load_mni152_template()
fig_template = plot_anat(template, cut_coords=mni_pos[0],
title='Dipole loc. (MNI Space)')
###############################################################################
# Calculate and visualise magnetic field predicted by dipole with maximum GOF
# and compare to the measured data, highlighting the ipsilateral (right) source
fwd, stc = make_forward_dipole(dip, fname_bem, evoked.info, fname_trans)
pred_evoked = simulate_evoked(fwd, stc, evoked.info, cov=None, nave=np.inf)
# find time point with highest GOF to plot
best_idx = np.argmax(dip.gof)
best_time = dip.times[best_idx]
print('Highest GOF %0.1f%% at t=%0.1f ms with confidence volume %0.1f cm^3'
% (dip.gof[best_idx], best_time * 1000,
dip.conf['vol'][best_idx] * 100 ** 3))
# remember to create a subplot for the colorbar
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=[10., 3.4],
gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1],
top=0.85))
vmin, vmax = -400, 400 # make sure each plot has same colour range
# first plot the topography at the time of the best fitting (single) dipole
plot_params = dict(times=best_time, ch_type='mag', outlines='skirt',
colorbar=False, time_unit='s')
evoked.plot_topomap(time_format='Measured field', axes=axes[0], **plot_params)
# compare this to the predicted field
pred_evoked.plot_topomap(time_format='Predicted field', axes=axes[1],
**plot_params)
# Subtract predicted from measured data (apply equal weights)
diff = combine_evoked([evoked, pred_evoked], weights=[1, -1])
plot_params['colorbar'] = True
diff.plot_topomap(time_format='Difference', axes=axes[2:], **plot_params)
fig.suptitle('Comparison of measured and predicted fields '
'at {:.0f} ms'.format(best_time * 1000.), fontsize=16)
fig.tight_layout()
###############################################################################
# Estimate the time course of a single dipole with fixed position and
# orientation (the one that maximized GOF) over the entire interval
dip_fixed = mne.fit_dipole(evoked_full, fname_cov, fname_bem, fname_trans,
pos=dip.pos[best_idx], ori=dip.ori[best_idx])[0]
dip_fixed.plot(time_unit='s')
##############################################################################
# References
# ----------
# .. footbibliography::
|
bsd-3-clause
|
ishanic/scikit-learn
|
sklearn/manifold/tests/test_isomap.py
|
226
|
3941
|
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
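    # note: Isomap's first positional argument is n_neighbors, so the call below
    # uses n_components (= 10) as the neighbour count and 2 output components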
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
|
bsd-3-clause
|
elkingtonmcb/scikit-learn
|
examples/decomposition/plot_incremental_pca.py
|
244
|
1878
|
"""
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
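# Sketch of the out-of-core pattern the docstring refers to: feed the data in chunks
# through partial_fit instead of fit. Chunking iris is illustrative only, since
# IncrementalPCA is aimed at data that does not fit in memory.
ipca_stream = IncrementalPCA(n_components=n_components)
for X_chunk in np.array_split(X, 5):
    ipca_stream.partial_fit(X_chunk)
print("Explained variance ratio (chunked fit): %s" % ipca_stream.explained_variance_ratio_)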
|
bsd-3-clause
|
mzwiessele/mzparam
|
paramz/transformations.py
|
2
|
8925
|
#===============================================================================
# Copyright (c) 2012 - 2014, GPy authors (see AUTHORS.txt).
# Copyright (c) 2015, Max Zwiessele
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramax nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from .domains import _POSITIVE,_NEGATIVE, _BOUNDED
import weakref, logging
import sys
_log_lim_val = np.log(np.finfo(np.float64).max)
_exp_lim_val = np.finfo(np.float64).max
_lim_val = 36.0
epsilon = np.finfo(np.float64).resolution
#===============================================================================
# Fixing constants
__fixed__ = "fixed"
FIXED = False
UNFIXED = True
#===============================================================================
logger = logging.getLogger(__name__)
class Transformation(object):
domain = None
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance or cls._instance.__class__ is not cls:
cls._instance = super(Transformation, cls).__new__(cls, *args, **kwargs)
return cls._instance
def f(self, opt_param):
raise NotImplementedError
def finv(self, model_param):
raise NotImplementedError
def log_jacobian(self, model_param):
"""
compute the log of the jacobian of f, evaluated at f(x)= model_param
"""
logger.warning("log Jacobian for transformation {} not implemented, approximating by 0.".format(self.__class__))
return 0.
def log_jacobian_grad(self, model_param):
"""
        compute the derivative of the log of the jacobian of f, evaluated at f(x)= model_param
"""
logger.warning("gradient of log Jacobian for transformation {} not implemented, approximating by 0.".format(self.__class__))
return 0.
def gradfactor(self, model_param, dL_dmodel_param):
""" df(opt_param)_dopt_param evaluated at self.f(opt_param)=model_param, times the gradient dL_dmodel_param,
i.e.:
        .. math::
            \frac{\partial L}{\partial f}\,\left.\frac{\partial f(x)}{\partial x}\right|_{x=f^{-1}(\mathrm{model\_param})}
"""
raise NotImplementedError
def gradfactor_non_natural(self, model_param, dL_dmodel_param):
return self.gradfactor(model_param, dL_dmodel_param)
def initialize(self, f):
""" produce a sensible initial value for f(x)"""
raise NotImplementedError
def plot(self, xlabel=r'transformed $\theta$', ylabel=r'$\theta$', axes=None, *args,**kw):
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
import matplotlib.pyplot as plt
x = np.linspace(-8,8)
        if axes is None:
            axes = plt.gca()
        axes.plot(x, self.f(x), *args, **kw)
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
def __str__(self):
raise NotImplementedError
def __repr__(self):
return self.__class__.__name__
class Logexp(Transformation):
domain = _POSITIVE
def f(self, x):
return np.where(x>_lim_val, x, np.log1p(np.exp(np.clip(x, -_log_lim_val, _lim_val)))) #+ epsilon
#raises overflow warning: return np.where(x>_lim_val, x, np.log(1. + np.exp(x)))
def finv(self, f):
return np.where(f>_lim_val, f, np.log(np.expm1(f)))
def gradfactor(self, f, df):
return df*np.where(f>_lim_val, 1., - np.expm1(-f))
def initialize(self, f):
if np.any(f < 0.):
logger.info("Warning: changing parameters to satisfy constraints")
return np.abs(f)
def log_jacobian(self, model_param):
return np.where(model_param>_lim_val, model_param, np.log(np.expm1(model_param))) - model_param
def log_jacobian_grad(self, model_param):
return 1./(np.expm1(model_param))
def __str__(self):
return '+ve'
class Exponent(Transformation):
domain = _POSITIVE
def f(self, x):
return np.where(x<_lim_val, np.where(x>-_lim_val, np.exp(x), np.exp(-_lim_val)), np.exp(_lim_val))
def finv(self, x):
return np.log(x)
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, f)
def initialize(self, f):
if np.any(f < 0.):
logger.info("Warning: changing parameters to satisfy constraints")
return np.abs(f)
def log_jacobian(self, model_param):
return np.log(model_param)
def log_jacobian_grad(self, model_param):
return 1./model_param
def __str__(self):
return '+ve'
class NegativeLogexp(Transformation):
domain = _NEGATIVE
logexp = Logexp()
def f(self, x):
return -self.logexp.f(x) # np.log(1. + np.exp(x))
def finv(self, f):
return self.logexp.finv(-f) # np.log(np.exp(-f) - 1.)
def gradfactor(self, f, df):
return self.logexp.gradfactor(-f, -df)
def initialize(self, f):
return -self.logexp.initialize(-f) # np.abs(f)
def __str__(self):
return '-ve'
class NegativeExponent(Exponent):
domain = _NEGATIVE
def f(self, x):
return -Exponent.f(self, x)
def finv(self, f):
return -Exponent.finv(self, -f)
def gradfactor(self, f, df):
return -Exponent.gradfactor(self, f, df)
def initialize(self, f):
return -Exponent.initialize(self, f) #np.abs(f)
def __str__(self):
return '-ve'
class Square(Transformation):
domain = _POSITIVE
def f(self, x):
return x ** 2
def finv(self, x):
return np.sqrt(x)
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, 2 * np.sqrt(f))
def initialize(self, f):
return np.abs(f)
def __str__(self):
return '+sq'
class Logistic(Transformation):
domain = _BOUNDED
_instances = []
def __new__(cls, lower=1e-6, upper=1e-6, *args, **kwargs):
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().lower == lower and instance().upper == upper:
return instance()
newfunc = super(Transformation, cls).__new__
if newfunc is object.__new__:
o = newfunc(cls)
else:
o = newfunc(cls, lower, upper, *args, **kwargs)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, lower, upper):
assert lower < upper
self.lower, self.upper = float(lower), float(upper)
self.difference = self.upper - self.lower
def f(self, x):
if (x<-300.).any():
x = x.copy()
x[x<-300.] = -300.
return self.lower + self.difference / (1. + np.exp(-x))
def finv(self, f):
return np.log(np.clip(f - self.lower, 1e-10, np.inf) / np.clip(self.upper - f, 1e-10, np.inf))
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, (f - self.lower) * (self.upper - f) / self.difference)
def initialize(self, f):
if np.any(np.logical_or(f < self.lower, f > self.upper)):
logger.info("Warning: changing parameters to satisfy constraints")
#return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(f * 0.), f)
#FIXME: Max, zeros_like right?
return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(np.zeros_like(f)), f)
def __str__(self):
return '{},{}'.format(self.lower, self.upper)
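# Round-trip sketch for the transformations above (values are illustrative only):
# f maps the unconstrained optimiser space to the constrained model space and finv
# inverts it, so finv(f(x)) should recover x up to floating point error.
if __name__ == "__main__":
    x = np.linspace(-3., 3., 7)
    for t in (Logexp(), Exponent(), Logistic(0., 1.)):
        err = np.max(np.abs(t.finv(t.f(x)) - x))
        print("{}: max round-trip error {}".format(t.__class__.__name__, err))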
|
bsd-3-clause
|
ihmeuw/vivarium
|
tests/framework/test_population.py
|
1
|
2608
|
import pandas as pd
import pytest
from vivarium.framework.population import PopulationView, InitializerComponentSet, PopulationError, PopulationManager
class DummyPopulationManager:
def __init__(self):
self.get_population = lambda _ : pd.DataFrame({'age': [0, 10, 20, 30, 40, 50, 60, 70], 'sex': ['Male', 'Female']*4})
def test_create_PopulationView_with_all_columns():
manager = DummyPopulationManager()
view = PopulationView(manager, 0)
assert set(view.columns) == {'age', 'sex'}
def test_initializer_set_fail_type():
component_set = InitializerComponentSet()
with pytest.raises(TypeError):
component_set.add(lambda: 'test', ['test_column'])
def initializer():
return 'test'
with pytest.raises(TypeError):
component_set.add(initializer, ['test_column'])
class UnnamedComponent:
def initializer(self):
return 'test'
class Component:
def __init__(self, name):
self.name = name
def initializer(self):
return 'test'
def other_initializer(self):
return 'whoops'
def test_initializer_set_fail_attr():
component_set = InitializerComponentSet()
with pytest.raises(AttributeError):
component_set.add(UnnamedComponent().initializer, ['test_column'])
def test_initializer_set_duplicate_component():
component_set = InitializerComponentSet()
component = Component('test')
component_set.add(component.initializer, ['test_column1'])
with pytest.raises(PopulationError, match='multiple population initializers'):
component_set.add(component.other_initializer, ['test_column2'])
def test_initializer_set_duplicate_columns():
component_set = InitializerComponentSet()
component1 = Component('test1')
component2 = Component('test2')
columns = ['test_column']
component_set.add(component1.initializer, columns)
with pytest.raises(PopulationError, match='both registered initializers'):
component_set.add(component2.initializer, columns)
with pytest.raises(PopulationError, match='both registered initializers'):
component_set.add(component2.initializer, ['sneaky_column'] + columns)
def test_initializer_set():
component_set = InitializerComponentSet()
for i in range(10):
component = Component(i)
columns = [f'test_column_{i}_{j}' for j in range(5)]
component_set.add(component.initializer, columns)
def test_get_view_with_no_query():
manager = PopulationManager()
view = manager._get_view(columns=['age','sex'])
assert view.query == 'tracked == True'
|
gpl-3.0
|
LohithBlaze/scikit-learn
|
sklearn/ensemble/tests/test_base.py
|
284
|
1328
|
"""
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
|
bsd-3-clause
|
dagwieers/ansible
|
hacking/aws_config/build_iam_policy_framework.py
|
25
|
11861
|
# Requires pandas, bs4, html5lib, and lxml
#
# Call script with the output from aws_resource_actions callback, e.g.
# python build_iam_policy_framework.py ['ec2:AuthorizeSecurityGroupEgress', 'ec2:AuthorizeSecurityGroupIngress', 'sts:GetCallerIdentity']
#
# The sample output:
# {
# "Version": "2012-10-17",
# "Statement": [
# {
# "Sid": "AnsibleEditor0",
# "Effect": "Allow",
# "Action": [
# "ec2:AuthorizeSecurityGroupEgress",
# "ec2:AuthorizeSecurityGroupIngress"
# ],
# "Resource": "arn:aws:ec2:${Region}:${Account}:security-group/${SecurityGroupId}"
# },
# {
# "Sid": "AnsibleEditor1",
# "Effect": "Allow",
# "Action": [
# "sts:GetCallerIdentity"
# ],
# "Resource": "*"
# }
# ]
# }
#
# Policy troubleshooting:
# - If there are more actions in the policy than you provided, AWS has documented dependencies for some of your actions and
# those have been added to the policy.
# - If there are fewer actions in the policy than you provided, some of your actions are not in the IAM table of actions for
# that service. For example, the API call s3:DeleteObjects does not actually correlate to the permission needed in a policy.
# In this case s3:DeleteObject is the permission required to allow both the s3:DeleteObjects action and the s3:DeleteObject action.
# - The policies output are only as accurate as the AWS documentation. If the policy does not permit the
# necessary actions, look for undocumented dependencies. For example, redshift:CreateCluster requires ec2:DescribeVpcs,
# ec2:DescribeSubnets, ec2:DescribeSecurityGroups, and ec2:DescribeInternetGateways, but AWS does not document this.
#
import json
import requests
import sys
missing_dependencies = []
try:
import pandas as pd
except ImportError:
missing_dependencies.append('pandas')
try:
import bs4
except ImportError:
missing_dependencies.append('bs4')
try:
import html5lib
except ImportError:
missing_dependencies.append('html5lib')
try:
import lxml
except ImportError:
missing_dependencies.append('lxml')
irregular_service_names = {
'a4b': 'alexaforbusiness',
'appstream': 'appstream2.0',
'acm': 'certificatemanager',
'acm-pca': 'certificatemanagerprivatecertificateauthority',
'aws-marketplace-management': 'marketplacemanagementportal',
'ce': 'costexplorerservice',
'cognito-identity': 'cognitoidentity',
'cognito-sync': 'cognitosync',
'cognito-idp': 'cognitouserpools',
'cur': 'costandusagereport',
'dax': 'dynamodbacceleratordax',
'dlm': 'datalifecyclemanager',
'dms': 'databasemigrationservice',
'ds': 'directoryservice',
'ec2messages': 'messagedeliveryservice',
'ecr': 'ec2containerregistry',
'ecs': 'elasticcontainerservice',
'eks': 'elasticcontainerserviceforkubernetes',
'efs': 'elasticfilesystem',
'es': 'elasticsearchservice',
'events': 'cloudwatchevents',
'firehose': 'kinesisfirehose',
'fms': 'firewallmanager',
'health': 'healthapisandnotifications',
'importexport': 'importexportdiskservice',
'iot1click': 'iot1-click',
'kafka': 'managedstreamingforkafka',
'kinesisvideo': 'kinesisvideostreams',
'kms': 'keymanagementservice',
'license-manager': 'licensemanager',
'logs': 'cloudwatchlogs',
'opsworks-cm': 'opsworksconfigurationmanagement',
'mediaconnect': 'elementalmediaconnect',
'mediaconvert': 'elementalmediaconvert',
'medialive': 'elementalmedialive',
'mediapackage': 'elementalmediapackage',
'mediastore': 'elementalmediastore',
'mgh': 'migrationhub',
'mobiletargeting': 'pinpoint',
'pi': 'performanceinsights',
'pricing': 'pricelist',
'ram': 'resourceaccessmanager',
'resource-groups': 'resourcegroups',
'sdb': 'simpledb',
'servicediscovery': 'cloudmap',
'serverlessrepo': 'serverlessapplicationrepository',
'sms': 'servermigrationservice',
'sms-voice': 'pinpointsmsandvoiceservice',
'sso-directory': 'ssodirectory',
'ssm': 'systemsmanager',
'ssmmessages': 'sessionmanagermessagegatewayservice',
'states': 'stepfunctions',
'sts': 'securitytokenservice',
'swf': 'simpleworkflowservice',
'tag': 'resourcegrouptaggingapi',
'transfer': 'transferforsftp',
'waf-regional': 'wafregional',
'wam': 'workspacesapplicationmanager',
'xray': 'x-ray'
}
irregular_service_links = {
'apigateway': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_manageamazonapigateway.html'
],
'aws-marketplace': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsmarketplace.html',
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsmarketplacemeteringservice.html',
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsprivatemarketplace.html'
],
'discovery': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_applicationdiscovery.html'
],
'elasticloadbalancing': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_elasticloadbalancing.html',
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_elasticloadbalancingv2.html'
],
'globalaccelerator': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_globalaccelerator.html'
]
}
def get_docs_by_prefix(prefix):
amazon_link_form = 'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazon{0}.html'
aws_link_form = 'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_aws{0}.html'
if prefix in irregular_service_links:
links = irregular_service_links[prefix]
else:
if prefix in irregular_service_names:
prefix = irregular_service_names[prefix]
links = [amazon_link_form.format(prefix), aws_link_form.format(prefix)]
return links
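# For example, get_docs_by_prefix('sts') maps 'sts' to 'securitytokenservice' via
# irregular_service_names and returns both candidate URLs (the list_amazon... and
# list_aws... variants); typically only one of them resolves to a page with tables.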
def get_html(links):
html_list = []
for link in links:
html = requests.get(link).content
try:
parsed_html = pd.read_html(html)
html_list.append(parsed_html)
except ValueError as e:
if 'No tables found' in str(e):
pass
else:
raise e
return html_list
def get_tables(service):
links = get_docs_by_prefix(service)
html_list = get_html(links)
action_tables = []
arn_tables = []
for df_list in html_list:
for df in df_list:
table = json.loads(df.to_json(orient='split'))
table_data = table['data'][0]
if 'Actions' in table_data and 'Resource Types (*required)' in table_data:
action_tables.append(table['data'][1::])
elif 'Resource Types' in table_data and 'ARN' in table_data:
arn_tables.append(table['data'][1::])
# Action table indices:
# 0: Action, 1: Description, 2: Access level, 3: Resource type, 4: Condition keys, 5: Dependent actions
# ARN tables indices:
# 0: Resource type, 1: ARN template, 2: Condition keys
return action_tables, arn_tables
def add_dependent_action(resources, dependency):
resource, action = dependency.split(':')
if resource in resources:
resources[resource].append(action)
else:
resources[resource] = [action]
return resources
def get_dependent_actions(resources):
for service in dict(resources):
action_tables, arn_tables = get_tables(service)
for found_action_table in action_tables:
for action_stuff in found_action_table:
if action_stuff is None:
continue
if action_stuff[0] in resources[service] and action_stuff[5]:
dependencies = action_stuff[5].split()
if isinstance(dependencies, list):
for dependency in dependencies:
resources = add_dependent_action(resources, dependency)
else:
resources = add_dependent_action(resources, dependencies)
return resources
def get_actions_by_service(resources):
service_action_dict = {}
dependencies = {}
for service in resources:
action_tables, arn_tables = get_tables(service)
# Create dict of the resource type to the corresponding ARN
arn_dict = {}
for found_arn_table in arn_tables:
for arn_stuff in found_arn_table:
arn_dict["{0}*".format(arn_stuff[0])] = arn_stuff[1]
# Create dict of the action to the corresponding ARN
action_dict = {}
for found_action_table in action_tables:
for action_stuff in found_action_table:
if action_stuff[0] is None:
continue
if arn_dict.get(action_stuff[3]):
action_dict[action_stuff[0]] = arn_dict[action_stuff[3]]
else:
action_dict[action_stuff[0]] = None
service_action_dict[service] = action_dict
return service_action_dict
def get_resource_arns(aws_actions, action_dict):
resource_arns = {}
for resource_action in aws_actions:
resource, action = resource_action.split(':')
if action not in action_dict:
continue
if action_dict[action] is None:
resource = "*"
else:
resource = action_dict[action].replace("${Partition}", "aws")
if resource not in resource_arns:
resource_arns[resource] = []
resource_arns[resource].append(resource_action)
return resource_arns
def get_resources(actions):
resources = {}
for action in actions:
resource, action = action.split(':')
if resource not in resources:
resources[resource] = []
resources[resource].append(action)
return resources
def combine_arn_actions(aws_actions, service_action_arn_dict):
arn_actions = {}
for service in service_action_arn_dict:
service_arn_actions = get_resource_arns(aws_actions, service_action_arn_dict[service])
for resource in service_arn_actions:
if resource in arn_actions:
arn_actions[resource].extend(service_arn_actions[resource])
else:
arn_actions[resource] = service_arn_actions[resource]
return arn_actions
def combine_actions_and_dependent_actions(resources):
aws_actions = []
for resource in resources:
for action in resources[resource]:
aws_actions.append('{0}:{1}'.format(resource, action))
return set(aws_actions)
def get_actions_restricted_by_arn(aws_actions):
resources = get_resources(aws_actions)
resources = get_dependent_actions(resources)
service_action_arn_dict = get_actions_by_service(resources)
aws_actions = combine_actions_and_dependent_actions(resources)
return combine_arn_actions(aws_actions, service_action_arn_dict)
def main(aws_actions):
arn_actions = get_actions_restricted_by_arn(aws_actions)
statement = []
for resource_restriction in arn_actions:
statement.append({
"Sid": "AnsibleEditor{0}".format(len(statement)),
"Effect": "Allow",
"Action": arn_actions[resource_restriction],
"Resource": resource_restriction
})
policy = {"Version": "2012-10-17", "Statement": statement}
print(json.dumps(policy, indent=4))
if __name__ == '__main__':
if missing_dependencies:
sys.exit('Missing Python libraries: {0}'.format(', '.join(missing_dependencies)))
actions = sys.argv[1:]
if len(actions) == 1:
actions = sys.argv[1].split(',')
aws_actions = [action.strip('[], "\'') for action in actions]
main(aws_actions)
|
gpl-3.0
|
smjhnits/Praktikum_TU_D_16-17
|
Anfängerpraktikum/Protokolle/V302_Brückenschaltungen/Auswertung/Auswertung.py
|
1
|
4533
|
import numpy as np
from scipy.stats import sem
from uncertainties import ufloat
import uncertainties.unumpy as unp
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def Mean_Std(Werte):
s = 1/np.sqrt(len(Werte))
return ufloat(np.mean(Werte), s * np.std(Werte, ddof = 1))
# Wheatstone
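# Balance condition of the Wheatstone bridge: R_x = R_2 * R_3 / R_4, assuming the
# measurement columns below are ordered as (R_2, R_3, R_4).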
Messung_Wert_10 = np.array([[1000, 196, 804],
[664 , 268, 732],
[332 , 422, 578]])
Messung_Wert_12 = np.array([[332 , 543, 457],
[664 , 373, 627],
[1000, 284, 716]])
Wheatstone10= np.array([row[0] * row[1] / row[2] for row in Messung_Wert_10])
Wheatstone12 = np.array([row[0] * row[1] / row[2] for row in Messung_Wert_12])
print("Ergebnisse für Wheatstone: ", '\n', "Wert 10: ", Wheatstone10, Mean_Std(Wheatstone10), '\n', "Wert 12: ", Wheatstone12, Mean_Std(Wheatstone12), '\n')
# Kapazität
Messung_Wert_3 = np.array([[450, 519, 481],
[399, 490, 510],
[597, 590, 410]])
Messung_Wert_1 = np.array([[450, 407, 593],
[399, 380, 620],
[597, 478, 522]])
Kapazitäten3 = np.array([row[0] * row[2] / row[1] for row in Messung_Wert_3])
Kapazitäten1 = np.array([row[0] * row[2] / row[1] for row in Messung_Wert_1])
print("Ergebnisse für Kapazitäten: ", '\n', "Wert 3: ", Kapazitäten3, Mean_Std(Kapazitäten3), '\n', "Wert 1: ", Kapazitäten1, Mean_Std(Kapazitäten1), '\n')
# RC - Glied
Messung_Wert_8 = np.array([[450, 371, 606, 394],
[399, 418, 578, 422],
[597, 278, 673, 327]])
Messung_Wert_9 = np.array([[450, 466, 511, 489],
[399, 524, 482, 518],
[597, 352, 581, 419]])
Kapazitäten8 = np.array([row[0] * row[3] / row[2] for row in Messung_Wert_8])
Kapazitäten9 = np.array([row[0] * row[3] / row[2] for row in Messung_Wert_9])
Wiederstand8 = np.array([row[1] * row[2] / row[3] for row in Messung_Wert_8])
Wiederstand9 = np.array([row[1] * row[2] / row[3] for row in Messung_Wert_9])
print("Ergebnisse für RC-Glied: ", '\n')
print("Ergebnisse Kapazitäten: ", '\n', "Wert 8: ", Kapazitäten8, Mean_Std(Kapazitäten8), '\n', "Wert 9: ", Kapazitäten9, Mean_Std(Kapazitäten9))
print("Ergebnisse Wiederstände: ", '\n', "Wert 8: ", Wiederstand8, Mean_Std(Wiederstand8), '\n', "Wert 9: ", Wiederstand9, Mean_Std(Wiederstand9), '\n')
# RL - Glied klassisch
Klassisch_Wert_16 = np.array([[14.6, 45, 907, 83],
[20.1, 57, 875, 125],
[27.5, 85, 837, 163]])
Klassisch_Wert_18 = np.array([[14.6, 108, 775, 225],
[20.1, 143, 715, 285],
[27.5, 197, 648, 352]])
Induktivität16 = np.array([row[0] * row[2] / row[3] for row in Klassisch_Wert_16])
Induktivität18 = np.array([row[0] * row[2] / row[3] for row in Klassisch_Wert_18])
Wiederstand16 = np.array([row[1] * row[2] / row[3] for row in Klassisch_Wert_16])
Wiederstand18 = np.array([row[1] * row[2] / row[3] for row in Klassisch_Wert_18])
print("Ergebnisse für RL-Glied klassisch: ", '\n')
print("Ergebnisse Induktivität: ", '\n', "Wert 16: ", Induktivität16, Mean_Std(Induktivität16), '\n', "Wert 18: ", Induktivität18, Mean_Std(Induktivität18))
print("Ergebnisse Wiederstände: ", '\n', "Wert 16: ", Wiederstand16, Mean_Std(Wiederstand16), '\n', "Wert 18: ", Wiederstand18, Mean_Std(Wiederstand18), '\n')
# RL - Glied Maxwell
C4 = 399 * 10**(-6)
Maxwell_Wert_18 = np.array([[1000, 128, 347],
[664, 193, 349],
[332, 382, 348]])
Maxwell_Wert_16 = np.array([[1000, 347, 829],
[664, 523, 829],
[332, 1036, 829]])
mInduktivität16 = np.array([row[0] * row[1] * C4 for row in Maxwell_Wert_16])
mInduktivität18 = np.array([row[0] * row[1] * C4 for row in Maxwell_Wert_18])
mWiederstand16 = np.array([row[1] * row[0] / row[2] for row in Maxwell_Wert_16])
mWiederstand18 = np.array([row[1] * row[0] / row[2] for row in Maxwell_Wert_18])
print("Ergebnisse für RL-Glied Maxwell: ", '\n')
print("Ergebnisse Induktivität: ", '\n', "Wert 16: ", mInduktivität16, Mean_Std(mInduktivität16), '\n', "Wert 18: ", mInduktivität18, Mean_Std(mInduktivität18))
print("Ergebnisse Wiederstände: ", '\n', "Wert 16: ", mWiederstand16, Mean_Std(mWiederstand16), '\n', "Wert 18: ", mWiederstand18, Mean_Std(mWiederstand18), '\n')
|
mit
|
hoburg/gpkit
|
gpkit/interactive/plot_sweep.py
|
1
|
3478
|
"Implements plot_sweep1d function"
import matplotlib.pyplot as plt
from ..exceptions import InvalidGPConstraint
def assign_axes(var, posys, axes):
"Assigns axes to posys, creating and formatting if necessary"
if not hasattr(posys, "__iter__"):
posys = [posys]
N = len(posys)
if axes is None:
_, axes = plt.subplots(N, 1, sharex="col", figsize=(4.5, 3+1.5*N))
if N == 1:
axes = [axes]
format_and_label_axes(var, posys, axes)
elif N == 1 and not hasattr(axes, "__len__"):
axes = [axes]
return posys, axes
def format_and_label_axes(var, posys, axes, ylabel=True):
"Formats and labels axes"
for posy, ax in zip(posys, axes):
if ylabel:
if hasattr(posy, "key"):
ylabel = (posy.key.descr.get("label", posy.key.name)
+ " [%s]" % posy.key.unitstr(dimless="-"))
else:
ylabel = str(posy)
ax.set_ylabel(ylabel)
ax.grid(color="0.6")
# ax.set_frame_on(False)
for item in [ax.xaxis.label, ax.yaxis.label]:
item.set_fontsize(12)
for item in ax.get_xticklabels() + ax.get_yticklabels():
item.set_fontsize(9)
ax.tick_params(length=0)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
for i in ax.spines.values():
i.set_linewidth(0.6)
i.set_color("0.6")
i.set_linestyle("dotted")
xlabel = (var.key.descr.get("label", var.key.name)
+ " [%s]" % var.key.unitstr(dimless="-"))
ax.set_xlabel(xlabel) # pylint: disable=undefined-loop-variable
plt.locator_params(nbins=4)
plt.subplots_adjust(wspace=0.15)
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
def plot_1dsweepgrid(model, sweeps, posys, origsol=None, tol=0.01, **solveargs):
"""Creates and plots a sweep from an existing model
Example usage:
    f, _ = plot_1dsweepgrid(m, {'x': np.linspace(1, 2, 5)}, 'y')
f.savefig('mysweep.png')
"""
origsubs = {swept: model.substitutions[swept] for swept in sweeps
if swept in model.substitutions}
if origsubs and not origsol:
try:
origsol = model.solve(**solveargs)
except InvalidGPConstraint:
origsol = model.localsolve(**solveargs)
if not hasattr(posys, "__iter__"):
posys = [posys]
N, S = len(posys), len(sweeps)
f, axes = plt.subplots(N, S, sharex='col', sharey='row',
figsize=(4+2*S, 4+2*N))
plt.subplots_adjust(hspace=0.15)
for i, (swept, swept_over) in enumerate(sweeps.items()):
if isinstance(swept_over, tuple) and len(swept_over) == 2:
sol = model.autosweep({swept: swept_over}, tol=tol, **solveargs)
else:
sol = model.sweep({swept: swept_over}, **solveargs)
if len(sweeps) == 1:
if len(posys) == 1:
subaxes = [axes]
else:
subaxes = axes
elif len(posys) == 1:
subaxes = [axes[i]]
else:
subaxes = axes[:, i]
sol.plot(posys, subaxes)
if origsubs:
for posy, ax in zip(posys, subaxes):
ax.plot(origsubs[swept], origsol(posy), "ko", markersize=4)
format_and_label_axes(swept, posys, subaxes, ylabel=(i == 0))
model.substitutions.update(origsubs)
return f, axes
|
mit
|
Avikalp7/image-aesthetics-learning
|
src/ScraperScripts/TwitterScraping.py
|
1
|
4020
|
import tweepy
import wget
from tweepy import OAuthHandler
import json
import shutil
import csv
import os
consumer_key = '<We can\'t make our key public here, please enter your own key here :D :)'
consumer_secret = '...'
access_token = '...'
access_secret = '...'
from pandas import DataFrame
# from tweet import compute
from math import exp
@classmethod
def parse(cls, api, raw):
status = cls.first_parse(api, raw)
setattr(status, 'json', json.dumps(raw))
return status
metadata = open("output2.csv", 'a')
wr = csv.writer(metadata,dialect='excel')
# Status() is the data model for a tweet
tweepy.models.Status.first_parse = tweepy.models.Status.parse
# tweepy.models.Status.parse = parse
# User() is the data model for a user profile
tweepy.models.User.first_parse = tweepy.models.User.parse
# tweepy.models.User.parse = parse
# You need to do it for all the models you need
def compute(name, count_num, max_num):
tweepy.models.Status.parse = parse
tweepy.models.User.parse = parse
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
print("Authentication successful")
api = tweepy.API(auth)
tweets = api.user_timeline(screen_name= name,
count=200, include_rts=False,
exclude_replies=True)
# print(tweets)
if len(tweets) == 0:
last_id = 0
else:
last_id = tweets[-1].id
print("Successfully downloaded the tweet information")
count = 0
while (1):
count = count + 1
try:
more_tweets = api.user_timeline(screen_name=name,
count=200,
include_rts=False,
exclude_replies=True,
max_id=last_id - 1)
except:
print ('Exception Handled!')
continue
# There are no more tweets
if (len(more_tweets) == 0):
break
else:
last_id = more_tweets[-1].id - 1
tweets = tweets + more_tweets
media_files = []
for status in tweets:
if len(media_files) >= max_num:
break
if 'media' in status.entities:
media = status.entities['media']
if (media[0]['type'] == 'photo' and 'photo' in status.entities['media'][0]['expanded_url']):
list = [str(status.author.name), str(status.retweet_count), str(status.favorite_count), str(status.created_at),
str(status.user.followers_count),str(status.user.friends_count), (status.user.location).encode('utf-8'),
(status.entities['media'][0]['expanded_url']).encode('utf-8'), (status.text).encode('utf-8')]
wr.writerow(list)
print(list)
print('\n\n')
print(media[0]['media_url'])
media_files.append(media[0]['media_url'])
else:
continue
# print media_files
count = count_num
print("\nNumber of images downloaded is " + str(len(media_files)))
print("\nImage url extraction successful")
folder_name = 'twitter_images'
    if not os.path.isdir(os.path.join(os.getcwd(), folder_name)):
os.makedirs(folder_name)
for media_file in media_files:
count = count + 1
if(count%5 == 0):
print("\n" + str(count) + " images have been downloaded")
try:
filename = wget.download(media_file)
except:
print("\nException Handled!")
continue
shutil.copyfile(filename, "." + "/" + folder_name + "/img" + str(count))
os.remove(filename)
return count
def sigmoid(x):
return 1/float(1+exp(-x))
def preprocess():
df = DataFrame.from_csv('names.csv', header = 0)
num = []
# print (df.keys)
temp = list(df['Tweets(K)'])
temp2 = list(df['Organisation_Handle'])
# print (temp[0])
# temp.remove('Tweets(K)')
mean = sum(temp)/float(len(temp))
count = 0
max_elem = max(temp)
min_elem = min(temp)
for x in temp:
temp[count] = (temp[count] - mean)/float(max_elem - min_elem)
count = count + 1
for i in range(0, len(temp)):
j = int(300*sigmoid(temp[i]))
num.append(j)
return num, temp2
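# Rough intuition: an account whose scaled tweet count equals the mean maps to
# sigmoid(0) = 0.5, i.e. int(300 * 0.5) = 150 images; heavier tweeters get more,
# lighter ones fewer, always between 0 and 300.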
# names = ['Audi', 'BMW', 'CocaCola', 'drpepper', 'subway']
num_imgs, names = preprocess()
current_count = 0
count = 0
for name in names[0:18]:
print('Starting with following corp: ')
print(name)
current_count = compute(name, current_count, num_imgs[count])
count = count + 1
|
mit
|
Frankenberrypi/Plotter
|
charts.py
|
1
|
2282
|
#!/usr/bin/python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas
# Start some empty lists
#timeBase = []
#timeL = []
#tempL = []
#humiL = []
# import the csv file
df = pandas.read_csv('data.csv')
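# data.csv is assumed (based on the commented logging loop below) to hold three
# columns: a timestamp, temperature in F, and relative humidity in %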
print df.ix[:,1]
# Temperature plot
plt.plot(df.ix[:,0],df.ix[:,1])
#plt.plot(timeL,tempL)
plt.ylim([60,85])
plt.ylabel('Temperature, F')
plt.xlabel('Some sort of time')
plt.savefig('temperature.png', bbox_inches='tight')
plt.clf()
# Humidity plot
plt.plot(df.ix[:,0],df.ix[:,2])
#plt.plot(timeL,humiL)
plt.ylim([20,70])
plt.ylabel('Relative Humidity, %')
plt.xlabel('Some sort of time')
plt.savefig('humidity.png', bbox_inches='tight')
plt.clf()
'''
# Continuously append data
while(True):
# Run the DHT program to get the humidity and temperature readings
output = subprocess.check_output(["./Adafruit_DHT", "2302", "4"]);
print output
matches = re.search("Temp =\s+([0-9.]+)", output)
if (not matches):
time.sleep(3)
continue
tempC = float(matches.group(1))
temp = tempC * 1.8 + 32
# search for humidity printout
matches = re.search("Hum =\s+([0-9.]+)", output)
if (not matches):
time.sleep(3)
continue
humidity = float(matches.group(1))
print "Temperature: %.1f F" % temp
print "Humidity: %.1f %%" % humidity
# Stick it in some lists
timeNow = datetime.datetime.now()
timeBase.append(timeNow)
timeL = matplotlib.dates.date2num(timeBase)
tempL.append(temp)
humiL.append(humidity)
# make a row
dataRow = [timeNow, temp, humidity]
# Open a .csv file
with open('data.csv', 'a') as dataFile:
dataWriter = csv.writer(dataFile, delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
dataWriter.writerow(dataRow)
# Temp plot
plt.plot(timeL,tempL)
plt.ylim([60,85])
plt.ylabel('Temperature, F')
plt.xlabel('Some sort of time')
plt.savefig('temperature.png', bbox_inches='tight')
plt.clf()
# Humidity plot
plt.plot(timeL,humiL)
plt.ylim([20,70])
plt.ylabel('Relative Humidity, %')
plt.xlabel('Some sort of time')
plt.savefig('humidity.png', bbox_inches='tight')
plt.clf()
# Show me the data
# print timeL
# print tempL
# print humiL
# Wait 30 seconds before continuing
print "Wrote a row to output"
time.sleep(30)
'''
|
gpl-2.0
|
CityPulse/CP_Resourcemanagement
|
virtualisation/misc/stats.py
|
1
|
11883
|
'''
Created on 19 Oct 2015
@author: thiggena
'''
from collections import OrderedDict
import csv
import datetime
from matplotlib import pyplot
from virtualisation.misc.buffer import NumericRingBuffer
from virtualisation.misc.jsonobject import JSONObject
PATH = "./"
BUFFER_SIZE = 1000
class Element(object):
def __init__(self, name):
self.name = name
def finish(self):
return "Name: " + self.name
class TimeElement(Element):
def __init__(self, name, value=0):
super(TimeElement, self).__init__(name)
self.buffer = NumericRingBuffer(BUFFER_SIZE)
[self.buffer.add(0) for _i in range(0, value)]
self.startTime = None
def started(self, test=False):
if self.startTime:
return True
return False
def start(self):
if self.startTime is None:
self.startTime = datetime.datetime.now()
else:
print self.name, "already started!"
def stop(self, stoptime):
if self.startTime is not None:
self.buffer.add((stoptime - self.startTime).total_seconds())
self.startTime = None
else:
print "TimeElement", self.name, "already stopped"
def finish(self):
print super(TimeElement, self).finish()
print "Mean:", self.mean()
return super(TimeElement, self).finish()
def mean(self):
return self.buffer.mean()
def sum(self):
return sum(self.buffer)
def getData(self, name):
return (name, self.buffer)
def insertNotUsedValue(self, values=0):
for _i in range(0, (values-self.buffer.len())):
self.buffer.add(0)
class TimeElementList(TimeElement):
def __init__(self, name, value=0):
super(TimeElementList, self).__init__(name, value)
self.timeElementMap = {}
def getData(self, name):
dataList = []
for element in self.timeElementMap:
dataList.append(self.timeElementMap[element].getData(name + "." + element))
dataList.append(super(TimeElementList, self).getData(name))
return dataList
def startElement(self,categoryList):
timeElementList = None
if categoryList[0] in self.timeElementMap:
timeElementList = self.timeElementMap[categoryList[0]]
else:
timeElementList = TimeElementList(categoryList[0], self.buffer.len())
timeElementList.start()
self.timeElementMap[categoryList[0]] = timeElementList
if not timeElementList.started():
timeElementList.start()
if len(categoryList) > 1:
timeElementList.startElement(categoryList[1:])
def stopElement(self, categoryList, stoptime):
if categoryList[0] in self.timeElementMap:
timeElementList = self.timeElementMap[categoryList[0]]
if len(categoryList) > 1:
timeElementList.stopElement(categoryList[1:], stoptime)
else:
if timeElementList.started():
timeElementList.stop(stoptime)
def start(self):
super(TimeElementList, self).start()
def stop(self, stoptime):
super(TimeElementList, self).stop(stoptime)
[e.stop(stoptime) for e in self.timeElementMap.values() if e.started(True)]
self.insertNotUsedValue(self.buffer.len())
def insertNotUsedValue(self, values=0):
super(TimeElementList, self).insertNotUsedValue(values)
[e.insertNotUsedValue(values) for e in self.timeElementMap.values()]
def finish(self):
super(TimeElementList, self).finish()
for e in self.timeElementMap:
self.timeElementMap[e].finish()
# def writeCSVFile(self, name):
# data = self.getData(self.name)
# tuples = []
# for e in data:
# self.parse(e, tuples)
#
# csvfile = open(PATH + str(name) + "_" + str(self.name) + ".csv", 'w')
# csvf = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
#
# header = []
# maxEntries = 0
# for e in tuples:
# header.append(e[0])
# maxEntries = max(maxEntries, e[1].len())
# csvf.writerow(header)
#
# for i in range(0, maxEntries):
# row = []
# for e in tuples:
# data = e[1]
# if data.len() >= i+1:
# row.append(data.items[i])
# else:
# row.append("")
# csvf.writerow(row)
# csvfile.close()
def parse(self, data, tuples):
if isinstance(data, list):
for e in data:
self.parse(e, tuples)
else:
tuples.append(data)
def getAverageProcessingTimes(self):
job = JSONObject()
job.name = self.name
job.value = self.mean()
if len(self.timeElementMap) > 0:
job.values = []
for element in self.timeElementMap:
if len(self.timeElementMap[element].timeElementMap) > 0:
job.values.append(self.timeElementMap[element].getAverageProcessingTimes())
else:
job2 = JSONObject()
job2.name = element
job2.value = self.timeElementMap[element].mean()
job.values.append(job2)
return job
class CounterElement(Element):
def __init__(self, name):
super(CounterElement, self).__init__(name)
self.counter = 0
self.counterMap = OrderedDict()
def count(self, timestamp=None):
self.counter += 1
if timestamp:
self.counterMap[timestamp] = self.counter
else:
self.counterMap[datetime.datetime.now()] = self.counter
# def writeCSVFile(self, name):
# csvfile = open(PATH + name + "_" + self.name + "_count.csv", 'w')
# csvf = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# header = ["date", "count"]
# csvf.writerow(header)
# for element in self.counterMap:
# csvf.writerow([element, self.counterMap[element]])
# csvfile.close()
class SizeElement(Element):
def __init__(self, name):
super(SizeElement, self).__init__(name)
self.items = OrderedDict()
def addItem(self, time, value):
self.items[time] = value
def finish(self):
print super(SizeElement, self).finish()
for item in self.items:
print item, self.items[item]
def plot(self, name):
x = self.items.keys()
y = self.items.values()
pyplot.plot(x, y)
print "x", x, min(x), max(x)
print "y", y, min(y), max(y)
pyplot.axis([min(x), max(x), min(y), max(y)])
pyplot.savefig(PATH + name + "_" + self.name+ ".png")
def getData(self, name):
return (name, self.items)
# def writeCSVFile(self, name):
# csvfile = open(PATH + name + "_" + self.name + "_size.csv", 'w')
# csvf = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# header = ["date", "value"]
# csvf.writerow(header)
# for element in self.items:
# csvf.writerow([element, self.items[element]])
# csvfile.close()
class Stats(object):
instances = {}
def __new__(cls, name):
if name not in Stats.instances:
Stats.instances[name] = Stats.__Stats(name)
return Stats.instances[name]
@classmethod
def getOrMake(cls, name):
if name not in Stats.instances:
Stats.instances[name] = Stats.__Stats(name)
return Stats.instances[name]
@classmethod
def getAllStats(cls):
return Stats.instances.values()
@classmethod
def get(cls, name):
if name in Stats.instances:
return Stats.instances[name]
return None
# @classmethod
# def writeCSVs(cls):
# for e in Stats.instances.values():
# e.writeCSVFiles()
@classmethod
def finish(cls):
jobs = JSONObject()
jobs.stats = []
for e in Stats.instances:
job = JSONObject()
job.name = e
job.value = e.getvalue()
jobs.stats.append(job)
return jobs
class __Stats(object):
def __init__(self, name):
self.name = name
self.elements = {}
def finish(self):
print "Stats for", self.name
for e in self.elements:
self.elements[e].finish()
# def writeCSVFiles(self):
# for e in self.elements:
# self.elements[e].writeCSVFile(self.name)
def addSize(self, name, time, value):
if name not in self.elements:
element = SizeElement(name)
self.elements[name] = element
else:
element = self.elements[name]
element.addItem(time, value)
def count(self, name, timestamp=None):
if name not in self.elements:
element = CounterElement(name)
self.elements[name] = element
else:
element = self.elements[name]
element.count(timestamp)
def startMeasurement(self, categoryString):
categories = categoryString.split(".")
timeElementList = None
if categories[0] in self.elements:
timeElementList = self.elements[categories[0]]
else:
timeElementList = TimeElementList(categories[0])
self.elements[categories[0]] = timeElementList
if not timeElementList.started():
timeElementList.start()
if len(categories) > 1:
timeElementList.startElement(categories[1:])
def stopMeasurement(self, categoryString):
stoptime = datetime.datetime.now()
categories = categoryString.split(".")
if categories[0] in self.elements:
timeElementList = self.elements[categories[0]]
if len(categories) > 1:
timeElementList.stopElement(categories[1:], stoptime)
else:
if timeElementList.started():
timeElementList.stop(stoptime)
else:
print "cannot stop element", categories[0], ", related elements not stopped yet"
def getAverageProcessingTimes(self):
times = []
for element in self.elements:
if isinstance(self.elements[element], TimeElementList):
times.append(self.elements[element].getAverageProcessingTimes())
return times
# if __name__ == '__main__':
# from time import sleep
# s = Stats("test")
# for i in range(0, 10):
# s.startMeasurement("method")
# # print i
# if i is 0:
# s.startMeasurement("method.if")
# sleep(0.0005)
# s.startMeasurement("method.if.test")
# sleep(0.0005)
# s.stopMeasurement("method.if.test")
# s.stopMeasurement("method.if")
#
# else:
# s.startMeasurement("method.else")
# sleep(0.0005)
# s.stopMeasurement("method.else")
# s.stopMeasurement("method")
# print s.getAverageProcessingTimes()[0].dumps()
# print "#####"
#
# s.writeCSVFiles()
|
mit
|
mirrorcoloured/slcypi
|
MA/Robot_V002l.py
|
1
|
11863
|
# Import statements
import sys
sys.path.append("/home/pi/Documents/Robots/slcypi/MA") ### ADD PATH
sys.path.append("/home/pi/Documents/Robots/slcypi/HAT_Python3") ### ADD PATH
import cv2
import numpy as np
import matplotlib.pyplot as plt
from Tank import Tank
from ImageAnalysis import ImageAnalysis
import picamera
import picamera.array
import time
import pygame
from scipy import ndimage
from time import sleep
# Settings
WIDTH = 320
HEIGHT = 240
# Initialize Tank
robot = Tank()
robot.correctDirections(True,True,True)
# Initialize ImageAnalysis
IA = ImageAnalysis()
IA.filterLower = np.array([25,35,70])
IA.filterUpper = np.array([65,255,205])
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
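# Note: both Haar cascade XML files are loaded from the current working directory;
# they ship with OpenCV (newer opencv-python builds expose their folder as
# cv2.data.haarcascades) and must be copied next to this script for detection to work.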
def faceDetection(bgr):
gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(bgr,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = bgr[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
return(faces,bgr)
# Initialize Pygame
pygame.init()
pygame.display.set_caption('My Robot')
screen = pygame.display.set_mode((WIDTH,HEIGHT),0)
# Start settings
auto = False
done = False
viewOptions = ["noFilter","colorFilter","lineDetection","faceDetection","opticalFlow","featureMatch"]
viewNr = 0
startTime = time.time()
def toggleView(viewNr):
viewNr = viewNr + 1
if viewNr > 3:
viewNr = 0
print(viewOptions[viewNr])
return(viewNr)
with picamera.PiCamera() as camera:
with picamera.array.PiRGBArray(camera) as stream:
camera.resolution = (WIDTH, HEIGHT)
print("HI")
while done == False:
# Image capture
camera.capture(stream, 'bgr', use_video_port=True)
bgr = stream.array
# Image process
res, mask = IA.colorFilter(bgr, False, False)
if viewOptions[viewNr] == "noFilter":
res = bgr
if viewOptions[viewNr] == "lineDetection":
res = IA.edgeDetection(bgr)
if viewOptions[viewNr] == "faceDetection":
faces, res = faceDetection(bgr)
if viewOptions[viewNr] == "featureMatch":
res = IA.featureMatch(bgr,previous)
                previous = bgr  # keep the latest frame for the next pass ('current' was undefined)
if viewOptions[viewNr] == "opticalFlow":
res = IA.opticalFlow(bgr,previous, hsv)
                previous = bgr  # keep the latest frame for the next pass ('current' was undefined)
# Image transpose
res = cv2.transpose(res)
mask = np.transpose(mask)
# Image display
sface = pygame.surfarray.make_surface(res)
screen.blit(sface,(0,0))
pygame.display.update()
# User events
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
# Exit on escape
if (event.key == pygame.K_ESCAPE):
done = True
# View toggle
if event.key == (pygame.K_v):
viewNr = toggleView(viewNr)
# Create hsv required for optical flow
previous = bgr
hsv = np.zeros_like(previous)
hsv[...,1] = 255
# Drive commands
if event.key == (pygame.K_UP):
robot.driveSync(1)
if event.key == (pygame.K_DOWN):
robot.driveSync(-1)
if (event.key == pygame.K_LEFT):
robot.rotateSync(1,45)
if (event.key == pygame.K_RIGHT):
robot.rotateSync(-1,45)
if (event.key == pygame.K_q):
auto = True
if (event.key == pygame.K_w):
auto = False
robot.driveSync(0)
robot.rotateSync(0)
if (event.key == pygame.K_7):
IA.filterUpper[0] = IA.filterUpper[0] + 5
print(IA.filterUpper)
if (event.key == pygame.K_u):
IA.filterUpper[0] = IA.filterUpper[0] - 5
print(IA.filterUpper)
if (event.key == pygame.K_j):
IA.filterLower[0] = IA.filterLower[0] + 5
print(IA.filterLower)
if (event.key == pygame.K_m):
IA.filterLower[0] = IA.filterLower[0] - 5
print(IA.filterLower)
if (event.key == pygame.K_8):
IA.filterUpper[1] = IA.filterUpper[1] + 5
print(IA.filterUpper)
if (event.key == pygame.K_i):
IA.filterUpper[1] = IA.filterUpper[1] - 5
print(IA.filterUpper)
if (event.key == pygame.K_k):
IA.filterLower[1] = IA.filterLower[1] + 5
print(IA.filterLower)
if (event.key == pygame.K_COMMA):
IA.filterLower[1] = IA.filterLower[1] - 5
print(IA.filterLower)
if (event.key == pygame.K_9):
IA.filterUpper[2] = IA.filterUpper[2] + 5
print(IA.filterUpper)
if (event.key == pygame.K_o):
IA.filterUpper[2] = IA.filterUpper[2] - 5
print(IA.filterUpper)
if (event.key == pygame.K_l):
IA.filterLower[2] = IA.filterLower[2] + 5
print(IA.filterLower)
if (event.key == pygame.K_PERIOD):
IA.filterLower[2] = IA.filterLower[2] - 5
print(IA.filterLower)
if event.type == pygame.KEYUP:
if event.key == (pygame.K_UP):
robot.driveSync(0)
if event.key == (pygame.K_DOWN):
robot.driveSync(0)
if (event.key == pygame.K_LEFT):
robot.rotateSync(0)
if (event.key == pygame.K_RIGHT):
robot.rotateSync(0)
# Autonomous
if auto == True:
# Analyze line
aRes = IA.blockAnalyze(mask)
print(aRes)
dir = aRes[0]
count = aRes[1]
# Drive
if abs(dir) > 0.20:
rotateSpeed = 50
if abs(dir) > 0.5:
rotateSpeed = 80
if abs(dir) > 0.75:
rotateSpeed = 90
if dir > 0:
print("Rotate -1")
robot.rotateSync(-1, rotateSpeed)
sleep(0.05)
robot.rotateSync(0)
else:
print("Rotate 1")
robot.rotateSync(1, rotateSpeed)
sleep(0.05)
robot.rotateSync(0)
if dir > -999:
relCount = (1 - abs(dir)) * count
if count > 800:
driveSpeed = 50
if count > 8000:
driveSpeed = int(relCount / 8000 * 50)
print(driveSpeed)
if driveSpeed > 45 :
robot.driveSync(1, driveSpeed)
else:
robot.driveSync(0)
else:
robot.driveSync(0)
# Handle stream
stream.seek(0)
stream.truncate()
# Compute fps
lapseTime = (time.time() - startTime)
startTime = time.time()
if lapseTime > 0:
fps = 1.0 / lapseTime
print("fps: " + str(fps))
robot.stop()
pygame.quit()
|
mit
|
relacs/relacs
|
plugins/efield/printtrace.py
|
3
|
1988
|
# example script for printing all visible traces
# set plotcommand to "python printtrace.py" in relacs.cfg
import numpy as np
import matplotlib.pyplot as plt
import subprocess
def is_float( s ):
try:
float(s)
return True
except ValueError:
return False
def loaddat( filename ):
""" Load ascii data files into a numpy array
"""
header = {}
key = []
data = []
inkey = False
for l in open( filename ) :
if l.startswith( "#" ) :
if l.startswith( "#Key" ) :
inkey = True
elif ":" in l :
inkey = False
tmp = [e.strip() for e in l[1:].partition(':')]
header[tmp[0]] = tmp[2]
elif inkey :
key.append( l.split()[1:] )
elif l and not l.isspace() :
inkey = False
data.append( [ float( i ) for i in l.split() if is_float( i ) ] )
return np.array( data ), key, header
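# Hedged sketch of the header layout loaddat() expects (hypothetical values; the real
# traces.dat is written by relacs):
#   #Species: Apteronotus albifrons
#   #EOD Rate: 800Hz
#   #Key
#   # t   V
#   # s   mV
#   0.000  -70.1
#   0.001  -70.3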
# load data:
data, key, header = loaddat( "traces.dat" )
# plot data:
ncols = len( key[-1] )-1
fw = 21.0
fh = 29.7
if ncols == 1 :
fh = 12.0
fig = plt.figure( figsize=( fw/2.54, fh/2.54 ) )
for col in xrange( 1, ncols+1 ) :
ax = fig.add_subplot( ncols, 1, col )
if col == 1 :
ax.set_title( header['Species'] + ", EODf=" + header['EOD Rate'], fontsize=18 )
ax.set_xlim( data[0,0], data[-1,0] )
# if col == ncols :
ax.set_xlabel( key[0][0] + ' [' + key[1][0] + ']' )
# else :
# plt.setp( ax.get_xticklabels(), visible=False)
ax.set_ylabel( key[0][col] + ' [' + key[1][col] + ']' )
ax.ticklabel_format(useOffset=False)
ax.plot( data[:,0], data[:,col], lw=2 )
# fig.subplots_adjust(left=0.2, hspace=0.0 )
fig.subplots_adjust(left=0.2, right=0.9, bottom=0.13, top=0.87, hspace=0.18 )
#plt.tight_layout()
plt.savefig( 'traces.pdf', papertype="a4" )
# fix pdf file and print:
subprocess.call( 'pdfjam -q -o /dev/stdout traces.pdf 1 | lpr', shell=True )
|
gpl-3.0
|
mringel/ThinkStats2
|
code/regression.py
|
62
|
9652
|
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import pandas
import random
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import re
import chap01soln
import first
import linear
import thinkplot
import thinkstats2
def QuickLeastSquares(xs, ys):
"""Estimates linear least squares fit and returns MSE.
xs: sequence of values
ys: sequence of values
returns: inter, slope, mse
"""
n = float(len(xs))
meanx = xs.mean()
dxs = xs - meanx
varx = np.dot(dxs, dxs) / n
meany = ys.mean()
dys = ys - meany
cov = np.dot(dxs, dys) / n
slope = cov / varx
inter = meany - slope * meanx
res = ys - (inter + slope * xs)
mse = np.dot(res, res) / n
return inter, slope, mse
def ReadVariables():
"""Reads Stata dictionary files for NSFG data.
returns: DataFrame that maps variables names to descriptions
"""
vars1 = thinkstats2.ReadStataDct('2002FemPreg.dct').variables
vars2 = thinkstats2.ReadStataDct('2002FemResp.dct').variables
all_vars = vars1.append(vars2)
all_vars.index = all_vars.name
return all_vars
def JoinFemResp(df):
"""Reads the female respondent file and joins on caseid.
df: DataFrame
"""
resp = chap01soln.ReadFemResp()
resp.index = resp.caseid
join = df.join(resp, on='caseid', rsuffix='_r')
# convert from colon-separated time strings to datetimes
join.screentime = pandas.to_datetime(join.screentime)
return join
def GoMining(df):
"""Searches for variables that predict birth weight.
df: DataFrame of pregnancy records
returns: list of (rsquared, variable name) pairs
"""
variables = []
for name in df.columns:
try:
if df[name].var() < 1e-7:
continue
formula = 'totalwgt_lb ~ agepreg + ' + name
formula = formula.encode('ascii')
model = smf.ols(formula, data=df)
if model.nobs < len(df)/2:
continue
results = model.fit()
except (ValueError, TypeError):
continue
variables.append((results.rsquared, name))
return variables
def MiningReport(variables, n=30):
"""Prints variables with the highest R^2.
    variables: list of (R^2, variable name) pairs
n: number of pairs to print
"""
all_vars = ReadVariables()
variables.sort(reverse=True)
for mse, name in variables[:n]:
key = re.sub('_r$', '', name)
try:
desc = all_vars.loc[key].desc
if isinstance(desc, pandas.Series):
desc = desc[0]
print(name, mse, desc)
except KeyError:
print(name, mse)
def PredictBirthWeight(live):
"""Predicts birth weight of a baby at 30 weeks.
live: DataFrame of live births
"""
live = live[live.prglngth>30]
join = JoinFemResp(live)
t = GoMining(join)
MiningReport(t)
formula = ('totalwgt_lb ~ agepreg + C(race) + babysex==1 + '
'nbrnaliv>1 + paydu==1 + totincr')
results = smf.ols(formula, data=join).fit()
SummarizeResults(results)
def SummarizeResults(results):
"""Prints the most important parts of linear regression results:
results: RegressionResults object
"""
for name, param in results.params.iteritems():
pvalue = results.pvalues[name]
print('%s %0.3g (%.3g)' % (name, param, pvalue))
try:
print('R^2 %.4g' % results.rsquared)
ys = results.model.endog
print('Std(ys) %.4g' % ys.std())
print('Std(res) %.4g' % results.resid.std())
except AttributeError:
print('R^2 %.4g' % results.prsquared)
def RunSimpleRegression(live):
"""Runs a simple regression and compare results to thinkstats2 functions.
live: DataFrame of live births
"""
# run the regression with thinkstats2 functions
live_dropna = live.dropna(subset=['agepreg', 'totalwgt_lb'])
ages = live_dropna.agepreg
weights = live_dropna.totalwgt_lb
inter, slope = thinkstats2.LeastSquares(ages, weights)
res = thinkstats2.Residuals(ages, weights, inter, slope)
r2 = thinkstats2.CoefDetermination(weights, res)
# run the regression with statsmodels
formula = 'totalwgt_lb ~ agepreg'
model = smf.ols(formula, data=live)
results = model.fit()
SummarizeResults(results)
def AlmostEquals(x, y, tol=1e-6):
return abs(x-y) < tol
assert(AlmostEquals(results.params['Intercept'], inter))
assert(AlmostEquals(results.params['agepreg'], slope))
assert(AlmostEquals(results.rsquared, r2))
def PivotTables(live):
"""Prints a pivot table comparing first babies to others.
live: DataFrame of live births
"""
table = pandas.pivot_table(live, rows='isfirst',
values=['totalwgt_lb', 'agepreg'])
print(table)
def FormatRow(results, columns):
"""Converts regression results to a string.
results: RegressionResults object
returns: string
"""
t = []
for col in columns:
coef = results.params.get(col, np.nan)
pval = results.pvalues.get(col, np.nan)
if np.isnan(coef):
s = '--'
elif pval < 0.001:
s = '%0.3g (*)' % (coef)
else:
s = '%0.3g (%0.2g)' % (coef, pval)
t.append(s)
try:
t.append('%.2g' % results.rsquared)
except AttributeError:
t.append('%.2g' % results.prsquared)
return t
def RunModels(live):
"""Runs regressions that predict birth weight.
live: DataFrame of pregnancy records
"""
columns = ['isfirst[T.True]', 'agepreg', 'agepreg2']
header = ['isfirst', 'agepreg', 'agepreg2']
rows = []
formula = 'totalwgt_lb ~ isfirst'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ isfirst + agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
live['agepreg2'] = live.agepreg**2
formula = 'totalwgt_lb ~ isfirst + agepreg + agepreg2'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
PrintTabular(rows, header)
def PrintTabular(rows, header):
"""Prints results in LaTeX tabular format.
rows: list of rows
header: list of strings
"""
s = r'\hline ' + ' & '.join(header) + r' \\ \hline'
print(s)
for row in rows:
s = ' & '.join(row) + r' \\'
print(s)
print(r'\hline')
def LogisticRegressionExample():
"""Runs a simple example of logistic regression and prints results.
"""
y = np.array([0, 1, 0, 1])
x1 = np.array([0, 0, 0, 1])
x2 = np.array([0, 1, 1, 1])
beta = [-1.5, 2.8, 1.1]
log_o = beta[0] + beta[1] * x1 + beta[2] * x2
print(log_o)
o = np.exp(log_o)
print(o)
p = o / (o+1)
print(p)
like = y * p + (1-y) * (1-p)
print(like)
print(np.prod(like))
df = pandas.DataFrame(dict(y=y, x1=x1, x2=x2))
results = smf.logit('y ~ x1 + x2', data=df).fit()
print(results.summary())
def RunLogisticModels(live):
"""Runs regressions that predict sex.
live: DataFrame of pregnancy records
"""
#live = linear.ResampleRowsWeighted(live)
df = live[live.prglngth>30]
df['boy'] = (df.babysex==1).astype(int)
df['isyoung'] = (df.agepreg<20).astype(int)
df['isold'] = (df.agepreg<35).astype(int)
df['season'] = (((df.datend+1) % 12) / 3).astype(int)
# run the simple model
model = smf.logit('boy ~ agepreg', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# run the complex model
model = smf.logit('boy ~ agepreg + hpagelb + birthord + C(race)', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# make the scatter plot
exog = pandas.DataFrame(model.exog, columns=model.exog_names)
endog = pandas.DataFrame(model.endog, columns=[model.endog_names])
xs = exog['agepreg']
lo = results.fittedvalues
o = np.exp(lo)
p = o / (o+1)
#thinkplot.Scatter(xs, p, alpha=0.1)
#thinkplot.Show()
# compute accuracy
actual = endog['boy']
baseline = actual.mean()
predict = (results.predict() >= 0.5)
true_pos = predict * actual
true_neg = (1 - predict) * (1 - actual)
acc = (sum(true_pos) + sum(true_neg)) / len(actual)
print(acc, baseline)
columns = ['agepreg', 'hpagelb', 'birthord', 'race']
new = pandas.DataFrame([[35, 39, 3, 1]], columns=columns)
y = results.predict(new)
print(y)
def main(name, data_dir='.'):
thinkstats2.RandomSeed(17)
LogisticRegressionExample()
live, firsts, others = first.MakeFrames()
live['isfirst'] = (live.birthord == 1)
RunLogisticModels(live)
RunSimpleRegression(live)
RunModels(live)
PredictBirthWeight(live)
if __name__ == '__main__':
import sys
main(*sys.argv)
|
gpl-3.0
|
Bismarrck/tensorflow
|
tensorflow/contrib/timeseries/examples/known_anomaly.py
|
24
|
7880
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of using an exogenous feature to ignore a known anomaly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from os import path
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/changepoints.csv")
def state_space_estimator(exogenous_feature_columns):
"""Constructs a StructuralEnsembleRegressor."""
def _exogenous_update_condition(times, features):
del times # unused
# Make exogenous updates sparse by setting an update condition. This in
# effect allows missing exogenous features: if the condition evaluates to
# False, no update is performed. Otherwise we sometimes end up with "leaky"
# updates which add unnecessary uncertainty to the model even when there is
# no changepoint.
return tf.equal(tf.squeeze(features["is_changepoint"], axis=-1), "yes")
return (
tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=12,
# Extract a smooth period by constraining the number of latent values
# being cycled between.
cycle_num_latent_values=3,
num_features=1,
exogenous_feature_columns=exogenous_feature_columns,
exogenous_update_condition=_exogenous_update_condition),
# Use truncated backpropagation with a window size of 64, batching
# together 4 of these windows (random offsets) per training step. Training
# with exogenous features often requires somewhat larger windows.
4, 64)
def autoregressive_estimator(exogenous_feature_columns):
input_window_size = 8
output_window_size = 2
return (
tf.contrib.timeseries.ARRegressor(
periodicities=12,
num_features=1,
input_window_size=input_window_size,
output_window_size=output_window_size,
exogenous_feature_columns=exogenous_feature_columns),
64, input_window_size + output_window_size)
def train_and_evaluate_exogenous(
estimator_fn, csv_file_name=_DATA_FILE, train_steps=300):
"""Training, evaluating, and predicting on a series with changepoints."""
# Indicate the format of our exogenous feature, in this case a string
# representing a boolean value.
string_feature = tf.feature_column.categorical_column_with_vocabulary_list(
key="is_changepoint", vocabulary_list=["no", "yes"])
# Specify the way this feature is presented to the model, here using a one-hot
# encoding.
one_hot_feature = tf.feature_column.indicator_column(
categorical_column=string_feature)
estimator, batch_size, window_size = estimator_fn(
exogenous_feature_columns=[one_hot_feature])
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
# Indicate the format of our CSV file. First we have two standard columns,
# one for times and one for values. The third column is a custom exogenous
# feature indicating whether each timestep is a changepoint. The
# changepoint feature name must match the string_feature column name
# above.
column_names=(tf.contrib.timeseries.TrainEvalFeatures.TIMES,
tf.contrib.timeseries.TrainEvalFeatures.VALUES,
"is_changepoint"),
# Indicate dtypes for our features.
column_dtypes=(tf.int64, tf.float32, tf.string),
# This CSV has a header line; here we just ignore it.
skip_header_lines=1)
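  # Hedged sketch of the first rows of such a CSV (values are made up; "time" and
  # "is_changepoint" are the header names read back further below, the value column
  # name is an assumption):
  #   time,value,is_changepoint
  #   0,1.13,no
  #   1,1.52,no
  #   2,4.81,yes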
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=batch_size, window_size=window_size)
estimator.train(input_fn=train_input_fn, steps=train_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Create an input_fn for prediction, with a simulated changepoint. Since all
# of the anomalies in the training data are explained by the exogenous
# feature, we should get relatively confident predictions before the indicated
# changepoint (since we are telling the model that no changepoint exists at
# those times) and relatively uncertain predictions after.
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features={
"is_changepoint": [["no"] * 49 + ["yes"] + ["no"] * 50]})))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
# Indicate the locations of the changepoints for plotting vertical lines.
anomaly_locations = []
with open(csv_file_name, "r") as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
if row["is_changepoint"] == "yes":
anomaly_locations.append(int(row["time"]))
anomaly_locations.append(predictions["times"][49])
return (times, observed, all_times, mean, upper_limit, lower_limit,
anomaly_locations)
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit, anomaly_locations):
"""Plot the time series and anomalies in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.axvline(anomaly_locations[0], linestyle="dotted", label="changepoints")
for anomaly_location in anomaly_locations[1:]:
pyplot.axvline(anomaly_location, linestyle="dotted")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Ignoring a known anomaly (state space)",
*train_and_evaluate_exogenous(
estimator_fn=state_space_estimator))
make_plot("Ignoring a known anomaly (autoregressive)",
*train_and_evaluate_exogenous(
estimator_fn=autoregressive_estimator, train_steps=3000))
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
|
apache-2.0
|
flohorovicic/pynoddy
|
pynoddy/experiment/uncertainty_analysis_bak.py
|
1
|
12689
|
import sys, os
import pynoddy
from pynoddy.experiment.monte_carlo import MonteCarlo
from pynoddy.output import NoddyOutput
import numpy as np
import math
class UncertaintyAnalysis(MonteCarlo):
"""Perform uncertainty analysis experiments for kinematic models
"""
def __init__(self, history, parameters, basename="out"):
"""Creates an experiment class for uncertainty analysis methods for kinematic models
**Arguments**:
- *history* = The .his file this experiment is based on
- *parameters* = A string pointing to a csv file defining the statistical
properties of the model properties being varied, or alternatively an array
of python dictionaries with the same function. This file/dictionary array
              should have columns/keys defining:
1) the event and parameter being varied (titled 'event' and 'parameter')
2) the statistical distribution to sample from (titled 'type' and containing either 'normal',
'vonmises' or 'uniform')
3) the distribution mean (titled 'mean') and,
              4) a column defining the distance between the 2.5th and 97.5th percentiles
(titled '+-') OR one defining the standard deviation (titled 'stdev')
"""
# init monte carlo class
MonteCarlo.__init__(self, history, parameters, basename)
# add empty block (otherwise something breaks...)
self.block = None
def estimate_uncertainty(self, n_trials, **kwds):
"""
Samples the specified number of models, given the pdf's defined in the params file used to create this model.
**Arguments**:
- *n_trials* = The number of random draws to produce. The variation between these random draws
is used to estimate uncertainty.
**Optional Keywords**:
         - *verbose* = If True, this function prints information to the print buffer. Default is False.
- *model_path* = The directory to write models to. Default is a local directory called 'tmp'.
- *cleanup* = True if this function should delete any models it creates (they're not needed anymore). Default
is True.
"""
vb = kwds.get('verbose', False)
model_path = kwds.get('model_path', 'tmp')
cleanup = kwds.get('cleanup', True)
# generate & load initial model
self.write_history('tmp.his')
pynoddy.compute_model('tmp.his', self.basename)
self.load_model_info()
self.load_geology()
os.remove('tmp.his')
# perform monte carlo sampling
if vb:
print("Producing model realisations...")
self.generate_model_instances(model_path, n_trials, verbose=vb, write_changes=None)
# thought: it would be more efficient (memory wise) to load models 1 at a time rather than
# dumping them all in memory....
# load results
if vb:
print("Loading models...")
models = MonteCarlo.load_noddy_realisations(model_path, verbose=vb)
self.models = models
# compute strat column
# self.determine_model_stratigraphy()
# self.n_rocktypes = len(self.model_stratigraphy)
# self.nx = models[0].nx
# self.ny = models[0].ny
# self.nz = models[0].nz
# calculate probabilities for each lithology. p_block[lithology][x][y][z] = p(lithology | x, y ,z)
self.p_block = [[[[0. for z in range(self.nz)] for y in range(self.ny)] for x in range(self.nx)] for l in
range(self.n_rocktypes)]
p1 = 1 / float(n_trials) # probability increment gained on each observation
for m in models:
# loop through voxels
for x in range(self.nx):
for y in range(self.ny):
for z in range(self.nz):
# get litho
litho = int(m.block[x][y][z]) - 1
# update litho probability
self.p_block[litho][x][y][z] += p1
# calculate entropy & store in self.e_block
self.e_block = np.ndarray((self.nx, self.ny, self.nz))
for x in range(self.nx):
for y in range(self.ny):
for z in range(self.nz):
entropy = 0 # calculate shannons information entropy
for litho in range(self.n_rocktypes):
p = self.p_block[litho][x][y][z]
# fix domain to 0 < p < 1
if p == 0:
p = 0.0000000000000001
if p >= 0.9999999999999999:
p = 0.9999999999999999
# calculate
entropy += p * math.log(p, 2) + (1 - p) * (math.log(1 - p, 2))
entropy = entropy * -1 / float(self.n_rocktypes) # divide by n
self.e_block[x][y][z] = entropy
# cleanup
if vb:
print("Cleaning up...")
if cleanup:
self.cleanup()
if vb:
print("Finished.")
def estimate_uncertainty_from_existing(self, path, **kwds):
'''
Calculates the information entropy from a set of pre-calculated models (of the same dimensions).
**Arguments**:
- *path* = The directory to load the models from. All models in this directory are loaded.
**Optional Keywords**:
- *verbose* = True if this function should write to the print buffer. Default is False.
'''
vb = kwds.get('verbose', False)
# compute strat column
self.determine_model_stratigraphy()
self.n_rocktypes = len(self.model_stratigraphy)
# compute block dimensions
blocksize = self.get_cube_size()
ex, ey, ez = self.get_extent()
self.nx = (int)(ex / blocksize)
self.ny = (int)(ey / blocksize)
self.nz = (int)(ez / blocksize)
if vb:
print("block dimensions = %d,%d,%d" % (self.nx, self.ny, self.nz))
# initialise blocks containing probability fields
self.p_block = [[[[0. for z in range(self.nz)] for y in range(self.ny)] for x in range(self.nx)] for l in
range(self.n_rocktypes)]
# loop through directory loading models & building probability fields based on this
n_models = 0 # number of models loaded
for root, dirnames, filenames in os.walk(path): # walk the directory
for f in filenames:
if ('.g12' in f): # find all lithology voxets
base = os.path.join(root, f.split('.')[0])
if vb:
print('Loading %s' % base)
# load model
m = NoddyOutput(base)
# loop through voxels and tally frequencies
for x in range(self.nx):
for y in range(self.ny):
for z in range(self.nz):
# get litho
litho = int(m.block[x][y][z]) - 1
# update litho frequency
self.p_block[litho][x][y][z] += 1
# keep track of the number of models we've loaded
n_models += 1
# convert frequency fields to probabilities & calculate information entropy
self.e_block = np.ndarray((self.nx, self.ny, self.nz))
for x in range(self.nx):
for y in range(self.ny):
for z in range(self.nz):
entropy = 0
for litho in range(self.n_rocktypes):
# convert frequency to probability
self.p_block[litho][x][y][z] = self.p_block[litho][x][y][z] / float(n_models)
# fix domain to 0 < p < 1
if self.p_block[litho][x][y][z] == 0:
self.p_block[litho][x][y][z] = 0.0000000000000001
if self.p_block[litho][x][y][z] >= 0.9999999999999999:
self.p_block[litho][x][y][z] = 0.9999999999999999
# calculate
p = self.p_block[litho][x][y][z] # shorthand
entropy += p * math.log(p, 2) + (1 - p) * (math.log(1 - p, 2))
entropy = entropy * -1 / float(self.n_rocktypes) # divide by n
self.e_block[x][y][z] = entropy
def plot_entropy(self, direction='y', position='center', **kwds):
'''
Plots the information entropy of each cell in the model. This can be used
as a proxy for uncertainty, as cells with higher entropy values have a higher
uncertainty.
**Arguments**:
- *direction* = 'x', 'y', 'z' : coordinate direction of section plot (default: 'y')
- *position* = int or 'center' : cell position of section as integer value
or identifier (default: 'center')
**Optional Keywords**:
- *ax* = matplotlib.axis : append plot to axis (default: create new plot)
- *figsize* = (x,y) : matplotlib figsize
- *colorbar* = bool : plot colorbar (default: True)
- *colorbar_orientation* = 'horizontal' or 'vertical' : orientation of colorbar
(default: 'vertical')
- *title* = string : plot title
- *savefig* = bool : save figure to file (default: show directly on screen)
- *cmap* = matplotlib.cmap : colormap (default: RdBu_r)
- *fig_filename* = string : figure filename
- *ve* = float : vertical exaggeration
- *layer_labels* = list of strings: labels for each unit in plot
'''
if 'cmap' not in kwds:
kwds['cmap'] = 'RdBu_r'
kwds['data'] = np.array(self.e_block) # specify the data we want to plot
self.plot_section(direction, position, **kwds)
def plot_probability(self, litho_ID, direction='y', position='center', **kwds):
'''
Plots the probability of observing the given lithology in space.
**Arguments**:
- *direction* = 'x', 'y', 'z' : coordinate direction of section plot (default: 'y')
- *position* = int or 'center' : cell position of section as integer value
or identifier (default: 'center')
**Optional Keywords**:
- *ax* = matplotlib.axis : append plot to axis (default: create new plot)
- *figsize* = (x,y) : matplotlib figsize
- *colorbar* = bool : plot colorbar (default: True)
- *colorbar_orientation* = 'horizontal' or 'vertical' : orientation of colorbar
(default: 'vertical')
- *title* = string : plot title
- *savefig* = bool : save figure to file (default: show directly on screen)
- *cmap* = matplotlib.cmap : colormap (default: RdBu_r)
- *fig_filename* = string : figure filename
- *ve* = float : vertical exaggeration
- *layer_labels* = list of strings: labels for each unit in plot
'''
if 'cmap' not in kwds:
kwds['cmap'] = 'RdBu_r'
kwds['data'] = np.array(self.p_block[litho_ID]) # specify the data we want to plot
self.plot_section(direction, position, **kwds)
def get_average_entropy(self):
"""
Calculates the average entropy of the model, by averaging the entropy of all the voxels.
**Returns**
- the average entropy of the model suite
"""
return np.average(self.e_block)
if __name__ == '__main__':
# setup
pynoddy.ensure_discrete_volumes = True
# setup working directory
os.chdir(r'C:\Users\Sam\Documents\Temporary Model Files\NFault')
# os.chdir("/Users/flow/git/pynoddy/sandbox")
his_file = "NFault.his"
# his_file = "simple_two_faults_no_gps.his"
params_file = "NFault_ds.csv"
# params_file = "params.csv"
outpath = 'NFault_ds'
# create new Uncertainty Analysis
ua = UncertaintyAnalysis(his_file, params_file)
# load models & estimate uncertainty
ua.estimate_uncertainty_from_existing(outpath)
# ua.estimate_uncertainty(n)
# ua.plot_probability(2)
ua.plot_entropy()
|
gpl-2.0
|
silky/sms-tools
|
lectures/05-Sinusoidal-model/plots-code/synthesis-window-2.py
|
22
|
2038
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
from scipy.fftpack import fft, ifft, fftshift
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
M = 601
w = np.blackman(M)
N = 1024
hN = N/2
Ns = 512
hNs = Ns/2
H = Ns/4
pin = 5000
t = -70
x1 = x[pin:pin+w.size]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
freqs = iploc*fs/N
Y = UF.genSpecSines(freqs, ipmag, ipphase, Ns, fs)
mY = 20*np.log10(abs(Y[:hNs]))
pY = np.unwrap(np.angle(Y[:hNs]))
y= fftshift(ifft(Y))*sum(blackmanharris(Ns))
sw = np.zeros(Ns)
ow = triang(2*H);
sw[hNs-H:hNs+H] = ow
bh = blackmanharris(Ns)
bh = bh / sum(bh)
sw[hNs-H:hNs+H] = sw[hNs-H:hNs+H] / bh[hNs-H:hNs+H]
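# The synthesis window built above divides a triangular overlap-add window by the
# normalised Blackman-Harris window over the central 2*H samples, which undoes the
# Blackman-Harris shape used in the spectral synthesis so that overlapping frames
# sum back to the sinusoidal signal.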
plt.figure(1, figsize=(9, 6))
plt.subplot(3,1,1)
plt.plot(np.arange(-hNs,hNs), y, 'b', lw=1.5)
plt.plot(np.arange(-hNs,hNs), max(y)*bh/max(bh), 'k', alpha=.5, lw=1.5)
plt.axis([-hNs, hNs,min(y),max(y)+.1])
plt.title("y; size = Ns = 512 (Blackman-Harris)")
plt.subplot(3,3,4)
plt.plot(np.arange(-hNs,hNs), bh/max(bh), 'k', alpha=.9, lw=1.5)
plt.axis([-hNs, hNs,0,1])
plt.title("Blackman-Harris")
plt.subplot(3,3,5)
plt.plot(np.arange(-hNs/2,hNs/2), ow/max(ow), 'k', alpha=.9, lw=1.5)
plt.axis([-hNs/2, hNs/2,0,1])
plt.title("triangular")
plt.subplot(3,3,6)
plt.plot(np.arange(-hNs/2,hNs/2), sw[hNs-H:hNs+H]/max(sw), 'k', alpha=.9, lw=1.5)
plt.axis([-hNs, hNs,0,1])
plt.title("triangular / Blackman-Harris")
yw = y * sw / max(sw)
plt.subplot(3,1,3)
plt.plot(np.arange(-hNs,hNs), yw, 'b', lw=1.5)
plt.plot(np.arange(-hNs/2,hNs/2), max(y)*ow/max(ow), 'k', alpha=.5, lw=1.5)
plt.axis([-hNs, hNs,min(yw),max(yw)+.1])
plt.title("yw = y * triangular / Blackman Harris; size = Ns/2 = 256")
plt.tight_layout()
plt.savefig('synthesis-window-2.png')
plt.show()
|
agpl-3.0
|
DuCorey/bokeh
|
sphinx/source/docs/user_guide/examples/extensions_example_latex.py
|
5
|
2681
|
""" The LaTex example was derived from: http://matplotlib.org/users/usetex.html
"""
import numpy as np
from bokeh.models import Label
from bokeh.plotting import figure, show
JS_CODE = """
import {Label, LabelView} from "models/annotations/label"
export class LatexLabelView extends LabelView
render: () ->
#--- Start of copied section from ``Label.render`` implementation
ctx = @plot_view.canvas_view.ctx
# Here because AngleSpec does units tranform and label doesn't support specs
switch @model.angle_units
when "rad" then angle = -1 * @model.angle
when "deg" then angle = -1 * @model.angle * Math.PI/180.0
if @model.x_units == "data"
vx = @xscale.compute(@model.x)
else
vx = @model.x
sx = @canvas.vx_to_sx(vx)
if @model.y_units == "data"
vy = @yscale.compute(@model.y)
else
vy = @model.y
sy = @canvas.vy_to_sy(vy)
if @model.panel?
panel_offset = @_get_panel_offset()
sx += panel_offset.x
sy += panel_offset.y
#--- End of copied section from ``Label.render`` implementation
# Must render as superpositioned div (not on canvas) so that KaTex
# css can properly style the text
@_css_text(ctx, "", sx + @model.x_offset, sy - @model.y_offset, angle)
# ``katex`` is loaded into the global window at runtime
# katex.renderToString returns a html ``span`` element
katex.render(@model.text, @el, {displayMode: true})
export class LatexLabel extends Label
type: 'LatexLabel'
default_view: LatexLabelView
"""
class LatexLabel(Label):
"""A subclass of the Bokeh built-in `Label` that supports rendering
    LaTeX using the KaTeX typesetting library.
    Only the render method of LabelView is overloaded to perform the
    text -> latex (via katex) conversion. Note: ``render_mode="canvas"``
    isn't supported, and certain DOM manipulation happens in the Label
    superclass implementation that requires explicitly setting
    ``render_mode='css'``.
"""
__javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.js"]
__css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.css"]
__implementation__ = JS_CODE
x = np.arange(0.0, 1.0 + 0.01, 0.01)
y = np.cos(2*2*np.pi*x) + 2
p = figure(title="LaTex Demonstration", plot_width=500, plot_height=500)
p.line(x, y)
# Note: must set ``render_mode="css"``
latex = LatexLabel(text="f = \sum_{n=1}^\infty\\frac{-e^{i\pi}}{2^n}!",
x=35, y=445, x_units='screen', y_units='screen',
render_mode='css', text_font_size='16pt',
background_fill_color='#ffffff')
p.add_layout(latex)
show(p)
|
bsd-3-clause
|
mratsim/Arraymancer
|
tools/visualize_tensor_csv.py
|
1
|
2700
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
try:
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
except ImportError:
print("Failed to import matplotlib. This tool requires matplotlib.")
try:
import pandas as pd
except ImportError:
print("Failed to import pandas. This tool requires pandas.")
def parse_args():
parser = argparse.ArgumentParser(
description="Tool to visualize tensors generated from Arraymancer's "
"Tensor.to_csv(...). It plots each given CSV file into a "
"corresponding PNG file with the same file name.")
parser.add_argument(
"file",
nargs="+",
help="CSV file(s) to plot",
)
parser.add_argument(
"-i",
dest="interactive",
help="Shows plot interactively",
action="store_true",
default=False,
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
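# Hedged usage sketch (hypothetical file names):
#   python visualize_tensor_csv.py tensor_a.csv tensor_b.csv   # writes tensor_a.csv.png etc.
#   python visualize_tensor_csv.py -i tensor_a.csv             # additionally shows the plot window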
def plot(f, args):
try:
df = pd.read_csv(f)
print("\n *** Plotting file '{}'. Tensor value stats:\n{}".format(
f, df["value"].describe())
)
except pd.parser.CParserError:
print("Failed to load file: {}".format(f))
return
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
# value histogram
raw_values = df["value"].values
axes[0].hist(raw_values, bins=70)
# rank specific plot: plain value plot (1D) or heatmap (2D)
tensor_rank = len(df.columns) - 1
if tensor_rank == 1:
x_values = range(len(df))
y_values = df["value"].values
axes[1].plot(x_values, y_values, "o", ms=2)
elif tensor_rank == 2:
df_pivot = df.pivot(index="dimension_1", columns="dimension_2", values="value")
im = axes[1].imshow(df_pivot, aspect="auto", interpolation="none")
divider = make_axes_locatable(axes[1])
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(im, cax=cax)
else:
axes[1].text(
0.5, 0.5,
"No visualization available for tensors of rank {}".format(tensor_rank),
horizontalalignment="center", verticalalignment="center", fontsize=10,
transform=axes[1].transAxes
)
tensor_shape = [df.iloc[:, i].max() + 1 for i in range(tensor_rank)]
fig.suptitle("{} (shape: {})".format(f, tensor_shape), fontsize=16)
if args.interactive:
plt.show()
fig.savefig(f + ".png")
plt.close(fig)
if __name__ == "__main__":
args = parse_args()
for f in args.file:
plot(f, args)
|
apache-2.0
|
arahuja/scikit-learn
|
examples/cluster/plot_color_quantization.py
|
297
|
3443
|
# -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
|
bsd-3-clause
|
jpautom/scikit-learn
|
sklearn/gaussian_process/gaussian_process.py
|
17
|
34896
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances is deprecated and will be removed in 0.20.")
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
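# Hedged illustration: for an X with three samples a, b, c the function above returns
# D = [|a - b|, |a - c|, |b - c|] (componentwise) together with ij = [[0, 1], [0, 2], [1, 2]].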
@deprecated("GaussianProcess is deprecated and will be removed in 0.20. "
"Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
Note that this class is deprecated and will be removed in 0.20.
Use the GaussianProcessRegressor instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
        MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state : integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
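# A minimal usage sketch for the estimator documented above (illustrative only;
# X, y and X_new stand for arbitrary training and query arrays, and the nugget
# value is an assumption, not a recommended default):
#
#   gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
#                        thetaL=1e-3, thetaU=1., nugget=1e-2)
#   gp.fit(X, y)
#   y_pred, mse_pred = gp.predict(X_new, eval_MSE=True)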
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is underdetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
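For instance, given a fitted model ``gp``, the value at the fitted
parameters could be re-evaluated with (an illustrative call)::
    rlf_value, par = gp.reduced_likelihood_function(theta=gp.theta_)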
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy 0 < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
|
bsd-3-clause
|
supernifty/reference-bias
|
bin/draw_matrix.py
|
1
|
2392
|
#!/usr/bin/env python
#
# generates a chart illustrating the bias as generated from the calculate_bias_matrix command
# also requires a mapping from donor to the label to display on the chart
# usage
# python draw_matrix.py genome_map < matrix.out > chart.pdf
# genome_map has the format:
# donor_filename,label,extra
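# e.g. a genome_map file might contain lines such as (values illustrative only):
#   donor1.fa,Donor 1,extra_info
#   donor2.fa,Donor 2,extra_info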
import sys
import matplotlib.pyplot as plt
import numpy as np
import pylab
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
#pylab.rcParams['figure.figsize'] = 8, 6
pylab.rcParams['figure.figsize'] = 10, 8
def genome_sort( gmap ):
def sorter( x ):
return gmap[x]
return sorter
def draw_matrix( in_fh, genome_map, out_fh ):
gmap = {}
for line in genome_map:
x, y, _ = [ z.strip() for z in line.split(',') ]
gmap[x] = y
genomes = set()
data = {}
first = True
for line in in_fh:
sys.stderr.write( line )
if first: # header
first = False
continue
fields = line.strip().split()
donor = fields[0]
reference = fields[1]
bias = float( fields[4] )
if donor not in genomes:
genomes.add( donor )
if reference not in genomes:
genomes.add( reference )
data[ '{0},{1}'.format( donor, reference ) ] = bias
genome_list = sorted( list( genomes ), key=genome_sort( gmap ) )
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect(0.5)
ax.set_ylabel('Donor')
ax.set_xlabel('Reference')
data_array = np.empty([len( genome_list ), len( genome_list )])
for xi, xv in enumerate( genome_list ):
for yi, yv in enumerate( genome_list ):
data_array[yi][xi] = data[ '{0},{1}'.format( yv, xv ) ]
ax.annotate(str(data_array[yi][xi]), xy=(xi, yi), horizontalalignment='center', verticalalignment='center')
res = ax.imshow(data_array, interpolation='nearest', cmap=plt.cm.RdYlGn_r)
mapped_genome_list = [ gmap[x] for x in genome_list ]
sys.stderr.write( '{0}\n'.format( mapped_genome_list ) )
ax.set_xticks(np.arange(len(mapped_genome_list)), minor=False)
ax.set_yticks(np.arange(len(mapped_genome_list)), minor=False)
ax.set_xticklabels( mapped_genome_list, rotation=45, horizontalalignment='right' )
ax.set_yticklabels( mapped_genome_list )
cb = fig.colorbar(res, label='% loss')
fig.savefig( out_fh, format='pdf', dpi=1000)
if __name__ == '__main__':
draw_matrix( sys.stdin, open( sys.argv[1], 'r' ), sys.stdout )
|
apache-2.0
|
malariagen/agam-report-base
|
src/python/ag1k/phase1_ar3.py
|
1
|
5818
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import os
import pyfasta
import allel
import seaborn as sns
import petl as etl
import h5py
import pandas
title = 'Phase 1 AR3 release'
pop_ids = 'AOM', 'BFM', 'GWA', 'GNS', 'BFS', 'CMS', 'GAS', 'UGS', 'KES'
pop_labels = {
'AOM': 'AO $coluzzii$',
'BFM': 'BF $coluzzii$',
'GWA': 'GW',
'GNS': 'GN $gambiae$',
'BFS': 'BF $gambiae$',
'CMS': 'CM $gambiae$',
'UGS': 'UG $gambiae$',
'GAS': 'GA $gambiae$',
'KES': 'KE',
'colony': 'colony',
}
pop_colors = {
'AOM': sns.color_palette('YlOrBr', 5)[4],
'BFM': sns.color_palette('Reds', 3)[1],
'GWA': sns.color_palette('YlOrBr', 5)[1],
'GNS': sns.color_palette('Blues', 3)[0],
'BFS': sns.color_palette('Blues', 3)[1],
'CMS': sns.color_palette('Blues', 3)[2],
'UGS': sns.color_palette('Greens', 2)[0],
'GAS': sns.color_palette('Greens', 2)[1],
'KES': sns.color_palette('Greys', 5)[2],
'colony': sns.color_palette('Greys', 5)[-1]
}
# convert to hex notation for ease of use elsewhere
for p in pop_colors:
h = '#%02x%02x%02x' % tuple(int(255*c) for c in pop_colors[p])
pop_colors[p] = h
# chromatin
_data_chromatin = b"""CHX chro X 20009764 24393108
CH2R chro 2R 58984778 61545105
CH2L chro 2L 1 2431617
PEU2L chro 2L 2487770 5042389
IH2L chro 2L 5078962 5788875
IH3R chro 3R 38988757 41860198
CH3R chro 3R 52161877 53200684
CH3L chro 3L 1 1815119
PEU3L chro 3L 1896830 4235209
IH3L chro 3L 4264713 5031692
"""
tbl_chromatin = (
etl
.fromtext(etl.MemorySource(_data_chromatin))
.split('lines', '\s+', ['name', 'type', 'chrom', 'start', 'stop'])
.convert(('start', 'stop'), int)
.cutout('type')
)
# genome regions
region_X_speciation = 'X-speciation', 'X', 15000000, 24000000
region_X_free = 'X-free', 'X', 1, 14000000
region_3L_free = '3L-free', '3L', 15000000, 41000000
region_3R_free = '3R-free', '3R', 1, 37000000
# noinspection PyGlobalUndefined
def init(release_dir, load_geneset=False):
"""Initialise data resources.
Parameters
----------
release_dir : string
Local filesystem path where data from the release are stored.
load_geneset : boolean, optional
If True, load geneset into memory.
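Examples
--------
A typical call might look like the following (the release path is an
assumption used only for illustration)::
    from ag1k import phase1_ar3
    phase1_ar3.init('/data/ag1000g/phase1/AR3', load_geneset=True)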
"""
# reference sequence
####################
global genome_fn, genome
genome_dir = os.path.join(release_dir, 'genome')
genome_fn = os.path.join(genome_dir, 'Anopheles-gambiae-PEST_CHROMOSOMES_AgamP3.fa')
if os.path.exists(genome_fn):
genome = pyfasta.Fasta(genome_fn)
# genome annotations
####################
global geneset_agamp42_fn, geneset_agamp42
geneset_dir = os.path.join(release_dir, 'geneset')
geneset_agamp42_fn = os.path.join(
geneset_dir,
'Anopheles-gambiae-PEST_BASEFEATURES_AgamP4.2.sorted.gff3.gz')
if os.path.exists(geneset_agamp42_fn) and load_geneset:
geneset_agamp42 = allel.FeatureTable.from_gff3(geneset_agamp42_fn)
# variant callsets
##################
global callset, callset_pass
variation_dir = os.path.join(release_dir, 'variation')
# main callset
callset_h5_fn = os.path.join(variation_dir, 'main', 'hdf5', 'ag1000g.phase1.ar3.h5')
if os.path.exists(callset_h5_fn):
callset = h5py.File(callset_h5_fn, mode='r')
# main callset, PASS variants only
callset_pass_h5_fn = os.path.join(variation_dir, 'main', 'hdf5', 'ag1000g.phase1.ar3.pass.h5')
if os.path.exists(callset_pass_h5_fn):
callset_pass = h5py.File(callset_pass_h5_fn, mode='r')
# accessibility
###############
global accessibility
accessibility_dir = os.path.join(release_dir, 'accessibility')
accessibility_fn = os.path.join(accessibility_dir, 'accessibility.h5')
if os.path.exists(accessibility_fn):
accessibility = h5py.File(accessibility_fn, mode='r')
# sample metadata
#################
global samples_fn, tbl_samples, lkp_samples, sample_ids, df_samples
samples_dir = os.path.join(release_dir, 'samples')
samples_fn = os.path.join(samples_dir, 'samples.all.txt')
if os.path.exists(samples_fn):
tbl_samples = (
etl
.fromtsv(samples_fn)
.convert(('index', 'year', 'n_sequences', 'kt_2la', 'kt_2rb'), int)
.convert(('mean_coverage', 'latitude', 'longitude') + tuple(range(20, 36)), float)
)
lkp_samples = tbl_samples.recordlookupone('ox_code')
sample_ids = tbl_samples.values('ox_code').list()
df_samples = pandas.read_csv(samples_fn, sep='\t', index_col='index')
# extras
########
global allele_counts, allele_counts_gq10, outgroup_alleles, outgroup_allele_counts, \
outgroup_species
extras_dir = os.path.join(release_dir, 'extras')
# allele counts
allele_counts_fn = os.path.join(extras_dir, 'allele_counts.h5')
if os.path.exists(allele_counts_fn):
allele_counts = h5py.File(allele_counts_fn, mode='r')
allele_counts_gq10_fn = os.path.join(extras_dir, 'allele_counts.gq10.h5')
if os.path.exists(allele_counts_gq10_fn):
allele_counts_gq10 = h5py.File(allele_counts_gq10_fn, mode='r')
# outgroup data
outgroup_species = 'arab', 'meru', 'mela', 'quad', 'epir', 'chri'
outgroup_alleles_fn = os.path.join(extras_dir, 'outgroup_alleles.h5')
if os.path.exists(outgroup_alleles_fn):
outgroup_alleles = h5py.File(outgroup_alleles_fn, mode='r')
outgroup_allele_counts_fn = os.path.join(extras_dir, 'outgroup_allele_counts.h5')
if os.path.exists(outgroup_allele_counts_fn):
outgroup_allele_counts = h5py.File(outgroup_allele_counts_fn, mode='r')
|
mit
|
iABC2XYZ/abc
|
DM/BTL/T1.py
|
1
|
4591
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 16:51:40 2017
Author: Peiyong Jiang : [email protected]
Function:
"""
import matplotlib.pyplot as plt
#import tensorflow as tf
import numpy as np
#from basicTF import Quad4D, Drift4D,TwissO4D
plt.close('all')
def MapQuad(K,L):
K2=np.sqrt(np.abs(K*1.))
K2_L=K2*L
if K>0:
C=np.cos(K2_L)
S=np.sin(K2_L)/K2
Sp=-np.sin(K2_L)*K2
else:
C=np.cosh(K2_L)
S=np.sinh(K2_L)/K2
Sp=np.sinh(K2_L)*K2
M=np.array([[C,S],[Sp,C]])
return M
def MapDrift(L):
M=np.array([[1.,L],[0.,1.]])
return M
def RandSigma(betaMax):
betaT=np.random.random()*betaMax
alphaT=np.random.random()*np.sign(np.random.random()-0.5)
gammaT=(1.+alphaT**2)/betaT
sigmaT=np.array([[betaT,-alphaT],[-alphaT,gammaT]])
return sigmaT
def RandLatticeBPL(numQuad):
quadL=np.random.random([numQuad])
quadK=np.random.random([numQuad])-0.5
driftL=np.random.random([numQuad+1])
return quadL,quadK,driftL
def CalBTL(numSample,sigmaTx,sigmaTy,quadL,quadK,driftL):
lenBTL=np.sum(quadL)+np.sum(driftL)
nCell=len(quadK)
kStart=np.zeros([nCell])
kEnd=np.zeros([nCell])
for iCell in range(nCell):
if iCell==0:
kStart[0]=driftL[0]
kEnd[0]=kStart[0]+quadL[0]
continue
kStart[iCell]=kEnd[iCell-1]+driftL[iCell]
kEnd[iCell]=kStart[iCell]+quadL[iCell]
Z=np.linspace(0.,lenBTL,numSample)
K=np.zeros([numSample])
for iCell in range(nCell):
K[(Z>=kStart[iCell]) * (Z<=kEnd[iCell])]=quadK[iCell]
dL=lenBTL/(numSample-1.)
betaX=np.zeros([numSample])
betaY=np.zeros([numSample])
for iL in range(numSample):
if iL==0:
betaX[0]=sigmaTx[0,0]
betaY[0]=sigmaTy[0,0]
kLocal=K[iL]
if np.abs(kLocal)<1e-6:
Mx=MapDrift(dL)
My=Mx
else:
Mx=MapQuad(kLocal,dL)
My=MapQuad(-kLocal,dL)
sigmaTx=np.matmul(np.matmul(Mx,sigmaTx),Mx.T)
sigmaTy=np.matmul(np.matmul(My,sigmaTy),My.T)
betaX[iL]=sigmaTx[0,0]
betaY[iL]=sigmaTy[0,0]
return Z,betaX,betaY
def RandItemSingle(numSample,numQuadHigh):
numQuad=np.random.randint(0,high=numQuadHigh)
flagEle=np.zeros([numSample])
flagEle[0:numQuad+1:2]=1 # D
flagEle[1:numQuad:2]=4 # Q
quadL,quadK,driftL=RandLatticeBPL(numQuad)
betaMax=100.
sigmaTx=RandSigma(betaMax)
sigmaTy=RandSigma(betaMax)
Z,betaX,betaY=CalBTL(numSample,sigmaTx,sigmaTy,quadL,quadK,driftL)
dataLattice=np.zeros([numQuadHigh,3])
dataBeam=np.zeros([numSample,4])
dataLattice[0:numQuad+1,0]=driftL
dataLattice[0:numQuad,1]=quadK
dataLattice[0:numQuad,2]=quadL
dataBeam[:,0]=Z
dataBeam[:,1]=betaX
dataBeam[:,2]=betaY
dataBeam[:,3]=flagEle
return dataLattice,dataBeam
def RandItemMulti(numItem,numSample,numQuadHigh):
dataLattice=np.zeros([numItem,numQuadHigh,3])
dataBeam=np.zeros([numItem,numSample,4])
for iItem in range(numItem):
dataLatticeSingle,dataBeamSingle=RandItemSingle(numSample,numQuadHigh)
dataLattice[iItem,:,:]=dataLatticeSingle
dataBeam[iItem,:,:]=dataBeamSingle
return dataLattice,dataBeam
numItem=10
numSample=1024
numQuadHigh=20
dataLattice,dataBeam=RandItemMulti(numItem,numSample,numQuadHigh)
for iItem in range(numItem):
driftL=dataLattice[iItem,:,0]
quadK=dataLattice[iItem,:,1]
quadL=dataLattice[iItem,:,2]
Z=dataBeam[iItem,:,0]
betaX=dataBeam[iItem,:,1]
betaY=dataBeam[iItem,:,2]
flagEle=dataBeam[iItem,:,3]
print('quadL------------------',len(quadL))
print(quadL)
print('quadK------------------',len(quadK))
print(quadK)
print('driftL------------------',len(driftL))
print(driftL)
print('Z------------------',len(Z))
print(Z)
print('betaX------------------',len(betaX))
print(betaX)
print('betaY------------------',len(betaY))
print(betaY)
plt.figure(1)
plt.clf()
plt.plot(quadL,'b')
plt.hold
plt.plot(quadK,'r')
plt.plot(driftL,'g')
plt.plot(flagEle,'k-*')
plt.axis([-1,numQuadHigh+1,-1,4])
plt.show()
plt.pause(0.5)
plt.figure(2)
plt.clf()
plt.subplot(121)
plt.plot(Z,betaX,'b')
plt.subplot(122)
plt.plot(Z,betaY,'r')
plt.show()
plt.pause(0.5)
|
gpl-3.0
|
RomainBrault/scikit-learn
|
doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py
|
73
|
2264
|
"""Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
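For instance, with analyzer='char' and ngram_range=(1, 3), the string "the"
is mapped to the character n-grams 't', 'h', 'e', 'th', 'he' and 'the', and
the classifier works on the (tf-idf style) frequencies of such n-grams.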
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
|
bsd-3-clause
|
zehpunktbarron/iOSMAnalyzer
|
scripts/c5_poi_tags.py
|
1
|
7057
|
# -*- coding: utf-8 -*-
#!/usr/bin/python2.7
#description :This file creates a plot: Calculates the development of the average tag-number of all POIs
#author :Christopher Barron @ http://giscience.uni-hd.de/
#date :19.01.2013
#version :0.1
#usage :python pyscript.py
#==============================================================================
import psycopg2
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pylab
# import db connection parameters
import db_conn_para as db
###
### Connect to database with psycopg2. Add arguments from parser to the connection-string
###
try:
conn_string="dbname= %s user= %s host= %s password= %s" %(db.g_my_dbname, db.g_my_username, db.g_my_hostname, db.g_my_dbpassword)
print "Connecting to database\n->%s" % (conn_string)
# Establish the connection to the DB using psycopg2
conn = psycopg2.connect(conn_string)
print "Connection to database was established successfully"
except:
print "Connection to database failed"
###
### Execute SQL query
###
# With this new "cursor method", SQL queries can be executed
cur = conn.cursor()
# Execute SQL query. For more than one row use three '"'
try:
cur.execute("""
-- Amount of Tags of each POI
SELECT generate_series,
ROUND(coalesce((SELECT
avg(count)
FROM
(SELECT
id,
count(skeys)
FROM
(SELECT
id,
skeys(tags),
tags
FROM
hist_plp h
WHERE
-- POI-Tags & Keys
(
-- accomodation & gastronomy
((tags->'amenity') = 'bar') OR
((tags->'amenity') = 'bbq') OR
((tags->'amenity') = 'biergarten') OR
((tags->'amenity') = 'cafe') OR
((tags->'amenity') = 'drinking_water') OR
((tags->'amenity') = 'fast_food') OR
((tags->'amenity') = 'food_court') OR
((tags->'amenity') = 'ice_cream') OR
((tags->'amenity') = 'pub') OR
((tags->'amenity') = 'restaurant') OR
-- education
((tags->'amenity') = 'college') OR
((tags->'amenity') = 'kindergarten') OR
((tags->'amenity') = 'library') OR
((tags->'amenity') = 'school') OR
((tags->'amenity') = 'university') OR
-- transport
((tags->'amenity') = 'bicycle_parking') OR
((tags->'amenity') = 'bicycle_rental') OR
((tags->'amenity') = 'bus_station') OR
((tags->'amenity') = 'car_rental') OR
((tags->'amenity') = 'car_sharing') OR
((tags->'amenity') = 'car_wash') OR
((tags->'amenity') = 'ev_charging') OR
((tags->'amenity') = 'ferry_terminal') OR
((tags->'amenity') = 'fuel') OR
((tags->'amenity') = 'grit_bin') OR
((tags->'amenity') = 'parking') OR
((tags->'amenity') = 'parking_entrance') OR
((tags->'amenity') = 'parking_space') OR
((tags->'amenity') = 'taxi') OR
-- finances
((tags->'amenity') = 'atm') OR
((tags->'amenity') = 'bank') OR
((tags->'amenity') = 'bureau_de_change') OR
-- health care
((tags->'amenity') = 'baby_hatch') OR
((tags->'amenity') = 'clinic') OR
((tags->'amenity') = 'dentist') OR
((tags->'amenity') = 'doctors') OR
((tags->'amenity') = 'hospital') OR
((tags->'amenity') = 'nursing_home') OR
((tags->'amenity') = 'pharmacy') OR
((tags->'amenity') = 'social_facility') OR
((tags->'amenity') = 'veterinary') OR
-- art & culture
((tags->'amenity') = 'arts_centre') OR
((tags->'amenity') = 'cinema') OR
((tags->'amenity') = 'community_centre') OR
((tags->'amenity') = 'fountain') OR
((tags->'amenity') = 'nightclub') OR
((tags->'amenity') = 'social_centre') OR
((tags->'amenity') = 'stripclub') OR
((tags->'amenity') = 'studio') OR
((tags->'amenity') = 'swingerclub') OR
((tags->'amenity') = 'theatre') OR
-- shops
tags ? 'shop' OR
-- tourism
tags ? 'tourism' OR
-- other
((tags->'amenity') = 'animal_boarding') OR
((tags->'amenity') = 'animal_shelter') OR
((tags->'amenity') = 'bench') OR
((tags->'amenity') = 'brothel') OR
((tags->'amenity') = 'clock') OR
((tags->'amenity') = 'courthouse') OR
((tags->'amenity') = 'crematorium') OR
((tags->'amenity') = 'crypt') OR
((tags->'amenity') = 'embassy') OR
((tags->'amenity') = 'fire_station') OR
((tags->'amenity') = 'grave_yard') OR
((tags->'amenity') = 'hunting_stand') OR
((tags->'amenity') = 'marketplace') OR
((tags->'amenity') = 'place_of_worship') OR
((tags->'amenity') = 'police') OR
((tags->'amenity') = 'post_box') OR
((tags->'amenity') = 'post_office') OR
((tags->'amenity') = 'prison') OR
((tags->'amenity') = 'public_building') OR
((tags->'amenity') = 'recycling') OR
((tags->'amenity') = 'sauna') OR
((tags->'amenity') = 'shelter') OR
((tags->'amenity') = 'shower') OR
((tags->'amenity') = 'telephone') OR
((tags->'amenity') = 'toilets') OR
((tags->'amenity') = 'townhall') OR
((tags->'amenity') = 'vending_machine') OR
((tags->'amenity') = 'waste_basket') OR
((tags->'amenity') = 'waste_disposal') OR
((tags->'amenity') = 'watering_place')
)
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
GROUP BY id) AS foo2
), 0), 2)::float AS avg
FROM generate_series(
(SELECT date_trunc ('month',(
SELECT MIN(valid_from) FROM hist_plp)) as foo), -- Select minimum date (month)
(SELECT MAX(valid_from) FROM hist_plp)::date, -- Select maximum date
interval '1 month')
;
""")
# Getting a list of tuples from the database-cursor (cur)
data_tuples = []
for row in cur:
data_tuples.append(row)
except:
print "Query could not be executed"
###
### Plot (Bar-Chart)
###
# Datatypes of the returning data: column 1(col1) --> integer, column 2(date) --> string
datatypes = [('date', 'S20'), ('col1', 'double')]
# Data-tuple and datatype
data = np.array(data_tuples, dtype=datatypes)
# Date comes from 'col1'
col1 = data['col1']
# Converts date to a manageable date-format for matplotlib
dates = mdates.num2date(mdates.datestr2num(data['date']))
fig, ax1 = plt.subplots()
# Create the line chart (x-axis=dates, y-axis=col1)
ax1.plot(dates, col1, linewidth=2, color = '#2dd700')
# Place a gray dashed grid behind the ticks (only for y-axis)
ax1.yaxis.grid(color='gray', linestyle='dashed')
# Set this grid behind the ticks
ax1.set_axisbelow(True)
# Rotate x-labels on the x-axis
fig.autofmt_xdate()
# Label x and y axis
plt.xlabel('Date')
plt.ylabel('Average number of Tags')
# Plot-title
plt.title("Development of the average Tag-Number of all POIs")
# Save plot to *.jpeg-file
plt.savefig('pics/c5_poi_tags.jpeg')
plt.clf()
|
gpl-3.0
|
deepinsight/Deformable-ConvNets
|
deeplab/runs_CAIScene/infer5.py
|
1
|
11273
|
import os
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
import sys
import argparse
import numpy as np
import cv2
import math
import datetime
import random
import json
import pandas as pd
#import multiprocessing
from Queue import Queue
from threading import Thread
import mxnet as mx
import mxnet.ndarray as nd
from easydict import EasyDict as edict
parser = argparse.ArgumentParser(description="",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--lst', type=str, default='./data/val.lst',
help='')
parser.add_argument('--val-root-path', type=str, default='/raid5data/dplearn/aichallenger/scene/val')
parser.add_argument('--test-root-path', type=str, default='/raid5data/dplearn/aichallenger/scene/test_a')
parser.add_argument('--gpu', type=int, default=0,
help='')
parser.add_argument('--gpus', type=str, default='0,1,2,3,4,5,6,7',
help='')
parser.add_argument('--num-classes', type=int, default=80,
help='')
parser.add_argument('--batch-size', type=int, default=128,
help='')
parser.add_argument('--mode', type=int, default=0,
help='')
parser.add_argument('--size', type=str, default='448,504')
#parser.add_argument('--size', type=str, default='224,256')
parser.add_argument('--step', type=int, default=-40,
help='if negative, use random crops')
#parser.add_argument('--model', type=str, default='./model/ft448deformsqex0.0001_9682,3|./model/sft320deformsqex_9692,1')
#parser.add_argument('--model', type=str, default='./model/sft320deformsqex_9692,1')
#parser.add_argument('--model', type=str, default='./model/ft224deformsqex0003_9587,20')
#parser.add_argument('--model', type=str, default='./model/a1,8,14')
#parser.add_argument('--model', type=str, default='./model/a1_2,2,6')
#parser.add_argument('--model', type=str, default='./model/a1_6,1')
#parser.add_argument('--model', type=str, default='./model/a1_6,1|./model/a1_4,3|./model/a1_5,6|./model/a1_7,2')
#parser.add_argument('--model', type=str, default='./model/a1_6,6|./model/a1_4,6|./model/a1_5,6|./model/a1_7,6')
#parser.add_argument('--model', type=str, default='./model/ft224nude0003_97,50,224|./model/sft448from32097nude00003_9740,11,448|./model/sft320nude00003_97,19,320')
#parser.add_argument('--model', type=str, default='./model/ft224nude0003_97,50,224|./model/sft448from32097nude00003_9740,11,448')
#parser.add_argument('--model', type=str, default='./model/sft448from32097nude00003_9740,11,448|./model/sft320nude00003_97,19,320')
#parser.add_argument('--model', type=str, default='./model/sft448from32097nude00003_9740,11,448')
parser.add_argument('--model', type=str, default='./model/ft224nude0003_97,50,224|./model/sft448from32097nude00003_9740,11,448')
#parser.add_argument('--model', type=str, default='./model/sft448from32097nude00003_9740,11,448|./model/ft224nude0003_97,50,224')
parser.add_argument('--output-dir', type=str, default='./rt',
help='')
args = parser.parse_args()
def prt(msg):
ts = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print("%s] %s" % (ts, msg))
sys.stdout.flush()
def ch_dev(arg_params, aux_params, ctx):
new_args = dict()
new_auxs = dict()
for k, v in arg_params.items():
new_args[k] = v.as_in_context(ctx)
for k, v in aux_params.items():
new_auxs[k] = v.as_in_context(ctx)
return new_args, new_auxs
def image_preprocess(img_full_path):
_size = args.size.split(",")
img_sz = int(_size[1])
crop_sz = int(_size[0])
#print(img_full_path)
img = cv2.cvtColor(cv2.imread(img_full_path), cv2.COLOR_BGR2RGB)
img = np.float32(img)
ori_shape = img.shape
assert img.shape[2]==3
rows, cols = img.shape[:2]
_high = min(rows, cols)
_high = min(_high, crop_sz*2)
_high = max(_high, img_sz)
_img_sz = img_sz
if _high>img_sz:
_img_sz = np.random.randint(low=img_sz, high=_high)
if cols < rows:
resize_width = _img_sz
resize_height = resize_width * rows / cols;
else:
resize_height = _img_sz
resize_width = resize_height * cols / rows;
img = cv2.resize(img, (resize_width, resize_height), interpolation=cv2.INTER_CUBIC)
#print(_high,ori_shape,img.shape)
h, w, _ = img.shape
#x0 = int((w - crop_sz) / 2)
#y0 = int((h - crop_sz) / 2)
x0_max = w-crop_sz
y0_max = h-crop_sz
x0 = np.random.randint(low=0, high=x0_max)
y0 = np.random.randint(low=0, high=y0_max)
img = img[y0:y0+crop_sz, x0:x0+crop_sz, :]
#lr flip
if random.randint(0,1)==1:
for j in xrange(3):
img[:,:,j] = np.fliplr(img[:,:,j])
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 2) # change to CHW
return img
def read_image(path):
img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
img = np.float32(img)
return img
def image_preprocess2(img, crop_sz, blockid, cornerid, flipid):
nd_img = nd.array(img)
expand = 32
#if crop_sz<300:
# expand = 16
img_sz = crop_sz+expand
nd_img = mx.image.resize_short(nd_img, img_sz)
if flipid==1:
nd_img = nd.flip(nd_img, axis=1)
img = nd_img.asnumpy()
h = img.shape[0]
w = img.shape[1]
block_size = min(h,w)
blockh = 0
blockw = 0
if h>w:
if blockid==1:
_half = int( (h-w)/2 )
blockh = _half
elif blockid==2:
blockh = h-w
else:
if blockid==1:
_half = int( (w-h)/2 )
blockw = _half
elif blockid==2:
blockw = w-h
block = img[blockh:(blockh+block_size), blockw:(blockw+block_size), :]
if cornerid==0:
cornerh = int((block_size-crop_sz)/2)
cornerw = int((block_size-crop_sz)/2)
elif cornerid==1:
cornerh = 0
cornerw = 0
elif cornerid==2:
cornerh = 0
cornerw = block_size-crop_sz
elif cornerid==3:
cornerh = block_size-crop_sz
cornerw = 0
elif cornerid==4:
cornerh = block_size-crop_sz
cornerw = block_size-crop_sz
img = block[cornerh:(cornerh+crop_sz), cornerw:(cornerw+crop_sz), :]
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 2) # change to CHW
#print(img.shape)
return img
def val(X, imgs):
top1=0
top3=0
for ii in range(X.shape[0]):
score = X[ii]
gt_label = imgs[ii][1]
#print("%d sum %f" % (ii, _sum))
sort_index = np.argsort(score)[::-1]
for k in xrange(3):
if sort_index[k]==gt_label:
top3+=1
if k==0:
top1+=1
print('top3', float(top3)/X.shape[0])
print('top1', float(top1)/X.shape[0])
if args.mode>0:
args.root_path = args.test_root_path
else:
args.root_path = args.val_root_path
args.crop_size = int(args.size.split(',')[0])
args.resize = int(args.size.split(',')[1])
#ctxs = [mx.gpu(int(i)) for i in args.gpus.split(',')]
nets = []
gpuid = args.gpu
ctx = mx.gpu(gpuid)
for model_str in args.model.split('|'):
vec = model_str.split(',')
assert len(vec)>1
prefix = vec[0]
epoch = int(vec[1])
crop_sz = int(vec[2])
print('loading',prefix, epoch)
net = edict()
net.crop_sz = crop_sz
net.ctx = ctx
net.sym, net.arg_params, net.aux_params = mx.model.load_checkpoint(prefix, epoch)
net.arg_params, net.aux_params = ch_dev(net.arg_params, net.aux_params, net.ctx)
nets.append(net)
gpuid+=1
imgs = []
i = 0
for line in open(args.lst, 'r'):
vec = line.strip().split("\t")
imgs.append( (i, int(vec[1]), os.path.join(args.root_path, vec[2])) )
i+=1
#models = []
#for net in nets:
# model = mx.mod.Module(
# context = ctxs,
# symbol = net.sym,
# )
# hw = int(args.size.split(',')[0])
# model.bind(data_shapes=[('data', (args.batch_size, 3, hw, hw))], label_shapes=[('softmax_label',(args.batch_size,))], for_training=False, grad_req="null")
# model.set_params(net.arg_params, net.aux_params)
# models.append(model)
X = np.zeros( (len(imgs), args.num_classes) , dtype=np.float32 )
num_batches = int( math.ceil(len(imgs) / float(args.batch_size)) )
print("num_batches %d" % num_batches)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
for blockid in [1,0,2]:
for cornerid in xrange(0,5):
for flipid in xrange(0,2):
print('start loop', blockid, cornerid, flipid)
score_weight = 1.0
#if blockid==1:
# score_weight = 2.0
batch_head = 0
batch_num = 0
while batch_head<len(imgs):
prt("processing batch %d" % batch_num)
current_batch_sz = min(args.batch_size, len(imgs)-batch_head)
#print batch_head
ids = []
datas = []
for index in range(batch_head, batch_head+current_batch_sz):
img_path = imgs[index][2]
data = read_image(img_path)
datas.append(data)
ids.append(imgs[index][0])
#assert len(datas)==1
#_data = datas[0]
#_hw = min(_data.shape[0], _data.shape[1])
#if _hw<256:
# _nets = [nets[0]]
#else:
# _nets = nets
#for model in models:
for net in nets:
input_blob = np.zeros((current_batch_sz,3,net.crop_sz,net.crop_sz))
for idx in xrange(len(datas)):
data = datas[idx]
img = image_preprocess2(data, net.crop_sz, blockid, cornerid, flipid)
#print(img.shape)
input_blob[idx,:,:,:] = img
#print(input_blob.shape)
net.arg_params["data"] = mx.nd.array(input_blob, net.ctx)
net.arg_params["softmax_label"] = mx.nd.empty((current_batch_sz,), net.ctx)
exe = net.sym.bind(net.ctx, net.arg_params ,args_grad=None, grad_req="null", aux_states=net.aux_params)
exe.forward(is_train=False)
net_out = exe.outputs[0].asnumpy()
#_data = mx.nd.array(input_blob)
#_label = nd.ones( (current_batch_sz,) )
#db = mx.io.DataBatch(data=(_data,), label=(_label,))
#model.forward(db, is_train=False)
#net_out = model.get_outputs()[0].asnumpy()
#print(net_out.shape)
for bz in xrange(current_batch_sz):
probs = net_out[bz,:]
score = np.squeeze(probs)
score *= score_weight
#print(score.shape)
#print(score)
im_id = ids[bz]
X[im_id,:] += score
batch_head += current_batch_sz
batch_num += 1
val(X, imgs)
out_filename = os.path.join(args.output_dir, 'result.hdf')
print(out_filename)
if os.path.exists(out_filename):
print("exists, delete first..")
os.remove(out_filename)
_X = X
print("_X row sum %f" % np.sum(_X[0]))
df = pd.DataFrame(_X)
df.to_hdf(out_filename, "result")
top1 = 0
top5 = 0
if args.mode==0:
val(X, imgs)
else:
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
with open(os.path.join(args.output_dir,'result.json'), 'w') as opfile:
json_data = []
for ii in range(X.shape[0]):
score = X[ii]
#print("%d sum %f" % (ii, _sum))
sort_index = np.argsort(score)[::-1]
top_k = list(sort_index[0:3])
_data = {'image_id' : imgs[ii][2].split('/')[-1], 'label_id': top_k}
json_data.append(_data)
opfile.write(json.dumps(json_data))
out_filename = os.path.join(args.output_dir, 'result.hdf')
print(out_filename)
if os.path.exists(out_filename):
print("exists, delete first..")
os.remove(out_filename)
_X = X
print("_X row sum %f" % np.sum(_X[0]))
df = pd.DataFrame(_X)
df.to_hdf(out_filename, "result")
|
apache-2.0
|
deokwooj/DDEA
|
webgui/lib_bnlearn.py
|
2
|
12256
|
#!/usr/bin/python
"""
Created on Mon Mar 24 19:24:11 2014
@author: NGO Quang Minh Khiem
@e-mail: [email protected]
"""
# To force float point division
from __future__ import division
import numpy as np
from pandas import DataFrame
# R libs
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
import pandas.rpy.common as com
from rpy2.robjects import pandas2ri
import networkx as nx
import matplotlib.pyplot as plt
#============================================================#
# Use R bnlearn to learn the Bayes network structure
#============================================================#
### BN Learn
## load some R libs
r = robjects.r
utils = importr("utils")
bnlearn = importr("bnlearn")
rgraphviz = importr("Rgraphviz")
# this is important to seamlessly convert from pandas to R data frame
pandas2ri.activate()
#============================================================#
# Utility functions and Misc
#============================================================#
def write_to_file(filename,text):
with open(filename,'w') as f:
f.write(text)
# Close X11 window
def dev_off():
r['dev.off']()
#============================================================#
# Methods for Plotting
#============================================================#
# visualize graph from adjacency matrix r_graph
# for quick usage: set simple=True (by default)
# otherwise, the function allows customizing some properties of the graph
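# A minimal call might look like (both variable names are assumptions; cols_names
# should be the list of column names the structure was learned from):
#   nx_plot(bn_structure, list(data_frame.columns))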
def nx_plot(r_graph, cols_names, simple=True, labels=None, graph_layout='shell',
node_size=1600, node_color='blue', node_alpha=0.3,
node_text_size=12,
edge_color='blue', edge_alpha=0.3, edge_tickness=1,
edge_text_pos=0.3,
text_font='sans-serif'):
#G = nx.Graph()
dg = nx.DiGraph()
edges = []
np_amat = np.asarray(bnlearn.amat(r_graph))
for ri in range(np_amat.shape[0]):
for ci in range(np_amat.shape[1]):
if np_amat[ri,ci] == 1:
#G.add_edge(cols_names[ri],cols_names[ci])
dg.add_edge(cols_names[ri],cols_names[ci])
edges.append((cols_names[ri],cols_names[ci]))
if simple:
if graph_layout=='spectral':
nx.draw_spectral(dg,font_size=node_text_size)
elif graph_layout=='random':
nx.draw_random(dg,font_size=node_text_size)
elif graph_layout=='circular':
nx.draw_circular(dg,font_size=node_text_size)
elif graph_layout=='spring':
nx.draw_spring(dg,font_size=node_text_size)
else:
nx.draw(dg,font_size=node_text_size)
else:
draw_graph(edges,directed=True, labels=labels, graph_layout=graph_layout,
node_size=node_size, node_color=node_color, node_alpha=node_alpha,
node_text_size=node_text_size,
edge_color=edge_color, edge_alpha=edge_alpha, edge_tickness=edge_tickness,
edge_text_pos=edge_text_pos,
text_font=text_font)
#nxlib.draw_graph(dg,labels=cols_names)
def nx_plot2(r_graph,cols_names,is_bnlearn=True):
G = nx.Graph()
dg = nx.DiGraph()
if is_bnlearn:
np_amat = np.asarray(bnlearn.amat(r_graph))
for ri in range(np_amat.shape[0]):
for ci in range(np_amat.shape[1]):
if np_amat[ri,ci] == 1:
G.add_edge(cols_names[ri],cols_names[ci])
dg.add_edge(cols_names[ri],cols_names[ci])
else:
np_amat = np.asarray(r_graph)
for ri in range(np_amat.shape[0]):
for ci in range(np_amat.shape[1]):
if np_amat[ri,ci] >= 0:
#G.add_weighted_edges_from([(cols_names[ri],cols_names[ci],{'weight': np_amat[ri,ci]})])
G.add_edge(cols_names[ri],cols_names[ci],weight=np_amat[ri,ci])
#dg.add_weighted_edges_from([(cols_names[ri],cols_names[ci],np_amat[ri,ci])])
#nx.draw(G,nx.shell_layout)
nx.draw(G)
#nxlib.draw_graph(dg,labels=cols_names)
# a more generic graph plotting function, using networkx lib
# graph is a list of edges
def draw_graph(graph, directed=True, labels=None, graph_layout='shell',
node_size=1600, node_color='blue', node_alpha=0.3,
node_text_size=12,
edge_color='blue', edge_alpha=0.3, edge_tickness=1,
edge_text_pos=0.3,
text_font='sans-serif'):
# create networkx graph
#G=nx.Graph()
if directed:
G = nx.DiGraph()
else:
G = nx.Graph()
# add edges
for edge in graph:
G.add_edge(edge[0], edge[1])
# these are different layouts for the network you may try
# shell seems to work best
if graph_layout == 'spring':
graph_pos=nx.spring_layout(G)
elif graph_layout == 'spectral':
graph_pos=nx.spectral_layout(G)
elif graph_layout == 'random':
graph_pos=nx.random_layout(G)
else:
graph_pos=nx.shell_layout(G)
# draw graph
nx.draw_networkx_nodes(G,graph_pos,node_size=node_size,
alpha=node_alpha, node_color=node_color)
nx.draw_networkx_edges(G,graph_pos,width=edge_tickness,
alpha=edge_alpha,edge_color=edge_color)
nx.draw_networkx_labels(G, graph_pos,font_size=node_text_size,
font_family=text_font)
"""
if labels is None:
labels = range(len(graph))
edge_labels = dict(zip(graph, labels))
"""
if labels is not None:
edge_labels = dict(zip(graph, labels))
nx.draw_networkx_edge_labels(G, graph_pos, edge_labels=edge_labels,
label_pos=edge_text_pos)
# show graph
plt.show()
#============================================================#
# bnlearn wrapper APIs
#============================================================#
###
# Construct list of arcs used for blacklisting/whitelisting
# arc list is a list of arcs. For example:
# arc_list =
# [['A','B'] , ['A','C']]
#
# return data frame in the following format
# from to
# 0 A B
# 1 A C
###
def construct_arcs_frame(arc_list):
data_frame = DataFrame(data=np.array(arc_list),columns=['from','to'])
return data_frame
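# For example, the arc list documented above would be converted with
# (node names 'A', 'B', 'C' are purely illustrative):
#   bl = construct_arcs_frame([['A', 'B'], ['A', 'C']])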
def print_bw_rules():
rules = """
============================================================
Blacklisting Rules:
-------------------
1. any arc blacklisted in one of its possible directions is never present in the graph.
if A-->B is blacklisted (but B-->A is not), A-->B and A--B are never
present in the graph (but not B-->A)
2. any arc blacklisted in both directions, as well as the corresponding
undirected arc, is never present in the graph.
B(A-->B,B-->A) => B(A--B)
Whitelisting Rules:
-------------------
1. arcs whitelisted in one direction only (i.e. A-->B is whitelisted but B-->A is not)
have the respective reverse arcs blacklisted,
and are always present in the graph.
W(A-->B) => B(B-->A,A--B)
2. arcs whitelisted in both directions (i.e. both A--> B and B-->A are whitelisted)
are present in the graph,
but their direction is set by the learning algorithm.
3. any arc whitelisted and blacklisted at the same time is assumed to be whitelisted,
and is thus removed from the blacklist.
============================================================
"""
print rules
def convert_pymat_to_rfactor(py_mat):
mat_shape = py_mat.shape
r_factor_vec = r.factor(py_mat)
r_factor_mat = r.matrix(r_factor_vec, nrow=mat_shape[1], byrow=True)
return np.array(r_factor_mat).reshape(mat_shape[0],mat_shape[1],order='C')
def construct_data_frame(data_mat,columns=[]):
if len(columns) == 0:
column_names = range(data_mat.shape[1])
else:
column_names = columns
return DataFrame(data=data_mat,columns=column_names)
"""
def py_bnlearn(data_frame,method='gs',blacklist=None, whitelist=None):
# For hill-climbing, the data must be real or factor
#
if method == 'hc':
bn_structure = bnlearn.hc(data_frame)
else:
bn_structure = bnlearn.gs(data_frame)
return bn_structure
"""
#============================================================#
# APIs related to bn_learn structure
#============================================================#
#=======================|
# bn structure and graph|
#=======================|
def acyclic(bn_structure):
return bool(bnlearn.acyclic(bn_structure)[0])
def amat(bn_structure):
return np.array(bnlearn.amat(bn_structure))
def py_get_amat(bn_structure):
return np.array(bnlearn.amat(bn_structure))
#=======================|
# Arcs |
#=======================|
def narcs(bn_structure):
return bnlearn.narcs(bn_structure)[0]
def arcs(bn_structure):
arcs = np.array(bnlearn.arcs(bn_structure))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
def directed_arcs(bn_structure):
arcs = np.array(bnlearn.directed_arcs(bn_structure))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
def undirected_arcs(bn_structure):
arcs = np.array(bnlearn.undirected_arcs(bn_structure))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
def incoming_arcs(bn_structure, node_name):
arcs = np.array(bnlearn.incoming_arcs(bn_structure, node_name))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
def outgoing_arcs(bn_structure, node_name):
arcs = np.array(bnlearn.outgoing_arcs(bn_structure, node_name))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
#=======================|
# Nodes |
#=======================|
def nnodes(bn_structure):
return bnlearn.nnodes(bn_structure)[0]
def degree(bn_structure, node_name):
return bnlearn.degree(bn_structure, node_name)[0]
def in_degree(bn_structure, node_name):
return bnlearn.in_degree(bn_structure, node_name)[0]
def out_degree(bn_structure, node_name):
return bnlearn.out_degree(bn_structure, node_name)[0]
def root_nodes(bn_structure):
return np.array(bnlearn.root_nodes(bn_structure))
def leaf_nodes(bn_structure):
return np.array(bnlearn.leaf_nodes(bn_structure))
def children(bn_structure, node_name):
return np.array(bnlearn.children(bn_structure, node_name))
def parents(bn_structure, node_name):
return np.array(bnlearn.parents(bn_structure, node_name))
def nbr(bn_structure, node_name):
return np.array(bnlearn.nbr(bn_structure, node_name))
#=======================|
# bn fit |
#=======================|
###
# To fit data to bn structure, the graph must be completely directed
###
def py_bn_fit(bn_structure,data_frame):
fit = bnlearn.bn_fit(bn_structure,data_frame)
return fit
def py_get_node_cond_mat(fit,node_indx):
"""
Each item in fit is a list vector with dimension attributes
fit[node_indx] has 4 attributes ['node', 'parents', 'children', 'prob']
"""
node_fit = fit[node_indx]
node = node_fit[0]
parents = node_fit[1]
children = node_fit[2]
prob = node_fit[3]
"""
prob is a vector Array type in R, which contains the conditional
probability table of this node.
prob is a (n_0 x n_1 x ... x n_parents) matrix, where each n_i is the number
of discrete values of each node in the list prob_dimnames
prob_dimnames contains the name of each dimension.
"""
prob_dimnames = np.array(prob.dimnames.names)
prob_factors = np.array(prob.dimnames)
prob_mat = np.array(prob)
#prob_frame = DataFrame(data=prob_mat[0],columns=prob_dimnames)
return prob_dimnames,prob_factors,prob_mat
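# Hedged usage sketch (added for illustration; it needs a working rpy2/bnlearn
# setup and a fit object from py_bn_fit, so it is a sketch rather than a test):
# pull the conditional probability table of the first node of a fitted network.
def _example_node_cpt(fit):
    dimnames, factors, prob_mat = py_get_node_cond_mat(fit, 0)
    return dimnames, prob_mat.shape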
def bn_fit_barchart(fit, node_idx):
print bnlearn.bn_fit_barchart(fit[node_idx])
def bn_fit_dotplot(fit, node_idx):
print bnlearn.bn_fit_dotplot(fit[node_idx])
|
gpl-2.0
|
cclauss/itinerant-tester
|
trending_python_tester.py
|
1
|
4517
|
#!/usr/bin/env python3
"""
Update the repo's '.travis.yml' to trigger Travis CI to run flake8 tests on
the top 25 GitHub Trending Python repos.
Requires: pip3 install --upgrade beautifulsoup4 github3.py requests lxml
If lxml is not available, html5lib should be a workable substitute
"""
import getpass
import webbrowser
from datetime import datetime as dt
# self._put_long_string("%s=%s" % (key, val)) # self, key, val: undefined names
import bs4 # will require lxml or html5lib
import requests
from github3 import login as github3_login
HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:60.0)"
" Gecko/20100101 Firefox/60.0"
}
token = "cf2800ef2802b619a8ca8bf360f68ee4547a38c3"
def my_two_factor_function():
return input("Enter 2FA code: ").strip() or my_two_factor_function()
username = getpass.getuser() # Does local username == GitHub username?
print("Please enter the GitHub password for user: {}".format(username))
gh = github3_login(username, token, two_factor_callback=my_two_factor_function)
# url = 'https://github.com/trending/jupyter-notebook' # GitHub Trending top 25 repos
url = "https://github.com/trending/python" # GitHub Trending top 25 repos
# url += '?since=weekly'
# url += '?since=monthly'
# these repos pass tests, have pull requests to pass tests, or are Py3 only
ignore = []
# the boilerplate content of the .travis.yml file
fmt = """group: travis_latest
dist: bionic
language: python
cache: pip
python:
#- 2.7
#- 3.5
#- 3.6
#- 3.7
- 3.8
matrix:
allow_failures:
- python: 2.7
- python: nightly
- python: pypy
- python: pypy3
env:
%s
install:
#- pip install -r requirements.txt
- pip install flake8 # pytest # add other testing frameworks later
before_script:
- URL=https://github.com/${REPO}
- echo ; echo -n "flake8 testing of ${URL} on " ; python -V
- git clone --depth=50 ${URL} ~/${REPO} # --branch=master
- cd ~/${REPO}
script:
- echo stop the build if there are Python syntax errors or undefined names
- echo ; echo -n "flake8 testing of ${URL} on " ; python -V
- time flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
- echo exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
- time flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
#- true # pytest --capture=sys
notifications:
on_success: change
on_failure: change # `always` will be the setting once code changes slow down
"""
print(f"{dt.now():%a %b %d %H:%M:%S %Z %Y}")
# extract the repo names of GitHub's Top 25 Trending Python list
soup = bs4.BeautifulSoup(
requests.get(url, headers=HEADERS).content, "lxml"
) # or 'html5lib'
# 'python / cpython'
# repos = soup.find('ol', class_="repo-list").find_all('a', href=True)
repos = [h3.find("a", href=True) for h3 in soup.find_all(class_="h3 lh-condensed")]
print(repos)
# 'python/cpython'
repos = (
repo.text.strip().replace(" ", "").replace("\n" * 4, "")
for repo in repos
if "/" in repo.text and "://" not in repo.text
)
repos = list(repos) + [
"n1nj4sec/pupy",
"PythonCharmers/python-future",
"ansible/ansible",
"oaubert/python-vlc",
"ColdGrub1384/Pyto",
"CoreSecurity/impacket",
"internetarchive/openlibrary",
"google/ffn",
"apache/beam",
"apache/incubator-mxnet",
"hyperledger/iroha-python",
"Supervisor/supervisor",
"cheshirekow/cmake_format",
"ckan/ckan",
"ibm-watson-iot/connector-cloudant",
"ibm-watson-iot/device-kodi",
"ibm-watson-iot/functions",
"apache/spark",
"Tribler/tribler",
"webpy/webpy",
"nodejs/node",
"nodejs/node-gyp",
"ArduPilot/ardupilot",
"ArduPilot/pymavlink",
"dronekit/dronekit-python",
"ansible/awx",
"matplotlib/matplotlib",
"ggtracker/sc2reader",
"httplib2/httplib2",
"hyperledger/fabric-sdk-py",
"getsentry/sentry",
"v8/v8",
]
# ' - REPO=python/cpython' also strip out any repos that are in ignore list
repos = "\n".join(" - REPO=" + repo for repo in repos)
# if 'shadowsocks' not in repo and repo not in ignore)
print(repos)
travis_text = fmt % repos
# log into GitHub and commit an update to .travis.yml which will trigger tests
travis = gh.repository(username, "itinerant-tester").file_contents("/.travis.yml")
print(travis.update("trigger a new build", travis_text.encode("utf-8")))
webbrowser.open("https://travis-ci.com/{}/itinerant-tester".format(username))
|
apache-2.0
|
iohannez/gnuradio
|
gnuradio-runtime/apps/evaluation_random_numbers.py
|
7
|
5284
|
#!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr
import numpy as np
from scipy.stats import norm, laplace, rayleigh
from matplotlib import pyplot as plt
# NOTE: scipy and matplotlib are optional packages and not included in the default gnuradio dependencies
#*** SETUP ***#
# Number of realisations per histogram
num_tests = 1000000
# Set number of bins in histograms
uniform_num_bins = 31
gauss_num_bins = 31
rayleigh_num_bins = 31
laplace_num_bins = 31
rndm = gr.random() # instance of gnuradio random class (gr::random)
print('All histograms contain',num_tests,'realisations.')
#*** GENERATE DATA ***#
uniform_values = np.zeros(num_tests)
gauss_values = np.zeros(num_tests)
rayleigh_values = np.zeros(num_tests)
laplace_values = np.zeros(num_tests)
for k in range(num_tests):
uniform_values[k] = rndm.ran1()
gauss_values[k] = rndm.gasdev()
rayleigh_values[k] = rndm.rayleigh()
laplace_values[k] = rndm.laplacian()
#*** HISTOGRAM DATA AND CALCULATE EXPECTED COUNTS ***#
uniform_bins = np.linspace(0,1,uniform_num_bins)
gauss_bins = np.linspace(-8,8,gauss_num_bins)
laplace_bins = np.linspace(-8,8,laplace_num_bins)
rayleigh_bins = np.linspace(0,10,rayleigh_num_bins)
uniform_hist = np.histogram(uniform_values,uniform_bins)
gauss_hist = np.histogram(gauss_values,gauss_bins)
rayleigh_hist = np.histogram(rayleigh_values,rayleigh_bins)
laplace_hist = np.histogram(laplace_values,laplace_bins)
uniform_expected = np.zeros(uniform_num_bins-1)
gauss_expected = np.zeros(gauss_num_bins-1)
rayleigh_expected = np.zeros(rayleigh_num_bins-1)
laplace_expected = np.zeros(laplace_num_bins-1)
for k in range(len(uniform_hist[0])):
uniform_expected[k] = num_tests / float(uniform_num_bins-1)
for k in range(len(gauss_hist[0])):
gauss_expected[k] = float(norm.cdf(gauss_hist[1][k+1])-norm.cdf(gauss_hist[1][k]))*num_tests
for k in range(len(rayleigh_hist[0])):
rayleigh_expected[k] = float(rayleigh.cdf(rayleigh_hist[1][k+1])-rayleigh.cdf(rayleigh_hist[1][k]))*num_tests
for k in range(len(laplace_hist[0])):
laplace_expected[k] = float(laplace.cdf(laplace_hist[1][k+1])-laplace.cdf(laplace_hist[1][k]))*num_tests
#*** PLOT HISTOGRAMS AND EXPECTATIONS TAKEN FROM SCIPY ***#
uniform_bins_center = uniform_bins[0:-1]+(uniform_bins[1]-uniform_bins[0]) / 2.0
gauss_bins_center = gauss_bins[0:-1]+(gauss_bins[1]-gauss_bins[0]) / 2.0
rayleigh_bins_center = rayleigh_bins[0:-1]+(rayleigh_bins[1]-rayleigh_bins[0]) / 2.0
laplace_bins_center = laplace_bins[0:-1]+(laplace_bins[1]-laplace_bins[0]) / 2.0
plt.figure(1)
plt.subplot(2,1,1)
plt.plot(uniform_bins_center,uniform_hist[0],'s--',uniform_bins_center,uniform_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Uniform: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(uniform_bins_center,uniform_hist[0] / uniform_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Uniform: Relative deviation to scipy')
plt.figure(2)
plt.subplot(2,1,1)
plt.plot(gauss_bins_center,gauss_hist[0],'s--',gauss_bins_center,gauss_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Gauss: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(gauss_bins_center,gauss_hist[0] / gauss_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Gauss: Relative deviation to scipy')
plt.figure(3)
plt.subplot(2,1,1)
plt.plot(rayleigh_bins_center,rayleigh_hist[0],'s--',rayleigh_bins_center,rayleigh_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Rayleigh: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(rayleigh_bins_center,rayleigh_hist[0] / rayleigh_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Rayleigh: Relative deviation to scipy')
plt.figure(4)
plt.subplot(2,1,1)
plt.plot(laplace_bins_center,laplace_hist[0],'s--',laplace_bins_center,laplace_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Laplace: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(laplace_bins_center,laplace_hist[0] / laplace_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Laplace: Relative deviation to scipy')
plt.show()
|
gpl-3.0
|
cpcloud/ibis
|
ibis/pandas/tests/test_client.py
|
1
|
2779
|
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
from pytest import param
import ibis
from ibis.pandas.client import PandasTable # noqa: E402
pytestmark = pytest.mark.pandas
@pytest.fixture
def client():
return ibis.pandas.connect(
{
'df': pd.DataFrame({'a': [1, 2, 3], 'b': list('abc')}),
'df_unknown': pd.DataFrame(
{'array_of_strings': [['a', 'b'], [], ['c']]}
),
}
)
@pytest.fixture
def table(client):
return client.table('df')
def test_client_table(table):
assert isinstance(table.op(), ibis.expr.operations.DatabaseTable)
assert isinstance(table.op(), PandasTable)
def test_client_table_repr(table):
assert 'PandasTable' in repr(table)
def test_load_data(client):
client.load_data('testing', tm.makeDataFrame())
assert client.exists_table('testing')
assert client.get_schema('testing')
def test_create_table(client):
client.create_table('testing', obj=tm.makeDataFrame())
assert client.exists_table('testing')
client.create_table('testingschema', schema=client.get_schema('testing'))
assert client.exists_table('testingschema')
def test_literal(client):
lit = ibis.literal(1)
result = client.execute(lit)
assert result == 1
def test_list_tables(client):
assert client.list_tables(like='df_unknown')
assert not client.list_tables(like='not_in_the_database')
assert client.list_tables()
def test_read_with_undiscoverable_type(client):
with pytest.raises(TypeError):
client.table('df_unknown')
def test_drop(table):
table = table.mutate(c=table.a)
expr = table.drop(['a'])
result = expr.execute()
expected = table[['b', 'c']].execute()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'unit',
[
param('Y', marks=pytest.mark.xfail(raises=TypeError)),
param('M', marks=pytest.mark.xfail(raises=TypeError)),
param('D', marks=pytest.mark.xfail(raises=TypeError)),
param('h', marks=pytest.mark.xfail(raises=TypeError)),
param('m', marks=pytest.mark.xfail(raises=TypeError)),
param('s', marks=pytest.mark.xfail(raises=TypeError)),
param('ms', marks=pytest.mark.xfail(raises=TypeError)),
param('us', marks=pytest.mark.xfail(raises=TypeError)),
'ns',
param('ps', marks=pytest.mark.xfail(raises=TypeError)),
param('fs', marks=pytest.mark.xfail(raises=TypeError)),
param('as', marks=pytest.mark.xfail(raises=TypeError)),
],
)
def test_datetime64_infer(client, unit):
value = np.datetime64('2018-01-02', unit)
expr = ibis.literal(value, type='timestamp')
result = client.execute(expr)
assert result == value
|
apache-2.0
|
rhiever/tpot
|
tpot/config/classifier_mdr.py
|
4
|
1756
|
# -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson ([email protected])
- Weixuan Fu ([email protected])
- Daniel Angell ([email protected])
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
# Check the TPOT documentation for information on the structure of config dicts
tpot_mdr_classifier_config_dict = {
# Classifiers
'sklearn.linear_model.LogisticRegression': {
'penalty': ["l1", "l2"],
'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.],
'dual': [True, False]
},
# Feature constructors
'mdr.MDR': {
'tie_break': [0, 1],
'default_label': [0, 1]
},
# Feature Selectors
'skrebate.ReliefF': {
'n_features_to_select': range(1, 6),
'n_neighbors': [2, 10, 50, 100, 250, 500]
},
'skrebate.SURF': {
'n_features_to_select': range(1, 6)
},
'skrebate.SURFstar': {
'n_features_to_select': range(1, 6)
},
'skrebate.MultiSURF': {
'n_features_to_select': range(1, 6)
}
}
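# Hedged usage sketch (added for illustration; not part of this config module):
# the dictionary above is normally handed to TPOT through its config_dict
# argument. The generations/population_size values below are arbitrary choices
# for demonstration only.
def _example_use_config():
    from tpot import TPOTClassifier
    return TPOTClassifier(config_dict=tpot_mdr_classifier_config_dict,
                          generations=5, population_size=20)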
|
lgpl-3.0
|
ud3sh/coursework
|
deeplearning.ai/coursera-improving-neural-networks/week1/assignment1/Initialization.py
|
1
|
17555
|
# coding: utf-8
# # Initialization
#
# Welcome to the first assignment of "Improving Deep Neural Networks".
#
# Training your neural network requires specifying an initial value of the weights. A well chosen initialization method will help learning.
#
# If you completed the previous course of this specialization, you probably followed our instructions for weight initialization, and it has worked out so far. But how do you choose the initialization for a new neural network? In this notebook, you will see how different initializations lead to different results.
#
# A well chosen initialization can:
# - Speed up the convergence of gradient descent
# - Increase the odds of gradient descent converging to a lower training (and generalization) error
#
# To get started, run the following cell to load the packages and the planar dataset you will try to classify.
# In[5]:
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
get_ipython().magic('matplotlib inline')
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load image dataset: blue/red dots in circles
train_X, train_Y, test_X, test_Y = load_dataset()
# You would like a classifier to separate the blue dots from the red dots.
# ## 1 - Neural Network model
# You will use a 3-layer neural network (already implemented for you). Here are the initialization methods you will experiment with:
# - *Zeros initialization* -- setting `initialization = "zeros"` in the input argument.
# - *Random initialization* -- setting `initialization = "random"` in the input argument. This initializes the weights to large random values.
# - *He initialization* -- setting `initialization = "he"` in the input argument. This initializes the weights to random values scaled according to a paper by He et al., 2015.
#
# **Instructions**: Please quickly read over the code below, and run it. In the next part you will implement the three initialization methods that this `model()` calls.
# In[7]:
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
"""
Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
learning_rate -- learning rate for gradient descent
num_iterations -- number of iterations to run gradient descent
print_cost -- if True, print the cost every 1000 iterations
initialization -- flag to choose which initialization to use ("zeros","random" or "he")
Returns:
parameters -- parameters learnt by the model
"""
grads = {}
costs = [] # to keep track of the loss
m = X.shape[1] # number of examples
layers_dims = [X.shape[0], 10, 5, 1]
# Initialize parameters dictionary.
if initialization == "zeros":
parameters = initialize_parameters_zeros(layers_dims)
elif initialization == "random":
parameters = initialize_parameters_random(layers_dims)
elif initialization == "he":
parameters = initialize_parameters_he(layers_dims)
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
a3, cache = forward_propagation(X, parameters)
# Loss
cost = compute_loss(a3, Y)
# Backward propagation.
grads = backward_propagation(X, Y, cache)
# Update parameters.
parameters = update_parameters(parameters, grads, learning_rate)
# Print the loss every 1000 iterations
if print_cost and i % 1000 == 0:
print("Cost after iteration {}: {}".format(i, cost))
costs.append(cost)
# plot the loss
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
# ## 2 - Zero initialization
#
# There are two types of parameters to initialize in a neural network:
# - the weight matrices $(W^{[1]}, W^{[2]}, W^{[3]}, ..., W^{[L-1]}, W^{[L]})$
# - the bias vectors $(b^{[1]}, b^{[2]}, b^{[3]}, ..., b^{[L-1]}, b^{[L]})$
#
# **Exercise**: Implement the following function to initialize all parameters to zeros. You'll see later that this does not work well since it fails to "break symmetry", but let's try it anyway and see what happens. Use np.zeros((..,..)) with the correct shapes.
# In[13]:
# GRADED FUNCTION: initialize_parameters_zeros
def initialize_parameters_zeros(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
parameters = {}
L = len(layers_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.zeros(shape = (layers_dims[l], layers_dims[l-1]))
parameters['b' + str(l)] = np.zeros(shape = (layers_dims[l], 1))
### END CODE HERE ###
return parameters
# In[14]:
parameters = initialize_parameters_zeros([3,2,1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **W1**
# </td>
# <td>
# [[ 0. 0. 0.]
# [ 0. 0. 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **b1**
# </td>
# <td>
# [[ 0.]
# [ 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **W2**
# </td>
# <td>
# [[ 0. 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **b2**
# </td>
# <td>
# [[ 0.]]
# </td>
# </tr>
#
# </table>
# Run the following code to train your model on 15,000 iterations using zeros initialization.
# In[15]:
parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# The performance is really bad: the cost does not really decrease, and the algorithm performs no better than random guessing. Why? Let's look at the details of the predictions and the decision boundary:
# In[16]:
print ("predictions_train = " + str(predictions_train))
print ("predictions_test = " + str(predictions_test))
# In[17]:
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# The model is predicting 0 for every example.
#
# In general, initializing all the weights to zero results in the network failing to break symmetry. This means that every neuron in each layer will learn the same thing, and you might as well be training a neural network with $n^{[l]}=1$ for every layer, and the network is no more powerful than a linear classifier such as logistic regression.
# <font color='blue'>
# **What you should remember**:
# - The weights $W^{[l]}$ should be initialized randomly to break symmetry.
# - It is however okay to initialize the biases $b^{[l]}$ to zeros. Symmetry is still broken so long as $W^{[l]}$ is initialized randomly.
#
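# Before moving on, a small illustrative check (not part of the graded
# assignment) of the symmetry argument above: with all-zero weights every
# hidden unit computes the same pre-activation for any input, so the units
# receive identical updates and stay interchangeable forever.
def _symmetry_check_zeros():
    W1 = np.zeros((10, 2))          # 10 hidden units, 2 input features
    b1 = np.zeros((10, 1))
    x = np.random.randn(2, 5)       # 5 arbitrary input examples
    z1 = np.dot(W1, x) + b1         # every entry of z1 is zero
    return np.allclose(z1, 0.0)     # True: all units behave identically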
# ## 3 - Random initialization
#
# To break symmetry, let's initialize the weights randomly. Following random initialization, each neuron can then proceed to learn a different function of its inputs. In this exercise, you will see what happens if the weights are initialized randomly, but to very large values.
#
# **Exercise**: Implement the following function to initialize your weights to large random values (scaled by \*10) and your biases to zeros. Use `np.random.randn(..,..) * 10` for weights and `np.zeros((.., ..))` for biases. We are using a fixed `np.random.seed(..)` to make sure your "random" weights match ours, so don't worry if running your code several times always gives you the same initial values for the parameters.
# In[19]:
# GRADED FUNCTION: initialize_parameters_random
def initialize_parameters_random(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
    np.random.seed(3)               # This seed makes sure your "random" numbers will be the same as ours
parameters = {}
L = len(layers_dims) # integer representing the number of layers
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn( layers_dims[l], layers_dims[l-1]) * 10
parameters['b' + str(l)] = np.zeros(shape = (layers_dims[l], 1))
### END CODE HERE ###
return parameters
# In[20]:
parameters = initialize_parameters_random([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **W1**
# </td>
# <td>
# [[ 17.88628473 4.36509851 0.96497468]
# [-18.63492703 -2.77388203 -3.54758979]]
# </td>
# </tr>
# <tr>
# <td>
# **b1**
# </td>
# <td>
# [[ 0.]
# [ 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **W2**
# </td>
# <td>
# [[-0.82741481 -6.27000677]]
# </td>
# </tr>
# <tr>
# <td>
# **b2**
# </td>
# <td>
# [[ 0.]]
# </td>
# </tr>
#
# </table>
# Run the following code to train your model on 15,000 iterations using random initialization.
# In[21]:
parameters = model(train_X, train_Y, initialization = "random")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# If you see "inf" as the cost after the iteration 0, this is because of numerical roundoff; a more numerically sophisticated implementation would fix this. But this isn't worth worrying about for our purposes.
#
# Anyway, it looks like you have broken symmetry, and this gives better results than before. The model is no longer outputting all 0s.
# In[22]:
print (predictions_train)
print (predictions_test)
# In[23]:
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# **Observations**:
# - The cost starts very high. This is because with large random-valued weights, the last activation (sigmoid) outputs results that are very close to 0 or 1 for some examples, and when it gets that example wrong it incurs a very high loss for that example. Indeed, when $\log(a^{[3]}) = \log(0)$, the loss goes to infinity.
# - Poor initialization can lead to vanishing/exploding gradients, which also slows down the optimization algorithm.
# - If you train this network longer you will see better results, but initializing with overly large random numbers slows down the optimization.
#
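# A tiny illustrative check of the first observation above (not part of the
# assignment): a large pre-activation saturates the sigmoid near 0 or 1, and
# the cross-entropy loss -log(a) blows up when such a confident prediction is
# wrong, which is why the cost starts so high with large weights.
def _saturation_check():
    z = np.array([-20., 0., 20.])
    a = sigmoid(z)                              # roughly [0, 0.5, 1]
    loss_if_label_is_one = -np.log(a + 1e-15)   # huge for the saturated wrong case
    return a, loss_if_label_is_one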
# <font color='blue'>
# **In summary**:
# - Initializing weights to very large random values does not work well.
# - Hopefully initializing with small random values does better. The important question is: how small should these random values be? Let's find out in the next part!
# ## 4 - He initialization
#
# Finally, try "He Initialization"; this is named for the first author of He et al., 2015. (If you have heard of "Xavier initialization", this is similar except Xavier initialization uses a scaling factor for the weights $W^{[l]}$ of `sqrt(1./layers_dims[l-1])` where He initialization would use `sqrt(2./layers_dims[l-1])`.)
#
# **Exercise**: Implement the following function to initialize your parameters with He initialization.
#
# **Hint**: This function is similar to the previous `initialize_parameters_random(...)`. The only difference is that instead of multiplying `np.random.randn(..,..)` by 10, you will multiply it by $\sqrt{\frac{2}{\text{dimension of the previous layer}}}$, which is what He initialization recommends for layers with a ReLU activation.
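# Quick illustrative check (not required by the exercise): weights drawn as
# randn * sqrt(2 / fan_in) have variance close to 2 / fan_in, which is the
# property He initialization relies on to keep ReLU activations well scaled.
# The layer sizes below are arbitrary.
def _he_variance_check(fan_in=500, fan_out=400):
    W = np.random.randn(fan_out, fan_in) * np.sqrt(2. / fan_in)
    return W.var(), 2. / fan_in     # the two values should nearly match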
# In[28]:
# GRADED FUNCTION: initialize_parameters_he
def initialize_parameters_he(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
np.random.seed(3)
parameters = {}
L = len(layers_dims) - 1 # integer representing the number of layers
for l in range(1, L + 1):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn( layers_dims[l], layers_dims[l-1]) * np.sqrt(2/layers_dims[l-1])
parameters['b' + str(l)] = np.zeros(shape = (layers_dims[l], 1))
### END CODE HERE ###
return parameters
# In[29]:
parameters = initialize_parameters_he([2, 4, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **W1**
# </td>
# <td>
# [[ 1.78862847 0.43650985]
# [ 0.09649747 -1.8634927 ]
# [-0.2773882 -0.35475898]
# [-0.08274148 -0.62700068]]
# </td>
# </tr>
# <tr>
# <td>
# **b1**
# </td>
# <td>
# [[ 0.]
# [ 0.]
# [ 0.]
# [ 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **W2**
# </td>
# <td>
# [[-0.03098412 -0.33744411 -0.92904268 0.62552248]]
# </td>
# </tr>
# <tr>
# <td>
# **b2**
# </td>
# <td>
# [[ 0.]]
# </td>
# </tr>
#
# </table>
# Run the following code to train your model on 15,000 iterations using He initialization.
# In[30]:
parameters = model(train_X, train_Y, initialization = "he")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# In[31]:
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# **Observations**:
# - The model with He initialization separates the blue and the red dots very well in a small number of iterations.
#
# ## 5 - Conclusions
# You have seen three different types of initializations. For the same number of iterations and same hyperparameters the comparison is:
#
# <table>
# <tr>
# <td>
# **Model**
# </td>
# <td>
# **Train accuracy**
# </td>
# <td>
# **Problem/Comment**
# </td>
#
# </tr>
# <td>
# 3-layer NN with zeros initialization
# </td>
# <td>
# 50%
# </td>
# <td>
# fails to break symmetry
# </td>
# <tr>
# <td>
# 3-layer NN with large random initialization
# </td>
# <td>
# 83%
# </td>
# <td>
# too large weights
# </td>
# </tr>
# <tr>
# <td>
# 3-layer NN with He initialization
# </td>
# <td>
# 99%
# </td>
# <td>
# recommended method
# </td>
# </tr>
# </table>
# <font color='blue'>
# **What you should remember from this notebook**:
# - Different initializations lead to different results
# - Random initialization is used to break symmetry and make sure different hidden units can learn different things
# - Don't initialize to values that are too large
# - He initialization works well for networks with ReLU activations.
|
unlicense
|
EPFLMachineLearningTeam01/Project1
|
src/scripts/plot_histograms.py
|
1
|
4107
|
from matplotlib import pyplot as plt
import numpy as np
from tqdm import tqdm
def savefig(name):
kwargs = {'bbox_inches': 'tight', 'transparent': False, 'pad_inches': 0}
plt.savefig('./analysis/%s.pdf' % name, **kwargs)
plt.savefig('./analysis/%s.png' % name, **kwargs)
def do_hist_scatter(X, y_, header, idx_x = None, idx_y = None, bins = 20):
N = X.shape[1]
    if idx_x is None:
        idx_x = range(N)
    if idx_y is None:
        idx_y = range(N)
_, plots = plt.subplots(len(idx_x), len(idx_y), figsize=(15,15))
with tqdm(total=len(idx_x) * len(idx_y)) as pbar:
for x, i in enumerate(idx_x):
for y, j in enumerate(idx_y):
ax = plots[x][y]
ax.set_rasterized(True)
if i == j:
ax.hist(X[:,i], color='green', bins=bins,
histtype='bar', align='mid')
else:
ax.scatter(X[:,j], X[:,i],
c = ['red' if t > 0 else 'blue' for t in y_],
s=1)
if x == 0 or x == len(idx_x) - 1:
ax.xaxis.set_label_position('top' if x == 0 else 'bottom')
ax.set_xlabel(header[j])
if y == 0 or y == len(idx_y) - 1:
ax.yaxis.set_label_position('left' if y == 0 else 'right')
ax.set_ylabel(header[i])
pbar.update(1)
def get_ranges(N, k):
all_range = list(range(N))
n_intervals = N // k
if n_intervals * k < N: n_intervals += 1
for i in range(n_intervals):
for j in range(n_intervals):
range_x = range(i * k, min(N, i * k + k))
range_y = range(j * k, min(N, j * k + k))
yield (range_x, range_y)
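# Illustrative note (added for clarity; not in the original script): for a
# 5-feature matrix tiled in blocks of 2, get_ranges(5, 2) yields 3 x 3 = 9
# (range_x, range_y) pairs, from (range(0, 2), range(0, 2)) up to
# (range(4, 5), range(4, 5)), one pair per scatter-matrix tile.
def _example_get_ranges():
    return list(get_ranges(5, 2))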
def do_hist(X, header, shape = (None, None), bins = 20):
N = X.shape[1]
assert(shape[0] * shape[1] >= N)
_, plots = plt.subplots(shape[0], shape[1], figsize=(15,15))
with tqdm(total=N) as pbar:
for i in range(shape[0]):
for j in range(shape[1]):
n = i * shape[1] + j
if n >= N:
continue
ax = plots[i][j]
ax.hist(X[:,n], color='green', bins=bins, histtype='bar', align='mid')
ax.set_title(label = header[n], fontsize=7)
ax.get_yaxis().set_tick_params(which='both', direction='in')
ax.get_xaxis().set_tick_params(which='both', direction='in')
pbar.update(1)
def do_Xy(X, y, header, shape = (None, None), bins = 15):
N = X.shape[1]
assert(shape[0] * shape[1] >= N)
_, plots = plt.subplots(shape[0], shape[1], figsize=(15,15))
with tqdm(total=N) as pbar:
for i in range(shape[0]):
for j in range(shape[1]):
n = i * shape[1] + j
if n >= N:
continue
ax = plots[i][j]
ax.hist(X[y > 0,n], bins=bins, alpha=0.5, label='+1')
ax.hist(X[y <= 0,n], bins=bins, alpha=0.5, label='-1')
ax.get_yaxis().set_tick_params(which='both', direction='in')
ax.get_xaxis().set_tick_params(which='both', direction='in')
ax.set_title(label = header[n], fontsize=7)
if i == 0 and j == 0:
ax.legend()
pbar.update(1)
def do_boxplot(X, header, shape = (None, None)):
N = X.shape[1]
assert(shape[0] * shape[1] >= N)
_, plots = plt.subplots(shape[0], shape[1], figsize=(15,30))
with tqdm(total=N) as pbar:
for i in range(shape[0]):
for j in range(shape[1]):
n = i * shape[1] + j
if n >= N:
continue
ax = plots[i][j]
ax.boxplot(X[:, n], vert = False)
ax.get_yaxis().set_tick_params(which='both', direction='in')
ax.get_xaxis().set_tick_params(which='both', direction='in')
ax.set_title(label = header[n], fontsize=7)
pbar.update(1)
|
mit
|
alex-ta/Fontinator
|
NeuralNet/Oli/libs/ProcessingPipeline.py
|
1
|
5521
|
from keras.optimizers import RMSprop
from keras.utils import np_utils
from numpy.core.multiarray import ndarray
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import numpy as np
from NeuralNet.Oli.libs.TrainingLogger import TrainingLogger
from NeuralNet.Oli.libs.ImageLoader import ImageLoader
from NeuralNet.Oli.libs.ModelSerializer import ModelSerializer
from NeuralNet.Oli.libs.Preprocessor import IPreprocessor
class ProcessingPipeline:
'''
Manages the whole pipeline from data loading, preprocessing to model training and evaluation
'''
def __init__(self):
self.font_names: list = None
self.model_path: str = None
self.model = None
self.preprocessor: IPreprocessor = None
self.label_encoder = None
pass
def load_features_and_preprocess(self, img_path: str, img_preprocessor: IPreprocessor) -> (ndarray, ndarray):
self.preprocessor = img_preprocessor
# Loads the images from the defined path
data_loader: ImageLoader = ImageLoader(img_path)
self.font_names = data_loader.get_font_names()
image_count = data_loader.get_image_count()
font_count = data_loader.get_font_count()
print("Found {0} images with {1} different fonts".format(image_count, font_count))
# Map labels(str) to class_ids(int)
self.label_encoder = LabelEncoder()
self.label_encoder.fit(self.font_names)
label_ids = self.label_encoder.transform(self.label_encoder.classes_)
print("Mapping labels:\n{0} \n -> {1}".format(self.label_encoder.classes_, label_ids))
print("Start preprocessing images ...")
features = []
labels = []
# Iterate over all fonts
for f_name in self.font_names:
print(" -> {0}".format(f_name))
label_id = self.label_encoder.transform([f_name])
font_labels = np.full(data_loader.get_img_count_for_font(f_name), label_id, dtype=np.float32)
labels.extend(font_labels)
# Iterate over all images for one font
for img_path in data_loader.iterate_images_for_fontname(f_name):
nd_img: ndarray = self.preprocessor.prepare_image(img_path)
features.append(nd_img)
x: ndarray = np.array(features)
y: ndarray = np.array(labels)
return x, y
def __compile_model(self):
print("Compiling NN model ...")
nn_optimizer = RMSprop(lr=0.0001)
self.model.compile(optimizer=nn_optimizer,
loss='categorical_crossentropy',
metrics=['accuracy'])
def load_model(self, model_path):
# Load the NN model from disk
print("Loading model from disk")
model_serializer = ModelSerializer(model_path)
self.model = model_serializer.load_from_path()
self.__compile_model()
def train_model(self, keras_model, x: ndarray, y: ndarray, epos=1000, train_ratio=0.8, batch_size=100):
self.model = keras_model
# Convert labels to categorical one-hot encoding; e.g. [1, 2, 3] -> [[1,0,0], [0,1,0], [0,0,1]]
y_onehotenc = np_utils.to_categorical(y)
# Splitting to train- /test data
train_X, test_X, train_y, test_y = train_test_split(x, y_onehotenc, train_size=train_ratio)
# Saves stats while training the NN
self.train_logger: TrainingLogger = TrainingLogger(self.model_path, frequent_write=False)
self.__compile_model()
print("Training the NN model")
if type(batch_size) == float and 0.0 < batch_size <= 1.0:
batch_size = int(batch_size * train_X.shape[0])
self.model.fit(train_X, train_y, epochs=epos, batch_size=batch_size,
validation_data=(test_X, test_y), callbacks=[self.train_logger])
# Calculate the metrics for the trained model
loss_and_metrics = keras_model.evaluate(test_X, test_y, batch_size=batch_size)
print(loss_and_metrics)
def save_model(self, model_save_path: str, include_stats=True):
# save the mapping of the features to disk
model_serializer = ModelSerializer(model_save_path)
label_ids = self.label_encoder.transform(self.label_encoder.classes_)
model_serializer.save_label_mapping(self.label_encoder.classes_, label_ids)
# Write csv file and plot image for training stats
if include_stats is True:
self.train_logger.set_basepath(model_save_path)
self.train_logger.write_csv()
self.train_logger.make_plots()
# Save the NN model to disk
print("Saving NN model and the label index mapping")
model_serializer.serialize_to_disk(self.model)
def predict(self, x):
# Make predictions
y_pred_onehotenc = self.model.predict(x)
# Retransform one hot encoding to indexes
y_pred = y_pred_onehotenc.argmax(axis=1)
return y_pred
def evaluate(self, y, y_pred):
# Calculate correct and wrong prediction count
correct_pred_items = np.equal(y, y_pred)
cor_pred_count = np.sum(correct_pred_items)
wrong_pred_count = y_pred.size - cor_pred_count
cor_pred_ratio = cor_pred_count / y_pred.size
print("Summary:")
print("Correct predictions: {0} | Wrong predictions: {1}"
.format(cor_pred_count, wrong_pred_count))
print("{0}".format(cor_pred_ratio))
|
apache-2.0
|
DGrady/pandas
|
pandas/tests/io/parser/compression.py
|
6
|
5431
|
# -*- coding: utf-8 -*-
"""
Tests compressed data parsing functionality for all
of the parsers defined in parsers.py
"""
import pytest
import pandas.util.testing as tm
class CompressionTests(object):
def test_zip(self):
import zipfile
with open(self.csv1, 'rb') as data_file:
data = data_file.read()
expected = self.read_csv(self.csv1)
with tm.ensure_clean('test_file.zip') as path:
tmp = zipfile.ZipFile(path, mode='w')
tmp.writestr('test_file', data)
tmp.close()
result = self.read_csv(path, compression='zip')
tm.assert_frame_equal(result, expected)
result = self.read_csv(path, compression='infer')
tm.assert_frame_equal(result, expected)
            if self.engine != 'python':
with open(path, 'rb') as f:
result = self.read_csv(f, compression='zip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean('combined_zip.zip') as path:
inner_file_names = ['test_file', 'second_file']
tmp = zipfile.ZipFile(path, mode='w')
for file_name in inner_file_names:
tmp.writestr(file_name, data)
tmp.close()
tm.assert_raises_regex(ValueError, 'Multiple files',
self.read_csv, path, compression='zip')
tm.assert_raises_regex(ValueError, 'Multiple files',
self.read_csv, path,
compression='infer')
with tm.ensure_clean() as path:
tmp = zipfile.ZipFile(path, mode='w')
tmp.close()
tm.assert_raises_regex(ValueError, 'Zero files',
self.read_csv, path, compression='zip')
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
pytest.raises(zipfile.BadZipfile, self.read_csv,
f, compression='zip')
def test_gzip(self):
import gzip
with open(self.csv1, 'rb') as data_file:
data = data_file.read()
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='gzip')
tm.assert_frame_equal(result, expected)
with open(path, 'rb') as f:
result = self.read_csv(f, compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean('test.gz') as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='infer')
tm.assert_frame_equal(result, expected)
def test_bz2(self):
import bz2
with open(self.csv1, 'rb') as data_file:
data = data_file.read()
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='bz2')
tm.assert_frame_equal(result, expected)
pytest.raises(ValueError, self.read_csv,
path, compression='bz3')
with open(path, 'rb') as fin:
result = self.read_csv(fin, compression='bz2')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean('test.bz2') as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='infer')
tm.assert_frame_equal(result, expected)
def test_xz(self):
lzma = tm._skip_if_no_lzma()
with open(self.csv1, 'rb') as data_file:
data = data_file.read()
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = lzma.LZMAFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='xz')
tm.assert_frame_equal(result, expected)
with open(path, 'rb') as f:
result = self.read_csv(f, compression='xz')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean('test.xz') as path:
tmp = lzma.LZMAFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='infer')
tm.assert_frame_equal(result, expected)
def test_read_csv_infer_compression(self):
# see gh-9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_invalid_compression(self):
msg = 'Unrecognized compression type: sfark'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv('test_file.zip', compression='sfark')
|
bsd-3-clause
|
jblackburne/scikit-learn
|
sklearn/grid_search.py
|
4
|
38651
|
"""
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. This module will be removed in 0.20.",
DeprecationWarning)
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
        **Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
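# Hedged usage sketch (added for illustration; not part of the original
# module): score a single parameter setting on one train/test split using
# a synthetic dataset and scikit-learn's SVC. All values are placeholders.
def _example_fit_grid_point():
    from sklearn.svm import SVC     # imported lazily, for illustration only
    X = np.random.randn(20, 3)
    y = np.array([0, 1] * 10)
    train, test = np.arange(15), np.arange(15, 20)
    estimator = SVC()
    scorer = check_scoring(estimator, scoring='accuracy')
    return fit_grid_point(X, y, estimator, {'C': 1.0}, train, test,
                          scorer, verbose=0)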
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance. We avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of quadruplets:
        # (score, n_test_samples, scoring_time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
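# Minimal sketch of the per-candidate aggregation performed in _fit above:
# with iid=True each fold score is weighted by its number of test samples,
# otherwise a plain mean over folds is taken (the helper name is
# illustrative and not part of the public API).
def _example_aggregate_fold_scores(fold_scores, fold_test_sizes, iid=True):
    if iid:
        weighted = sum(s * n for s, n in zip(fold_scores, fold_test_sizes))
        return weighted / float(sum(fold_test_sizes))
    return sum(fold_scores) / float(len(fold_scores))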
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
.. versionchanged:: 0.17
Upgraded to joblib 0.9.3.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
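# Usage sketch: ``param_grid`` may also be a list of dicts, in which case the
# union of the sub-grids is explored (here the linear kernel is tried without
# ``gamma``). The helper name is illustrative; sklearn's SVC and the iris
# dataset are assumed to be importable.
def _example_grid_search_list_of_grids():
    from sklearn import svm, datasets
    iris = datasets.load_iris()
    param_grid = [
        {'kernel': ['linear'], 'C': [1, 10]},
        {'kernel': ['rbf'], 'C': [1, 10], 'gamma': [0.001, 0.01]},
    ]
    search = GridSearchCV(svm.SVC(), param_grid)
    search.fit(iris.data, iris.target)
    return search.best_params_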
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting(and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
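# Usage sketch for RandomizedSearchCV: continuous parameters are given as
# scipy.stats distributions (sampled through ``rvs``), discrete ones as
# lists. The helper name is illustrative; scipy and sklearn's SVC/iris
# dataset are assumed to be importable.
def _example_randomized_search():
    from scipy.stats import expon
    from sklearn import svm, datasets
    iris = datasets.load_iris()
    param_distributions = {'C': expon(scale=10),
                           'kernel': ['linear', 'rbf']}
    search = RandomizedSearchCV(svm.SVC(), param_distributions,
                                n_iter=8, random_state=0)
    search.fit(iris.data, iris.target)
    return search.best_params_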
|
bsd-3-clause
|
pnedunuri/scikit-learn
|
sklearn/datasets/tests/test_svmlight_format.py
|
228
|
11221
|
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
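# For reference, each line of an svmlight/libsvm file reads
# "<label> <index>:<value> <index>:<value> ...". The sketch below loads a
# tiny in-memory example (it is not the on-disk test fixture above).
def _example_svmlight_format():
    f = BytesIO(b("1 1:2.5 3:-0.5\n-1 2:1.0\n"))
    X, y = load_svmlight_file(f, zero_based=True)
    assert_array_equal(y, [1, -1])
    assert_equal(X.shape, (2, 4))
    assert_equal(X[0, 1], 2.5)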
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
|
bsd-3-clause
|
slipguru/ignet
|
icing/core/learning_function.py
|
2
|
17070
|
#!/usr/bin/env python
"""Learning function module for the mutation level correction.
Author: Federico Tomasi
Copyright (c) 2016, Federico Tomasi.
Licensed under the FreeBSD license (see LICENSE.txt).
"""
from __future__ import division, print_function
import logging
import matplotlib; matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns
import scipy.stats
import six
import warnings
from functools import partial
from itertools import chain
# from scipy.optimize import curve_fit
from scipy.optimize import least_squares
from sklearn import mixture
from sklearn.utils import shuffle
from icing.core import cloning
from icing.core import parallel_distance
from icing.models.model import model_matrix
from icing.utils import io, extra
def least_squares_mdl(x, u):
"""Model for least squares. Used by scipy.optimize.least_squares."""
return x[0] * (u ** 2 + x[1] * u) / (u ** 2 + x[2] * u + x[3])
def least_squares_jacobian(x, u, y):
"""Jacobian for least squares. Used by scipy.optimize.least_squares."""
J = np.empty((u.size, x.size))
den = u ** 2 + x[2] * u + x[3]
num = u ** 2 + x[1] * u
J[:, 0] = num / den
J[:, 1] = x[0] * u / den
J[:, 2] = -x[0] * num * u / den ** 2
J[:, 3] = -x[0] * num / den ** 2
return J
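# Illustrative sketch of how the model and Jacobian above plug into
# scipy.optimize.least_squares (mirroring the commented-out call further
# below in learning_function); the starting point and the synthetic data
# here are made up for illustration only.
def _example_least_squares_fit():
    u = np.linspace(0.1, 5, 50)
    y = least_squares_mdl([2.5, 3.9, 4.15, 3.9], u)
    res = least_squares(
        lambda x, u_, y_: least_squares_mdl(x, u_) - y_,
        x0=np.array([1., 1., 1., 1.]), jac=least_squares_jacobian,
        bounds=(0, 100), args=(u, y))
    return res.x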
# import md5
#
# def remove_duplicate_junctions(igs_list):
# """Remove igs which have same junction."""
# igs, juncs = [], []
# md5_list = []
# for ig in igs_list:
# junc = extra.junction_re(ig.junction)
# md = md5.new(junc).digest()
# if md not in md5_list:
# igs.append(ig)
# juncs.append(junc)
# md5_list.append(md)
# return igs, juncs
def remove_duplicate_junctions(igs):
igs = list(igs)
return igs, map(lambda x: extra.junction_re(x.junction), igs)
def make_hist(juncs1, juncs2, filename, lim_mut1, lim_mut2, type_ig='Mem',
mut=None, donor1='B4', donor2=None, bins=100,
min_seqs=0, ig1=None, ig2=None, is_intra=True,
sim_func_args=None, correction=False):
"""Make histogram and main computation of nearest similarities."""
if os.path.exists(filename + '.npz'):
        logging.critical(filename + '.npz exists.')
return filename
if len(juncs1) < min_seqs or len(juncs2) < min_seqs:
return ''
sim_func_args_2 = sim_func_args.copy()
cloning.set_defaults_sim_func(
sim_func_args_2, ig1 if is_intra else ig1 + ig2)
sim_func_args_2['correct'] = correction
sim_func_args_2['rm_duplicates'] = True
if not correction:
sim_func_args_2['tol'] = 1000
else:
sim_func_args_2['correction_function'] = correction
sim_func = partial(cloning.sim_function, **sim_func_args_2)
logging.info("Computing %s", filename)
if is_intra:
# dnearest = parallel_distance.dnearest_inter_padding(
# ig1, ig1, sim_func, filt=lambda x: 0 < x, func=max)
dnearest = parallel_distance.dnearest_intra_padding(
ig1, sim_func, filt=lambda x: x > 0, func=max)
# ig1, ig1, sim_func, filt=lambda x: 0 < x < 1, func=max)
else:
dnearest = parallel_distance.dnearest_inter_padding(
ig1, ig2, sim_func, filt=lambda x: 0 < x < 1, func=max)
if not os.path.exists(filename.split('/')[0]):
os.makedirs(filename.split('/')[0])
np.savez(filename, X=dnearest, mut=mut)
# Plot distance distribution
plt.figure(figsize=(20, 10))
plt.hist(dnearest, bins=bins, normed=True)
plt.title("Distances between " +
("{}-{}".format(donor1, donor2) if donor2 else "") +
" {} {:.3f}-{:.3f}% and {:.3f}-{:.3f}%"
.format(type_ig, lim_mut1[0], lim_mut1[1], *lim_mut2))
plt.ylabel('Count')
# plt.xlim([0, 1])
plt.xticks(np.linspace(0, 1, 21))
# plt.xlabel('Ham distance (normalised)')
plt.savefig(filename + ".png")
plt.close()
return filename
def shuffle_ig(igs, juncs, max_seqs):
if len(juncs) > max_seqs:
igs, juncs = shuffle(igs, juncs)
igs = igs[:max_seqs]
juncs = juncs[:max_seqs]
return igs, juncs
def intra_donor_distance(db='', lim_mut1=(0, 0), lim_mut2=(0, 0), type_ig='Mem',
quantity=.15, donor='B4', bins=100, max_seqs=1000,
n_tot=0,
min_seqs=100, sim_func_args=None, correction=False):
"""Nearest distances intra donor.
Subsets of Igs can be selected choosing two ranges of mutations.
"""
filename = \
"{0}/dist2nearest_{0}_{1}-{2}_vs_{3}-{4}_{5}bins_norm_{6}maxseqs" \
.format(donor, lim_mut1[0], lim_mut1[1], lim_mut2[0],
lim_mut2[1], bins, max_seqs) + \
('_correction' if correction else '')
# mut = min(lim_mut1[0], lim_mut2[0])
if os.path.exists(filename + '.npz'):
logging.info("File %s exists.", filename + '.npz')
# Plot distance distribution
plt.figure(figsize=(20, 10))
dnearest = np.load(filename + '.npz')['X']
plt.hist(dnearest, bins=bins, normed=True)
plt.title("Similarities for " +
("{}".format(donor)) +
" {} {:.3f}-{:.3f}% and {:.3f}-{:.3f}%"
.format(type_ig, lim_mut1[0], lim_mut1[1], *lim_mut2))
plt.ylabel('Count')
# plt.xlim([0, 1])
# plt.xticks(np.linspace(0, 1, 21))
# plt.xlabel('Ham distance (normalised)')
plt.savefig(filename + ".pdf")
plt.close()
return filename, float(np.load(filename + '.npz')['mut'])
readdb = partial(io.read_db, db, max_records=quantity * n_tot)
if max(lim_mut1[1], lim_mut2[1]) == 0:
igs = readdb(filt=(lambda x: x.mut == 0))
igs1, juncs1 = remove_duplicate_junctions(igs)
if len(igs1) < 2:
return '', 0
igs1, juncs1 = shuffle_ig(igs1, juncs1, max_seqs)
igs2 = igs1
juncs2 = juncs1
mut = 0
elif (lim_mut1[0] == lim_mut2[0] and lim_mut1[1] == lim_mut2[1]):
igs = readdb(filt=(lambda x: lim_mut1[0] < x.mut <= lim_mut1[1]))
igs1, juncs1 = remove_duplicate_junctions(igs)
if len(igs1) < 2:
return '', 0
igs1, juncs1 = shuffle_ig(igs1, juncs1, max_seqs)
igs2 = igs1
juncs2 = juncs1
mut = np.mean(list(chain((x.mut for x in igs1),
(x.mut for x in igs2))))
else:
igs = readdb(filt=(lambda x: lim_mut1[0] < x.mut <= lim_mut1[1]))
igs1, juncs1 = remove_duplicate_junctions(igs)
if len(igs1) < 2:
return '', 0
igs = readdb(filt=(lambda x: lim_mut2[0] < x.mut <= lim_mut2[1]))
igs2, juncs2 = remove_duplicate_junctions(igs)
if len(igs2) < 2:
return '', 0
if not len(juncs1) or not len(juncs2):
return '', 0
igs1, juncs1 = shuffle_ig(igs1, juncs1, max_seqs)
igs2, juncs2 = shuffle_ig(igs2, juncs2, max_seqs)
mut = np.mean(list(chain((x.mut for x in igs1),
(x.mut for x in igs2))))
# logging.info("Computing similarity ")
return make_hist(
juncs1, juncs2, filename, lim_mut1, lim_mut2, type_ig, mut,
donor, None, bins, min_seqs, ig1=igs1, ig2=igs2,
sim_func_args=sim_func_args, correction=correction), mut
def inter_donor_distance(f1='', f2='', lim_mut1=(0, 0), lim_mut2=(0, 0),
type_ig='Mem', donor1='B4', donor2='B5', bins=100,
max_seqs=1000, quantity=.15, sim_func_args=None):
"""Nearest distances inter donors.
Igs involved can be selected by choosing two possibly different ranges
of mutations.
"""
filename = \
"{0}/dnearest_{0}_{1}_{2}-{3}_vs_{4}-{5}_{6}bins_norm_{7}maxseqs" \
.format(donor1, donor2, lim_mut1[0], lim_mut1[1],
lim_mut2[0], lim_mut2[1], bins, max_seqs)
# mut = min(lim_mut1[0], lim_mut2[0])
if os.path.exists(filename + '.npz'):
logging.info("File %s exists.", filename + '.npz')
return filename, float(np.load(filename + '.npz')['mut'])
if max(lim_mut1[1], lim_mut2[1]) == 0:
igs = io.read_db(f1, filt=(lambda x: x.mut == 0))
_, juncs1 = remove_duplicate_junctions(igs)
igs = io.read_db(f2, filt=(lambda x: x.mut == 0))
_, juncs2 = remove_duplicate_junctions(igs)
mut = 0
elif max(lim_mut1[1], lim_mut2[1]) < 0:
# not specified: get at random
igs = io.read_db(f1)
_, juncs1 = remove_duplicate_junctions(igs)
igs = io.read_db(f2)
_, juncs2 = remove_duplicate_junctions(igs)
else:
igs = io.read_db(
f1, filt=(lambda x: lim_mut1[0] < x.mut <= lim_mut1[1]))
_, juncs1 = remove_duplicate_junctions(igs)
igs = io.read_db(
f2, filt=(lambda x: lim_mut2[0] < x.mut <= lim_mut2[1]))
_, juncs2 = remove_duplicate_junctions(igs)
juncs1 = juncs1[:int(quantity * len(juncs1))]
juncs2 = juncs2[:int(quantity * len(juncs2))]
return make_hist(
juncs1, juncs2, filename, lim_mut1, lim_mut2, type_ig, donor1,
donor2, bins, max_seqs, sim_func_args=sim_func_args), mut
def distr_muts(db, quantity=0.15, bins=50, max_seqs=4000, min_seqs=100,
sim_func_args=None, correction=False):
"""Create histograms and relative mutation levels using intra groups."""
logging.info("Analysing %s ...", db)
try:
max_mut, n_tot = io.get_max_mut(db)
# if max_mut < 1:
lin = np.linspace(0, max_mut, min(n_tot / 15., 12))
# lin = np.linspace(0, max_mut, 10.)
sets = [(0, 0)] + zip(lin[:-1], lin[1:])
# sets = [(0, 0)] + [(i - 1, i) for i in range(1, int(max_mut) + 1)]
if len(sets) == 1:
# no correction needs to be applied
return None
out_muts = [
intra_donor_distance(
db, i, j, quantity=quantity, donor=db.split('/')[-1],
bins=bins, max_seqs=max_seqs, min_seqs=min_seqs,
correction=correction, n_tot=n_tot,
sim_func_args=sim_func_args) for i, j in zip(sets, sets)]
except StandardError as msg:
logging.critical(msg)
out_muts = []
d = dict()
for f, m in out_muts:
d.setdefault(m, []).append(f)
return d
def _gaussian_fit(array):
if array.shape[0] < 2:
        logging.error("Cannot fit a Gaussian with fewer than two distances.")
return 0
array_2 = -(np.array(sorted(array)).reshape(-1, 1))
array = np.array(list(array_2) + list(array)).reshape(-1, 1)
try:
# new sklearn GMM
gmm = mixture.GaussianMixture(n_components=3,
covariance_type='diag')
gmm.fit(array)
# gmmmean = np.max(gmm.means_)
# gmmsigma = gmm.covariances_[np.argmax(gmm.means_)]
except AttributeError:
# use old sklearn method
gmm = mixture.GMM(n_components=3)
gmm.fit(array)
# gmmmean = np.max(gmm.means_)
# gmmsigma = gmm.covars_[np.argmax(gmm.means_)]
# Extract optimal threshold
plt.hist(array, bins=50, normed=True) # debug, print
lin = np.linspace(0, 1, 10000)[:, np.newaxis]
# plt.plot(lin, np.exp(gmm.score_samples(lin)[0]), 'r')
pred = gmm.predict(lin)
try:
idx = np.min(np.where(pred == np.argmax(gmm.means_))[0])
except ValueError:
# print("Error", np.unique(pred))
idx = 0
plt.axvline(x=lin[idx], linestyle='--', color='r')
# plt.gcf().savefig("threshold_naive{}.png".format(k))
plt.close()
threshold = lin[idx][0] # threshold
# np.save("threshold_naive", threshold)
return threshold
def mean_confidence_interval(data, confidence=0.95):
"""Return mean and confidence interval."""
data = np.array(data, dtype=float)
mean, se = np.mean(data), scipy.stats.sem(data)
h = se * scipy.stats.t._ppf((1 + confidence) / 2., data.shape[0] - 1)
return mean, mean - h, mean + h, h
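# Tiny usage sketch (illustrative helper): h is the half-width of the
# confidence interval, so the mean always lies between the returned bounds.
def _example_mean_confidence_interval():
    data = np.random.RandomState(0).randn(100)
    mean, low, high, h = mean_confidence_interval(data)
    assert low <= mean <= high
    return h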
def learning_function(my_dict, order=3, aplot='alphaplot.pdf'):
"""Learn the correction function given data in a dictionary.
Parameters
----------
    my_dict : dict
Organised as {mut: [mean_similarities]}. Calculated by `distr_muts`.
order : int, optional, default: 3
Order of the learning function (polynomial).
aplot : str, optional, default: 'alpha_plot.pdf'
Filename where to save the correction function plot.
Returns
-------
func : function
Function which accept the mutation level as parameter, returns the
correction to apply on the similarity measure calculated.
threshold : float
Deprecated. Returns the threshold for methods like Hierarchical
clustering to work in defining clones.
"""
if my_dict is None:
logging.critical("Cannot learn function with empty dict")
return lambda _: 1, 0
d_dict = dict()
samples, thresholds = [], []
for k, v in six.iteritems(my_dict):
for o in (_ for _ in v if _):
dnearest = np.array(np.load("{}.npz".format(o))['X']) \
.reshape(-1, 1)
var = np.var(dnearest)
if var == 0:
continue
med = np.median(dnearest)
mean, _, _, h = mean_confidence_interval(dnearest)
samples.append(dnearest.shape[0])
d_dict.setdefault(o.split('/')[0], dict()).setdefault(k, [med, h])
# for the threshold, fit a gaussian (unused for AP)
thresholds.append(_gaussian_fit(dnearest))
for k, v in six.iteritems(d_dict): # there is only one
xdata = np.array(sorted([x for x in v]))
ydata = np.array([np.mean(v[x][0]) for x in xdata])
yerr = np.array([np.mean(v[x][1]) for x in xdata])
# Take only significant values, higher than 0
mask = ydata > 0
xdata = xdata[mask]
if xdata.shape[0] < 2:
logging.critical("Too few points to learn function")
# no correction can be applied
return lambda _: 1, 0
ydata = ydata[mask]
ydata = ydata[0] / ydata # normalise
yerr = yerr[mask]
# res = least_squares(
# lambda x, u, y: least_squares_mdl(x, u) - y,
# x0=np.array([2.5, 3.9, 4.15, 3.9]),
# jac=least_squares_jacobian, bounds=(0, 100), args=(xdata, ydata),
# ftol=1e-12, loss='soft_l1')
order = min(order, xdata.shape[0] - 1)
warnings.filterwarnings("ignore")
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
poly = np.poly1d(np.polyfit(
xdata, ydata, order, w=1. / (yerr + 1e-15)))
except np.RankWarning:
logging.critical(
"Cannot fit polynomial with degree %d, npoints %d",
order, xdata.shape[0])
return lambda _: 1, 0
with sns.axes_style('whitegrid'):
sns.set_context('paper')
xp = np.linspace(np.min(xdata), np.max(xdata), 1000)[:, None]
plt.figure()
plt.errorbar(xdata, ydata, yerr,
label='Nearest similarity', marker='s')
plt.plot(xp, poly(xp), '-',
label='Learning function (poly of order {})'.format(order))
# plt.plot(xp, least_squares_mdl(res.x, xp), '-', label='least squares')
plt.xlabel(r'Mutation level')
plt.ylabel(r'Average similarity (not normalised)')
plt.legend(loc='lower left')
plt.savefig(aplot, transparent=True, bbox_inches='tight')
plt.close()
# poly = partial(model, res.x)
return poly, 1 - (filter(
lambda x: x > 0,
np.array(thresholds)[np.array(samples).argsort()[::-1]]) or [0])[0]
def generate_correction_function(db, quantity, sim_func_args=None, order=3,
root=''):
"""Generate correction function on the database analysed."""
db_no_ext = ".".join(db.split(".")[:-1])
filename = db_no_ext + "_correction_function.npy"
# case 1: file exists
aplot = os.path.join(root, db_no_ext.split('/')[-1] + '_alphaplot.pdf')
if os.path.exists(filename) and os.path.exists("threshold_naive.npy"):
logging.critical("Best parameters exists. Loading them ...")
popt = np.load(filename)
threshold_naive = np.load("threshold_naive.npy")
# case 2: file not exists
else:
my_dict = distr_muts(
db, quantity=quantity, min_seqs=10, max_seqs=1000,
sim_func_args=sim_func_args)
popt, threshold_naive = learning_function(my_dict, order, aplot)
# save for later, in case of analysis on the same db
# np.save(filename, popt) # TODO
# partial(extra.negative_exponential, a=popt[0], c=popt[1], d=popt[2]),
# my_dict = distr_muts(
# db, quantity=quantity, min_seqs=2, max_seqs=1000,
# sim_func_args=sim_func_args, correction=popt)
return (popt, threshold_naive, aplot)
|
bsd-2-clause
|
zak-k/iris
|
lib/iris/tests/unit/quickplot/test_points.py
|
11
|
2168
|
# (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.quickplot.points` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.quickplot as qplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
qplt.points(self.cube, coords=('bar', 'str_coord'))
self.assertBoundsTickLabels('yaxis')
def test_xaxis_labels(self):
qplt.points(self.cube, coords=('str_coord', 'bar'))
self.assertBoundsTickLabels('xaxis')
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=False)
self.foo = self.cube.coord('foo').points
self.foo_index = np.arange(self.foo.size)
self.bar = self.cube.coord('bar').points
self.bar_index = np.arange(self.bar.size)
self.data = None
self.dataT = None
self.mpl_patch = self.patch('matplotlib.pyplot.scatter')
self.draw_func = qplt.points
if __name__ == "__main__":
tests.main()
|
gpl-3.0
|
luo66/scikit-learn
|
sklearn/tests/test_isotonic.py
|
230
|
11087
|
import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
    # regression test for missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
|
bsd-3-clause
|
mblondel/scikit-learn
|
examples/covariance/plot_covariance_estimation.py
|
250
|
5070
|
"""
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is asymptotically
unbiased, i.e. it converges to the true (population) covariance when given
many observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
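# Illustrative sketch (an addition to this example, not in the original): for a
# given shrinkage s, ShrunkCovariance essentially blends the empirical
# covariance with a scaled identity,
#     shrunk = (1 - s) * emp_cov + s * mu * I,   mu = trace(emp_cov) / n_features
mu = np.trace(emp_cov) / n_features
shrunk_cov_example = (1. - 0.1) * emp_cov + 0.1 * mu * np.eye(n_features)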
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
|
bsd-3-clause
|
hande-qmc/hande
|
tools/pyhande/tests/test_canonical.py
|
1
|
7116
|
"""Tests for canonical.py."""
import warnings
import unittest
import pandas as pd
import numpy as np
import pyhande.canonical as canonical
import tests.create_mock_df as create_mock_df
class TestAnalyseHFObservables(unittest.TestCase):
"""Test canonical.analyse_hf_observables."""
def setUp(self):
# Create mock series containing means of various DMQMC
# observables as well as their covariances.
rng = np.random.default_rng(375)
means = [-90.0, -99.0, -198.0, 2.8, 10.0]
self.cols = [
r'Tr(T\rho_HF)', r'Tr(V\rho_HF)', r'Tr(H\rho_HF)', r'Tr(\rho_HF)',
'alt1'
]
self.means_series = pd.Series(means, index=self.cols)
cov_orig = create_mock_df.create_cov_frame(rng, self.cols, means, 1)
# reblock iterations are not a concept here:
self.cov_orig = cov_orig.loc[0]
# Shared exp results
self.result_exp = pd.Series(
[-32.142857, 4.004706, -35.357143, 4.122886, -70.714286, 10.74825],
index=[
'T_HF', 'T_HF_error', 'V_HF', 'V_HF_error', 'U_HF',
'U_HF_error'
]
)
def test_basic_input(self):
"""Basic input."""
nsamples = 100
result = canonical.analyse_hf_observables(
self.means_series.loc[self.cols[:-1]], self.cov_orig.drop(
columns=['alt1'], index=['alt1']
), nsamples
)
pd.testing.assert_series_equal(
result, self.result_exp, check_exact=False
)
def test_ignore_col(self):
"""Input with a column ('alt1') which should be ignored."""
nsamples = 100
result = canonical.analyse_hf_observables(
self.means_series, self.cov_orig, nsamples
)
pd.testing.assert_series_equal(
result, self.result_exp, check_exact=False
)
def test_unchanged_mutable(self):
"""Check that mutable objects, such as pd DataFrames, don't
change when they shouldn't.
"""
means_series_copy = self.means_series.copy()
cov_orig_copy = self.cov_orig.copy()
nsamples = 100
_ = canonical.analyse_hf_observables(
self.means_series, self.cov_orig, nsamples
)
pd.testing.assert_series_equal(
means_series_copy, self.means_series, check_exact=True
)
pd.testing.assert_frame_equal(
cov_orig_copy, self.cov_orig, check_exact=True
)
class TestEstimates(unittest.TestCase):
"""Test canonical.estimates."""
def setUp(self):
# Create mock DMQMC data as well as metadata.
rng = np.random.default_rng(375)
means = [-36.1, -39.3, -90.0, -99.0, 2.8, 0.13]
cols = [
'<T>_0', '<V>_0', r'Tr(T\rho_HF)', r'Tr(V\rho_HF)', r'Tr(\rho_HF)',
'N_ACC/N_ATT'
]
sine_periods = [3.2, 4.2, 7.1, 2.22, 5.73, 1.67]
noise_facs = [0.1, 0.9, 1.2, 0.2, 0.76, 0.04]
self.data = create_mock_df.create_qmc_frame(
rng, cols, means, sine_periods, noise_facs
)
# Note that metadata is most likely a dictionary, not a
# pd.DataFrame (contrary to what the function docstring says).
# See test_suite/mc_canonical_estimates/np1/H2O_ccpvdz_RHF for
# similar values (magnitude)
self.metadata1 = {
'nattempts': 8000, 'beta': 0.44, 'free_energy_corr': -77.4,
'fermi_temperature': False, 'alt1': 1
}
# See test_suite/mc_canonical_estimates/np1/ueg_n7_ec20_rs1 for
# similar values (magnitude)
self.metadata2 = {
'nattempts': 5000, 'beta': 0.9, 'free_energy_corr': -26.1,
'fermi_temperature': True, 'alt1': 1, 'system': {
'ueg': {'E_fermi': 3.2}
}
}
def test_not_ueg(self):
"""Not UEG."""
result = canonical.estimates(self.metadata1, self.data)
ind_exp = [
'U_0', 'T_0', 'V_0', 'N_ACC/N_ATT', 'F_0', 'S_0', 'T_HF', 'V_HF',
'U_HF'
]
ind_exp_full = []
for i in ind_exp:
ind_exp_full.extend([i, i+'_error'])
ind_exp_full = ['Beta'] + ind_exp_full
result_exp = pd.Series([
4.40000000e-01, -7.45375108e+01, 7.95735784e-01, -3.57175099e+01,
4.73795670e-01, -3.88200009e+01, 5.72059025e-01, 1.41132320e-01,
5.98715584e-03, -7.29498696e+01, 9.64142895e-02, 1.63822382e+01,
2.01610646e-01, -3.18572634e+01, 1.43161978e+00, -3.51519450e+01,
1.61877967e+00, -6.70092084e+01, 2.98339505e+00
], index=ind_exp_full)
pd.testing.assert_series_equal(result, result_exp, check_exact=False)
def test_ueg(self):
"""UEG."""
result = canonical.estimates(self.metadata2, self.data)
ind_exp = ['U_0', 'T_0', 'V_0', 'N_ACC/N_ATT', 'F_0', 'S_0',
'T_HF', 'V_HF', 'U_HF']
ind_exp_full = []
for i in ind_exp:
ind_exp_full.extend([i, i+'_error'])
ind_exp_full = ['Beta'] + ind_exp_full
result_exp = pd.Series([
9.00000000e-01, -7.45375108e+01, 7.95735784e-01, -3.57175099e+01,
4.73795670e-01, -3.88200009e+01, 5.72059025e-01, 1.41132320e-01,
5.98715584e-03, -19.138018, 0.1508347996, -4.662982060842,
0.12887270046, -3.18572634e+01, 1.43161978e+00, -3.51519450e+01,
1.61877967e+00, -6.70092084e+01, 2.98339505e+00
], index=ind_exp_full)
pd.testing.assert_series_equal(result, result_exp, check_exact=False)
def test_not_ueg_no_naccnatt_col(self):
"""No 'N_ACC/N_ATT' in columns (not UEG)
[todo] Should the metadata be adjusted as well?
"""
self.data.drop(columns=(['N_ACC/N_ATT']), inplace=True)
result = canonical.estimates(self.metadata1, self.data)
ind_exp = ['U_0', 'T_0', 'V_0', 'T_HF', 'V_HF', 'U_HF']
ind_exp_full = []
for i in ind_exp:
ind_exp_full.extend([i, i+'_error'])
ind_exp_full = ['Beta'] + ind_exp_full
result_exp = pd.Series([
4.40000000e-01, -74.574386427, 0.78720024, -35.86308032,
0.46069296, -38.7113061, 0.58596805, -32.23777087, 1.44645184,
-35.44043608, 1.63181849, -67.67820695, 3.01213831
], index=ind_exp_full)
pd.testing.assert_series_equal(result, result_exp, check_exact=False)
def test_unchanged_mutable(self):
"""Check that mutable objects, such as pd DataFrames, don't
change when they shouldn't.
"""
# data_copy = self.data.copy()
metadata1_copy = self.metadata1.copy()
_ = canonical.estimates(self.metadata1, self.data)
# pd.testing.assert_frame_equal(self.data, data_copy, check_exact=True)
# [todo] Is there a better way to do this warning?
warnings.warn("TestEstimates.test_unchanged_mutable: "
"Mutable data is changed in function! Fix?")
self.assertDictEqual(self.metadata1, metadata1_copy)
|
lgpl-2.1
|
SciTools/cartopy
|
lib/cartopy/tests/mpl/test_contour.py
|
2
|
2820
|
# Copyright Cartopy Contributors
#
# This file is part of Cartopy and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.interpolate import NearestNDInterpolator
from scipy.signal import convolve2d
import cartopy.crs as ccrs
@cleanup
def test_contour_plot_bounds():
x = np.linspace(-2763217.0, 2681906.0, 200)
y = np.linspace(-263790.62, 3230840.5, 130)
data = np.hypot(*np.meshgrid(x, y)) / 2e5
proj_lcc = ccrs.LambertConformal(central_longitude=-95,
central_latitude=25,
standard_parallels=[25])
ax = plt.axes(projection=proj_lcc)
ax.contourf(x, y, data, levels=np.arange(0, 40, 1))
assert_array_almost_equal(ax.get_extent(),
np.array([x[0], x[-1], y[0], y[-1]]))
# Levels that don't include data should not fail.
plt.figure()
ax = plt.axes(projection=proj_lcc)
ax.contourf(x, y, data, levels=np.max(data) + np.arange(1, 3))
@cleanup
def test_contour_linear_ring():
"""Test contourf with a section that only has 3 points."""
ax = plt.axes([0.01, 0.05, 0.898, 0.85], projection=ccrs.Mercator(),
aspect='equal')
ax.set_extent([-99.6, -89.0, 39.8, 45.5])
xbnds = ax.get_xlim()
ybnds = ax.get_ylim()
ll = ccrs.Geodetic().transform_point(xbnds[0], ybnds[0], ax.projection)
ul = ccrs.Geodetic().transform_point(xbnds[0], ybnds[1], ax.projection)
ur = ccrs.Geodetic().transform_point(xbnds[1], ybnds[1], ax.projection)
lr = ccrs.Geodetic().transform_point(xbnds[1], ybnds[0], ax.projection)
xi = np.linspace(min(ll[0], ul[0]), max(lr[0], ur[0]), 100)
yi = np.linspace(min(ll[1], ul[1]), max(ul[1], ur[1]), 100)
xi, yi = np.meshgrid(xi, yi)
nn = NearestNDInterpolator((np.arange(-94, -85), np.arange(36, 45)),
np.arange(9))
vals = nn(xi, yi)
lons = xi
lats = yi
window = np.ones((6, 6))
vals = convolve2d(vals, window / window.sum(), mode='same',
boundary='symm')
ax.contourf(lons, lats, vals, np.arange(9), transform=ccrs.PlateCarree())
plt.draw()
def test_contour_update_bounds():
"""Test that contour updates the extent"""
xs, ys = np.meshgrid(np.linspace(0, 360), np.linspace(-80, 80))
zs = ys**2
ax = plt.axes(projection=ccrs.Orthographic())
ax.contour(xs, ys, zs, transform=ccrs.PlateCarree())
# Force a draw, which is a smoke test to make sure contouring
# doesn't raise with an Orthographic projection
# GH issue 1673
plt.draw()
|
lgpl-3.0
|
dantexier/Frameword-PC
|
extraccion.py
|
1
|
1073
|
import pandas as pd
import csv
headers = ["Author","Title","C1","Abstract","C2","Cant02","Keywords","Cant03","Publication Title","Year"]
names = headers
df = pd.read_csv("CorpusCompleto_UNdeC.csv", header=0, names = headers)
# One file per abstract
x=0
for i in df['Abstract']:
x+=1
archivos = "Archivo_"+str(x)
file = open(archivos,"w")
file.write(i)
file.close()
'''
# LIST OF Keywords
for i in df['Keywords']:
x+=1
print (i)
'''
'''
# LIST OF TITLES
for i in df['Title']:
print (i,'.')
'''
''' Listing of the 42 articles
x = 0
for i in df['Title']:
x +=1
print (x)
print (df.iloc[(x-1)]['Author'])
print ("- TITLE:\n",i)
print (" Count:", len(i.split(" ")))
print ("- ABSTRACT:\n",df.iloc[(x-1)]['Abstract'])
print (" Count:", len(str(df.iloc[(x-1)]['Abstract']).split(" ")))
print ("- KEYWORDS:\n",df.iloc[(x-1)]['Keywords'])
print ("- JOURNAL:\n",df.iloc[(x-1)]['Publication Title'])
print ("- YEAR:\n",df.iloc[(x-1)]['Year'])
print ("\n\n\n")
'''
|
gpl-3.0
|
jakobworldpeace/scikit-learn
|
examples/model_selection/plot_learning_curve.py
|
76
|
4509
|
"""
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
very often in more complex datasets: the training score is very high at the
beginning and decreases, while the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
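# Illustrative addition (not part of the original example): an integer ``cv``
# is also accepted; for a classifier with binary/multiclass ``y`` it expands to
# stratified k-fold cross-validation.
plot_learning_curve(GaussianNB(), "Learning Curves (Naive Bayes, 5-fold CV)",
                    X, y, ylim=(0.7, 1.01), cv=5, n_jobs=4)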
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
|
bsd-3-clause
|
DamienIrving/ocean-analysis
|
modules/kde.py
|
1
|
11449
|
"""
Code for kernel density estimation.
- Source: http://nbviewer.jupyter.org/gist/tillahoffmann/f844bce2ec264c1c8cb5
"""
import numpy as np
from scipy.spatial.distance import cdist
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, shape (n, ), optional, default: None
An array of weights, of the same shape as `x`. Each value in `x`
only contributes its associated weight towards the bin count
(instead of 1).
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : float
Effective sample size using Kish's approximation.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
kde.evaluate(points) : ndarray
Evaluate the estimated pdf on a provided set of points.
kde(points) : ndarray
Same as kde.evaluate(points)
kde.pdf(points) : ndarray
Alias for ``kde.evaluate(points)``.
kde.set_bandwidth(bw_method='scott') : None
Computes the bandwidth, i.e. the coefficient that multiplies the data
covariance matrix to obtain the kernel covariance matrix.
.. versionadded:: 0.11.0
kde.covariance_factor : float
Computes the coefficient (`kde.factor`) that multiplies the data
covariance matrix to obtain the kernel covariance matrix.
The default is `scotts_factor`. A subclass can overwrite this method
to provide a different method, or set it through a call to
`kde.set_bandwidth`.
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
...     "Measurement model, return two coupled measurements."
...     m1 = np.random.normal(size=n)
...     m2 = np.random.normal(scale=0.5, size=n)
...     return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None, weights=None):
self.dataset = np.atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
if weights is not None:
self.weights = weights / np.sum(weights)
else:
self.weights = np.ones(self.n) / self.n
# Compute the effective sample size
# http://surveyanalysis.org/wiki/Design_Effects_and_Effective_Sample_Size#Kish.27s_approximate_formula_for_computing_effective_sample_size
self.neff = 1.0 / np.sum(self.weights ** 2)
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = np.atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = np.reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
# compute the normalised residuals
chi2 = cdist(points.T, self.dataset.T, 'mahalanobis', VI=self.inv_cov) ** 2
# compute the pdf
result = np.sum(np.exp(-.5 * chi2) * self.weights, axis=1) / self._norm_factor
return result
__call__ = evaluate
def scotts_factor(self):
return np.power(self.neff, -1./(self.d+4))
def silverman_factor(self):
return np.power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, str):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
# Compute the mean and residuals
_mean = np.sum(self.weights * self.dataset, axis=1)
_residual = (self.dataset - _mean[:, None])
# Compute the biased covariance
self._data_covariance = np.atleast_2d(np.dot(_residual * self.weights, _residual.T))
# Correct for bias (http://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_covariance)
self._data_covariance /= (1 - np.sum(self.weights ** 2))
self._data_inv_cov = np.linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) #* self.n
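if __name__ == '__main__':
    # Minimal smoke test (an addition, not part of the original module): with
    # uniform weights the weighted KDE should match the default unweighted one.
    rng = np.random.RandomState(0)
    sample = rng.normal(size=(1, 200))
    grid = np.linspace(-3.0, 3.0, 5)[np.newaxis, :]
    kde_default = gaussian_kde(sample)
    kde_uniform = gaussian_kde(sample, weights=np.ones(200))
    assert np.allclose(kde_default(grid), kde_uniform(grid))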
|
mit
|
tdhopper/scikit-learn
|
examples/model_selection/plot_roc_crossval.py
|
247
|
3253
|
"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area under the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
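# interp resamples this fold's TPR onto the common mean_fpr grid so the
# per-fold curves can be averaged point-wise.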
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
altaetran/bayesianoracle
|
tests/quadraticBayesianAveraging/2Dexample_multiple.py
|
1
|
2463
|
import numpy as np
import bayesianoracle as bo
from pprint import pprint as pp
ndim = 2
bmao = bo.optimizer.QuadraticBMAOptimizer(ndim=ndim)
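# Each loop iteration feeds the optimizer a noisy function value f and gradient
# g (plus a fixed Hessian H), all observed at the same point xk.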
for i in xrange(100):
H = np.array([[ 1389.4217, 1151.5168],
[ 1151.5168, 36896.3534]])
g = np.array([ 643.2191, 6206.7597])+np.random.normal(0, 10, (2,1))[:,0]
f = 596.83446220293399 + np.random.normal(0,10)
xk = np.array([ 0., 0.])
bmao.add_observation(xk, f, g, H)
print("hyperprior")
print(bmao.bma.kernel_prior.a)
print(bmao.bma.kernel_prior.scale)
bmao.optimize_hyperparameters()
#print(bmao.predict_with_unc(np.array([[-3.0797e-01 , -1.2921e-01]]).T))
#print(bmao.bma.estimate_model_weights(np.array([[-3.0797e-01 , -1.2921e-01]]).T, return_likelihoods=True))
print(bmao.predict_with_unc(np.array([xk]).T))
#print("lololol")
#pp(bmao.bma.estimate_model_weights(np.array([xk]).T, return_likelihoods=True))
print('done')
#pp(bmao.bma.calc_relevance_weights(np.array([xk]).T))
#pp(bmao.bma.quadratic_models)
#pp(bmao.bma.estimate_model_priors(np.array([xk]).T))
#pp(bmao.bma.model_predictions(np.array([xk]).T))
bma = bmao.bma
import matplotlib.pyplot as plt
### Likelihood plots
fig4, ax = plt.subplots(3, sharex=True)
kernel_grid = np.logspace(-2.0, 2.0, num=50)
# Get the likelihoods
unreg_loglikelihood = np.array([bma.loglikelihood(kernel_range, regularization=False, skew=False) for kernel_range in kernel_grid])
skewness = np.array([bma.estimate_skewness(kernel_range) for kernel_range in kernel_grid])
reg_loglikelihood = np.array([bma.loglikelihood(kernel_range) for kernel_range in kernel_grid])
# Plot the two terms
ll1 = ax[0].plot(kernel_grid, unreg_loglikelihood)
ax[0].set_xscale('log')
ll2 = ax[1].plot(kernel_grid, skewness)
ax[1].set_xscale('log')
ll3 = ax[2].plot(kernel_grid, reg_loglikelihood)
ax[2].set_xscale('log')
pp(reg_loglikelihood)
ax[0].set_xlim([kernel_grid[0],kernel_grid[-1]])
ax[1].set_xlim([kernel_grid[0],kernel_grid[-1]])
ax[2].set_xlim([kernel_grid[0],kernel_grid[-1]])
plt.setp(ll1, color="red", linewidth=3.0, alpha=0.5, linestyle='-',
dash_capstyle='round')
plt.setp(ll2, color="red", linewidth=3.0, alpha=0.5, linestyle='-',
dash_capstyle='round')
plt.setp(ll3, color="red", linewidth=3.0, alpha=0.5, linestyle='-',
dash_capstyle='round')
ax[2].set_xlabel("kernel range",fontsize=16)
plt.savefig("figures/2Dexample_bma_loglikelihood.png")
print("skew")
print(bma.estimate_skewness())
|
apache-2.0
|
anhaidgroup/py_entitymatching
|
py_entitymatching/tests/test_matcherselector_mlmatcherselection.py
|
1
|
27721
|
import os
from nose.tools import *
import unittest
import pandas as pd
import numpy as np
import six
from py_entitymatching.utils.generic_helper import get_install_path, list_diff
from py_entitymatching.io.parsers import read_csv_metadata
from py_entitymatching.matcherselector.mlmatcherselection import select_matcher
from py_entitymatching.matcher.dtmatcher import DTMatcher
from py_entitymatching.matcher.linregmatcher import LinRegMatcher
from py_entitymatching.matcher.logregmatcher import LogRegMatcher
from py_entitymatching.matcher.nbmatcher import NBMatcher
from py_entitymatching.matcher.rfmatcher import RFMatcher
from py_entitymatching.matcher.svmmatcher import SVMMatcher
import py_entitymatching.catalog.catalog_manager as cm
datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets',
'matcherselector'])
path_a = os.sep.join([datasets_path, 'DBLP_demo.csv'])
path_b = os.sep.join([datasets_path, 'ACM_demo.csv'])
path_c = os.sep.join([datasets_path, 'dblp_acm_demo_labels.csv'])
path_f = os.sep.join([datasets_path, 'feat_vecs.csv'])
class MLMatcherSelectionTestCases(unittest.TestCase):
def setUp(self):
cm.del_catalog()
def tearDown(self):
cm.del_catalog()
# @nottest
def test_select_matcher_valid_1(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# C['labels'] = labels
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
# xgmatcher = XGBoostMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher,
logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
target_attr='gold', k=7)
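# select_matcher returns a dict: the best matcher under 'selected_matcher',
# summary stats under 'cv_stats', and per-metric CV tables under
# 'drill_down_cv_stats'.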
header = ['Name', 'Matcher', 'Num folds']
result_df = result['drill_down_cv_stats']['precision']
self.assertEqual(set(header) == set(list(result_df.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_df.columns[len(result_df.columns) - 1])
d = result_df.set_index('Name')
p_max = d.loc[result['selected_matcher'].name, 'Mean score']
a_max = np.max(d['Mean score'])
self.assertEqual(p_max, a_max)
# @nottest
def test_select_matcher_valid_2(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = get_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillna(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.get_key(feature_vectors), cm.get_fk_ltable(feature_vectors),
cm.get_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y)
header = ['Name', 'Matcher', 'Num folds']
result_df = result['drill_down_cv_stats']['precision']
self.assertEqual(set(header) == set(list(result_df.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_df.columns[len(result_df.columns) - 1])
d = result_df.set_index('Name')
p_max = d.loc[result['selected_matcher'].name, 'Mean score']
a_max = np.max(d['Mean score'])
self.assertEqual(p_max, a_max)
# @nottest
def test_select_matcher_valid_3(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = get_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillna(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.get_key(feature_vectors), cm.get_fk_ltable(feature_vectors),
cm.get_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y, metric_to_select_matcher='recall', metrics_to_display=['recall'])
header = ['Name', 'Matcher', 'Num folds']
result_df = result['drill_down_cv_stats']['recall']
self.assertEqual(set(header) == set(list(result_df.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_df.columns[len(result_df.columns) - 1])
d = result_df.set_index('Name')
p_max = d.loc[result['selected_matcher'].name, 'Mean score']
a_max = np.max(d['Mean score'])
self.assertEqual(p_max, a_max)
# @nottest
def test_select_matcher_valid_4(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = get_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillna(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.get_key(feature_vectors), cm.get_fk_ltable(feature_vectors),
cm.get_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y, metric_to_select_matcher='f1', metrics_to_display=['f1'])
header = ['Name', 'Matcher', 'Num folds']
result_df = result['drill_down_cv_stats']['f1']
self.assertEqual(set(header) == set(list(result_df.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_df.columns[len(result_df.columns) - 1])
d = result_df.set_index('Name')
p_max = d.loc[result['selected_matcher'].name, 'Mean score']
a_max = np.max(d['Mean score'])
self.assertEqual(p_max, a_max)
# @nottest
def test_select_matcher_valid_5(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = get_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillna(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.get_key(feature_vectors), cm.get_fk_ltable(feature_vectors),
cm.get_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y, metric_to_select_matcher='f1', metrics_to_display=['f1'], k=4)
header = ['Name', 'Matcher', 'Num folds']
result_df = result['drill_down_cv_stats']['f1']
self.assertEqual(set(header) == set(list(result_df.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_df.columns[len(result_df.columns) - 1])
d = result_df.set_index('Name')
p_max = d.loc[result['selected_matcher'].name, 'Mean score']
a_max = np.max(d['Mean score'])
self.assertEqual(p_max, a_max)
def test_select_matcher_valid_6(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = get_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillna(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.get_fk_ltable(feature_vectors),
cm.get_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y)
header = ['Name', 'Matcher', 'Num folds']
result_df = result['drill_down_cv_stats']['precision']
self.assertEqual(set(header) == set(list(result_df.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_df.columns[len(result_df.columns) - 1])
d = result_df.set_index('Name')
p_max = d.loc[result['selected_matcher'].name, 'Mean score']
a_max = np.max(d['Mean score'])
self.assertEqual(p_max, a_max)
def test_select_matcher_valid_7(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = get_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillna(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.get_fk_ltable(feature_vectors),
cm.get_fk_rtable(feature_vectors)
])
feature_vectors = feature_vectors[l]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs='_id',
target_attr='gold', k=2)
header = ['Name', 'Matcher', 'Num folds']
result_df = result['drill_down_cv_stats']['precision']
self.assertEqual(set(header) == set(list(result_df.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_df.columns[len(result_df.columns) - 1])
d = result_df.set_index('Name')
p_max = d.loc[result['selected_matcher'].name, 'Mean score']
a_max = np.max(d['Mean score'])
self.assertEqual(p_max, a_max)
@raises(AssertionError)
def test_select_matcher_invalid_df(self):
select_matcher(matchers=[], table="", exclude_attrs=[], target_attr="")
@raises(SyntaxError)
def test_select_matcher_invalid_args(self):
select_matcher(matchers=[], table="", exclude_attrs=[])
@raises(AssertionError)
def test_select_matcher_target_attr_not_series(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = get_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillna(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.get_fk_ltable(feature_vectors),
cm.get_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors[['gold']]
result = select_matcher(matchers, x=X, y=Y)
@raises(AssertionError)
def test_select_matcher_ex_attrs_not_present(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = get_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillna(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.get_fk_ltable(feature_vectors),
cm.get_fk_rtable(feature_vectors)
])
feature_vectors = feature_vectors[l]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs='_id1',
target_attr='gold', k=2)
@raises(AssertionError)
def test_select_matcher_target_attr_not_present(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = get_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillna(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.get_fk_ltable(feature_vectors),
cm.get_fk_rtable(feature_vectors)
])
feature_vectors = feature_vectors[l]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs='_id',
target_attr='labels1', k=2)
def test_select_matcher_valid_multiple_metrics(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
target_attr='gold', k=7)
header = ['Name', 'Matcher', 'Num folds']
result_df_p = result['drill_down_cv_stats']['precision']
result_df_f = result['drill_down_cv_stats']['f1']
result_df_r = result['drill_down_cv_stats']['recall']
# Check header of precision dataframe
self.assertEqual(set(header) == set(list(result_df_p.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_df_p.columns[len(result_df_p.columns) - 1])
# Check header of f1 dataframe
self.assertEqual(set(header) == set(list(result_df_f.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_df_f.columns[len(result_df_f.columns) - 1])
# Check header of recall dataframe
self.assertEqual(set(header) == set(list(result_df_r.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_df_r.columns[len(result_df_r.columns) - 1])
d = result_df_p.set_index('Name')
p_max = d.loc[result['selected_matcher'].name, 'Mean score']
a_max = np.max(d['Mean score'])
self.assertEqual(p_max, a_max)
def test_select_matcher_valid_cv_stats(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
target_attr='gold', k=7)
header = ['Matcher', 'Average precision', 'Average recall', 'Average f1']
result_df = result['cv_stats']
result_df_p = result['drill_down_cv_stats']['precision']
self.assertEqual(set(header) == set(list(result_df.columns[[0, 1, 2, 3]])), True)
d = result_df.set_index('Matcher')
p_max = d.loc[result['selected_matcher'].name, 'Average precision']
a_max = np.max(result_df_p['Mean score'])
self.assertEqual(p_max, a_max)
def test_select_matcher_valid_cv_stats_2(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
metric_to_select_matcher='recall',
metrics_to_display=['recall', 'f1'],
target_attr='gold', k=7)
header = ['Matcher', 'Average recall', 'Average f1']
result_df = result['cv_stats']
result_df_r = result['drill_down_cv_stats']['recall']
self.assertEqual(set(header) == set(list(result_df.columns[[0, 1, 2]])), True)
d = result_df.set_index('Matcher')
p_max = d.loc[result['selected_matcher'].name, 'Average recall']
a_max = np.max(result_df_r['Mean score'])
self.assertEqual(p_max, a_max)
def test_select_matcher_valid_cv_stats_3(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
metric_to_select_matcher='recall',
metrics_to_display='recall',
target_attr='gold', k=7)
header = ['Matcher', 'Average recall']
result_df = result['cv_stats']
result_df_r = result['drill_down_cv_stats']['recall']
self.assertEqual(set(header) == set(list(result_df.columns[[0, 1]])), True)
d = result_df.set_index('Matcher')
p_max = d.loc[result['selected_matcher'].name, 'Average recall']
a_max = np.max(result_df_r['Mean score'])
self.assertEqual(p_max, a_max)
@raises(KeyError)
def test_select_matcher_invalid_no_display_drill_down(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
metrics_to_display=['precision'],
target_attr='gold', k=7)
result_df_p = result['drill_down_cv_stats']['recall']
@raises(AssertionError)
def test_select_matcher_invalid_metrics_to_display_1(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
metrics_to_display=None,
target_attr='gold', k=7)
@raises(AssertionError)
def test_select_matcher_invalid_metrics_to_display_2(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
metrics_to_display=['test'],
target_attr='gold', k=7)
@raises(AssertionError)
def test_select_matcher_invalid_metric_to_select_matcher(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
metric_to_select_matcher='test',
target_attr='gold', k=7)
|
bsd-3-clause
|
dfci/pancanmet_analysis
|
oldcode/pancan/covariation_onemetabolite.py
|
1
|
4702
|
# A script to examine the co-variation of one metabolite in particular in the pancan metabolomics data.
import os, sys, pandas as pd, numpy as np, scipy as sp, csv, scipy.stats as st, matplotlib.pyplot as plt, matplotlib.cm as cm, itertools as it, pdb
sys.path.append('..')
import reportertools as rt
plt.close('all')
plt.ion()
#########################################################################################
# Input parameters
#########################################################################################
hotmet = '2-Hydroxyglutarate'
# Read in the pancan metabolite file
alldata = pd.io.parsers.read_csv('../../data/merged_metabolon/alldata.csv', index_col = 0, header = 0)
# Make sure to drop first two rows
studylist = alldata.loc['Study']
tissuelist = alldata.loc['TissueType']
# Define the studies you are interested in, or leave as 'all' to do all
if len(sys.argv) > 2:
uqstudy = sys.argv[2].split() # Input is study names separated by space
colidx = [item for item in range(len(studylist)) if studylist[item] in uqstudy]
alldata = alldata[colidx]
studylist = [studylist[item] for item in colidx]
tissuelist = [tissuelist[item] for item in colidx]
alldata = alldata.ix[2:]
else:
uqstudy = 'all'
# Find the unique kinds of studies
uqstudy = np.unique( studylist )
uqstudy = [item for item in uqstudy if item !='nan']
colidx = [item for item in range(len(studylist)) if studylist[item] in uqstudy]
studylist = [studylist[item] for item in colidx[:-2]]
tissuelist = [tissuelist[item] for item in colidx[:-2]]
alldata = alldata.ix[2:]
# For each type of unique study, calculate the co-variation among metabolites
studyctr = 0
plotctr = 1
numsamples = np.zeros(len(uqstudy))
# Make one big dataframe
tempcols = [item + '_Tumor' for item in uqstudy] + [item + '_Normal' for item in uqstudy]
res = pd.DataFrame(index = alldata.index, columns = tempcols)
for study in uqstudy:
print study
# Find all tumors
tumidx = [item for item in range(len(studylist)) if (studylist[item] == study and tissuelist[item] == 'Tumor') ]
normidx = [item for item in range(len(studylist)) if (studylist[item] == study and tissuelist[item] == 'Normal') ]
# Retain only non-NA data
d = alldata.ix[:,tumidx + normidx]
d = d.dropna()
# Make sure this study has measured the metabolite in question
if hotmet not in d.index:
continue
else:
hotidx = np.where(d.index == hotmet)[0][0]
tumdata = d.ix[:,0:len(tumidx)].as_matrix()
normdata = d.ix[:,len(tumidx):].as_matrix()
# Create a dictionary indicating how metabolites map
mapdict = dict()
for item in d.index:
mapdict[ item ] = [idx for idx in range(alldata.shape[0]) if alldata.index[idx] == item][0]
# Calculate covariation of metabolites in this study
corrcalc = st.spearmanr( np.transpose( tumdata ) )
corrcalc_normal = st.spearmanr( np.transpose( normdata ) )
# Get rid of nans
	nanmask = np.isnan(corrcalc[0])
	corrcalc[0][nanmask] = 0
	corrcalc[1][nanmask] = 1
	nanmask_normal = np.isnan(corrcalc_normal[0])
	corrcalc_normal[0][nanmask_normal] = 0
	corrcalc_normal[1][nanmask_normal] = 1
# Filter out insignificant correlations
corrcalc[0][np.where(corrcalc[1]>0.05)] = 0
corrcalc_normal[0][np.where(corrcalc_normal[1]>0.05)] = 0
# Extract data of interest
hotcorr = corrcalc[0][hotidx,:]
hotcorr_normal = corrcalc_normal[0][hotidx,:]
# Save it in the big data file
nz = np.where(hotcorr!=0)[0]
temp =[d.index[item] for item in nz]
restumidx = [mapdict[item] for item in temp]
res.ix[restumidx,study+'_Tumor'] = hotcorr[nz]
nz_normal = np.where(hotcorr_normal!=0)[0]
temp =[d.index[item] for item in nz_normal]
resnormidx = [mapdict[item] for item in temp]
res.ix[resnormidx,study+'_Normal'] = hotcorr_normal[nz_normal]
print [d.index[item] for item in np.argsort(hotcorr)]
print '\n\n'
print [d.index[item] for item in np.argsort(hotcorr_normal)]
print '\n\n'
mergedata = np.vstack((hotcorr,hotcorr_normal)).transpose()
#tempdf = pd.DataFrame(mergedata,columns = ['Tumor','Normal'],index = d.index)
#tempdf.to_csv('/Users/ereznik/Documents/randomprojects/inklehofer/2hg_pancan/' + study+'.csv')
# Make a heatmap
resdrop = res.dropna(how = 'all')
resdrop = resdrop.fillna(0)
rsums = np.sum( np.sign( np.abs( resdrop ) ), axis =1 )
nzmets = np.where(rsums>4)[0]
plotdata = resdrop.ix[nzmets,:].as_matrix()
temp1,temp2,index = rt.cluster(plotdata,0)
plotdata = plotdata[index,:]
nzmets = [nzmets[item] for item in index]
plt.imshow(plotdata,interpolation = 'nearest',cmap=cm.seismic, aspect = 'auto' )
plt.xticks( range(len(tempcols)), tempcols, fontsize=6, rotation = 90 )
plt.yticks( range(len(nzmets)), resdrop.index[nzmets], fontsize = 6)
plt.colorbar()
|
lgpl-3.0
|
schuelaw/EngineeringMathPython
|
FiniteDifferences/CrankNicolsonAnim.py
|
1
|
1836
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 5 18:40:07 2015
@author: Albert
"""
""" Solve the 1D heat equation using a Crank-Nicolson scheme. """
""" Assuming fixed, homog, BC's """
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import solveh_banded
import matplotlib.animation as animation
# Length of interval.
L = 1
# Thermal conductivity.
k = 1
# Mesh size in x direction
N = 100
dx = L/float(N)
# Step size in time, no stability condition needed.
dt = dx/10
# Number of time steps to compute
M = 100
# Define the initial condition
def f(x):
""" Initial temperature. """
return (np.sin(2*np.pi*x))**2
#if .25 <= x <=.5:
# return 1
#else:
# return 0
# Generate mesh of x values
X = [(j+1)*dx for j in range(N-1)]
# Solution will be a list of lists, one list for each time step.
u = []
# Compute the t=0 list from initial condition, add to list of time steps
q = []
for j in range(N-1):
q.append(f(dx*(j+1)))
u.append(q)
# Compute subsequent timesteps
s = k*dt/dx**2
# Create banded matrix
A = np.zeros(2*(N-1)).reshape(2,N-1)
for i in range(1,N-1):
A[0,i]=-s
for i in range(0,N-1):
A[1,i]=1+2*s
# Compute successive time steps.
for m in range(M):
d = np.zeros(N-1)
d[0] = (1-2*s)*u[m][0] + s*u[m][1]
d[N-2]=s*u[m][N-3] + (1-2*s)*u[m][N-2]
for j in range(1,N-2):
d[j]=(s*u[m][j-1] + (1-2*s)*u[m][j] + s*u[m][j+1])
q = solveh_banded(A,d)
u.append(q)
fig, ax = plt.subplots()
line, = ax.plot(X,u[0])
def animate(i):
line.set_ydata(u[i])
return line,
ani = animation.FuncAnimation(fig,animate,np.arange(1,M),interval=50)
plt.show()
print("time step:", dt)
print("final time:",dt*(M-1))
|
gpl-2.0
|
alexnowakvila/DCN
|
code/K-means/data_generator.py
|
1
|
4780
|
import numpy as np
import os
from scipy.spatial import ConvexHull
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import matplotlib.cm as cm
class Generator(object):
def __init__(
self, num_examples_train, num_examples_test, num_clusters,
dataset_path, batch_size
):
self.num_examples_train = num_examples_train
self.num_examples_test = num_examples_test
self.batch_size = batch_size
self.dataset_path = dataset_path
self.input_size = 2
self.task = 'kmeans'
# clusters_train = [4, 8, 16]
clusters_train = [num_clusters]
clusters_test = [num_clusters]
self.clusters = {'train': clusters_train, 'test': clusters_test}
self.data = {'train': {}, 'test': {}}
def load_dataset(self):
for mode in ['train', 'test']:
for cl in self.clusters[mode]:
path = os.path.join(self.dataset_path, mode + str(cl))
path = path + 'kmeans_gauss.npz'
if os.path.exists(path):
print('Reading {} dataset for {} scales'
.format(mode, cl))
npz = np.load(path)
self.data[mode][cl] = {'x': npz['x'], 'y': npz['y']}
else:
x, y = self.create(clusters=cl, mode=mode)
self.data[mode][cl] = {'x': x, 'y': y}
# save
np.savez(path, x=x, y=y)
print('Created {} dataset for {} scales'
.format(mode, cl))
def get_batch(self, batch=0, clusters=3, mode="train"):
bs = self.batch_size
batch_x = self.data[mode][clusters]['x'][batch * bs: (batch + 1) * bs]
batch_y = self.data[mode][clusters]['y'][batch * bs: (batch + 1) * bs]
return batch_x, batch_y
def compute_length(self, clusters):
length = np.random.randint(10 * clusters, 10 * clusters + 1)
max_length = 10 * clusters
return length, max_length
def kmeans_example(self, length, clusters):
points = np.random.uniform(0, 1, [length, 2])
kmeans = KMeans(n_clusters=clusters).fit(points)
labels = kmeans.labels_.astype(int)
target = np.array(labels)
# target = np.zeros([length])
return points, target
def pca_example(self, length):
points = np.random.uniform(0, 1, [length, 2])
ind1 = np.where(points[:, 0] < 0.5)[0]
target = np.zeros([length])
target[ind1] = 1
return points, target
def gaussian_example(self, length, clusters):
centers = np.random.uniform(0, 1, [clusters, 2])
per_cl = length // clusters
Pts = []
cov = 0.001 * np.eye(2, 2)
target = np.zeros([length])
for c in range(clusters):
points = np.random.multivariate_normal(centers[c], cov, per_cl)
target[c * per_cl: (c + 1) * per_cl] = c
Pts.append(points)
points = np.reshape(Pts, [-1, 2])
rand_perm = np.random.permutation(length)
points = points[rand_perm]
target = target[rand_perm]
return points, target
def plot_example(self, x, y, clusters, length):
plt.figure(0)
plt.clf()
colors = cm.rainbow(np.linspace(0, 1, clusters))
for c in range(clusters):
ind = np.where(y == c)[0]
plt.scatter(x[ind, 0], x[ind, 1], c=colors[c])
path = '/home/anowak/DynamicProgramming/DP/plots/example.png'
plt.savefig(path)
def create(self, clusters=3, mode='train'):
if mode == 'train':
num_examples = self.num_examples_train
else:
num_examples = self.num_examples_test
_, max_length = self.compute_length(clusters)
x = -1 * np.ones([num_examples, max_length, self.input_size])
y = 1e6 * np.ones([num_examples, max_length])
for ex in range(num_examples):
length, max_length = self.compute_length(clusters)
if self.task == "kmeans":
# x_ex, y_ex = self.kmeans_example(length, clusters)
# x_ex, y_ex = self.pca_example(length)
x_ex, y_ex = self.gaussian_example(length, clusters)
if ex % 8000 == 7999:
print('Created example {}'.format(ex))
# self.plot_example(x_ex, y_ex, clusters, length)
else:
raise ValueError("task {} not implemented"
.format(self.task))
x[ex, :length], y[ex, :length] = x_ex, y_ex
return x, y
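# Minimal usage sketch (reuses only names defined above; the dataset path
# '/tmp' is a placeholder, not part of the original configuration):
if __name__ == '__main__':
    gen = Generator(num_examples_train=100, num_examples_test=10,
                    num_clusters=4, dataset_path='/tmp', batch_size=8)
    gen.load_dataset()  # creates or reads the cached .npz files
    batch_x, batch_y = gen.get_batch(batch=0, clusters=4, mode='train')
    print(batch_x.shape, batch_y.shape)  # (8, 40, 2) and (8, 40)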
|
bsd-3-clause
|
roshantha9/AbstractManycoreSim
|
src/RunSim_Exp_PSAlgoTest.py
|
1
|
6431
|
import sys, os, csv, pprint, math
import argparse
import numpy as np
import random
import shutil
import time
## uncomment when running under CLI only version ##
#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy.stats
from matplotlib.colors import ListedColormap, NoNorm
from matplotlib import mlab
from itertools import cycle # for automatic markers
import matplotlib.cm as cm
from matplotlib.font_manager import FontProperties
from libResourceManager.AdmissionControllerOptions import AdmissionControllerOptions
from libMappingAndScheduling.SemiDynamic.TaskMappingSchemes import TaskMappingSchemes
from libMappingAndScheduling.SemiDynamic.TaskSemiDynamicPrioritySchemes import TaskSemiDynamicPrioritySchemes
from libMappingAndScheduling.SemiDynamic.TaskMappingAndPriAssCombinedSchemes import TaskMappingAndPriAssCombinedSchemes
from libMappingAndScheduling.Decentralised.TaskRemapDecentSchemes import TaskRemapDecentSchemes
from SimParams import SimParams
import Multicore_MPEG_Model as MMMSim
NUM_WORKFLOWS = range(8, 17, 2)
#NUM_WORKFLOWS = [12]
NUM_NODES = [2,4,8,16,32]
NOC_XY = [(2,1), (2,2), (2,4), (2,8), (2,16)]
# lateness vs. number of cores
def runSim_Simple():
seed = 99108
print "SEED === " + str(seed)
# fixed params
SimParams.SLACK_FEEDBACK_ENABLED = False
SimParams.NOC_W = 4
SimParams.NOC_H = 4
SimParams.NUM_NODES = (SimParams.NOC_W * SimParams.NOC_H)
SimParams.NUM_WORKFLOWS = SimParams.NUM_NODES + 3
SimParams.NUM_INPUTBUFFERS = SimParams.NUM_WORKFLOWS
SimParams.PSALGO_ENABLED = True
SimParams.DYNAMIC_TASK_REMAPPING_ENABLED = True
SimParams.DYNAMIC_TASK_REMAPPING_SCHEME = TaskRemapDecentSchemes.TASKREMAPPINGDECENTSCHEMES_RANDOM_QUEEN
SimParams.CPUNODE_MONITOR_TASKSET_SLACK = True
random.seed(seed)
np.random.seed(seed)
print "----------------------------------------------------------------------------------------------------------------------------"
print "Running runSim_Simple : num_wf=" + str(SimParams.NUM_WORKFLOWS) + \
", mapping="+str(SimParams.DYNAMIC_TASK_MAPPING_SCHEME) + \
", pri_ass="+str(SimParams.DYNAMIC_TASK_PRIASS_SCHEME)
print "----------------------------------------------------------------------------------------------------------------------------"
env, last_scheduled_task_time = MMMSim.runMainSimulation()
env.run(until=last_scheduled_task_time+SimParams.SIM_RUNTIME)
print env.now
# name the report filenames
tm_fname = "test__timeline.png"
vs_bs_fname = "test__vsbs.js"
util_fname = "test__util.js"
wf_res_fname = "test__wfressumm.js"
gops_opbuff_fname = "test__gopsopbuffsumm.js"
rmtbl_dt_fname = "test__rmtbldt.js"
ibuff_fname = "test__ibuff.js"
obuff_fname = "test__obuff.js"
nodetqs_fname = "test__nodetqs.js"
rmtaskrelease_fname = "test__rmtaskrel.js"
mappingandpriass_fname = "test__mappingandpriass.js"
flowscompleted_fname = "test__flwcompleted.js"
nodetaskexectime_fname = "test__nodetaskexectime.js"
psalgo_nodeprops = "test__psalgonodeprops.js"
flowsadded_fname = "test__flwsadded.js"
trminfo_fname = "test__taskremappinginfo.js"
nodecumslack_fname = "test__nodecumslack.js"
(wf_results_summary, gops_in_outputbuff_summary) = MMMSim.SimMon.report_DecodedWorkflows_Summary(timeline_fname=tm_fname,
wf_res_summary_fname = wf_res_fname,
gops_opbuff_summary_fname = gops_opbuff_fname,
rmtbl_dt_summary_fname = rmtbl_dt_fname,
output_format = "json")
MMMSim.SimMon.report_VideoStream_BasicStats(wf_results_summary, vs_bs_fname)
MMMSim.SimMon.report_InstUtilisation(dump_to_file=util_fname)
MMMSim.SimMon.report_InputBuffer(dump_to_file=ibuff_fname)
MMMSim.SimMon.report_NodeTQs(dump_to_file=nodetqs_fname)
MMMSim.SimMon.report_OutputBufferContents(dump_to_file=obuff_fname)
MMMSim.SimMon.report_RMTaskReleaseInfo(dump_to_file=rmtaskrelease_fname)
MMMSim.SimMon.report_MappingAndPriAssInfo(dump_to_file=mappingandpriass_fname)
MMMSim.SimMon.report_FlowsCompleted(dump_to_file=flowscompleted_fname)
MMMSim.SimMon.report_NodeTaskExecTimeline(dump_to_file=nodetaskexectime_fname)
MMMSim.SimMon.report_PSAlgoNodePSProps(dump_to_file=psalgo_nodeprops)
MMMSim.SimMon.report_FlowsAdded(dump_to_file=flowsadded_fname)
MMMSim.SimMon.report_TaskRemappingInfo(dump_to_file=trminfo_fname)
MMMSim.SimMon.report_NodeCumSlack(dump_to_file=nodecumslack_fname)
def _makeDir(directory):
try:
os.stat(directory)
except:
os.makedirs(directory)
############################################################################
############################################################################
## MAIN SCRIPT SECTION
############################################################################
############################################################################
sys.setrecursionlimit(1500)
# collect command line params
parser = argparse.ArgumentParser(__file__, description="Run specified experiment on abstract simulator")
parser.add_argument("--exp_type", "-t", help="Experiment Type", default=None) # which experiment ?
# params for mapping + pri ass exp
parser.add_argument("--wf_num", "-w", help="Number of workflows", type=int, default=-1)
parser.add_argument("--forced_seed", help="experiment - seed", type=int, default=-1)
args = parser.parse_args()
####################################
## check which experiment to run ##
####################################
#### multiple workflows - variable heuristics ####
if(args.exp_type == "Exp_Simple"):
runSim_Simple()
else:
parser.print_usage()
sys.exit("invalid arguments")
|
gpl-3.0
|
sodafree/backend
|
build/ipython/build/lib.linux-i686-2.7/IPython/core/magics/pylab.py
|
3
|
3273
|
"""Implementation of magic functions for matplotlib/pylab support.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Our own packages
from IPython.config.application import Application
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.testing.skipdoctest import skip_doctest
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
@magics_class
class PylabMagics(Magics):
"""Magics related to matplotlib's pylab support"""
@skip_doctest
@line_magic
def pylab(self, parameter_s=''):
"""Load numpy and matplotlib to work interactively.
%pylab [GUINAME]
This function lets you activate pylab (matplotlib, numpy and
interactive support) at any point during an IPython session.
It will import at the top level numpy as np, pyplot as plt, matplotlib,
pylab and mlab, as well as all names from numpy and pylab.
If you are using the inline matplotlib backend for embedded figures,
you can adjust its behavior via the %config magic::
# enable SVG figures, necessary for SVG+XHTML export in the qtconsole
In [1]: %config InlineBackend.figure_format = 'svg'
# change the behavior of closing all figures at the end of each
# execution (cell), or allowing reuse of active figures across
# cells:
In [2]: %config InlineBackend.close_figures = False
Parameters
----------
guiname : optional
One of the valid arguments to the %gui magic ('qt', 'wx', 'gtk',
'osx' or 'tk'). If given, the corresponding Matplotlib backend is
used, otherwise matplotlib's default (which you can override in your
matplotlib config file) is used.
Examples
--------
In this case, where the MPL default is TkAgg::
In [2]: %pylab
Welcome to pylab, a matplotlib-based Python environment.
Backend in use: TkAgg
For more information, type 'help(pylab)'.
But you can explicitly request a different backend::
In [3]: %pylab qt
Welcome to pylab, a matplotlib-based Python environment.
Backend in use: Qt4Agg
For more information, type 'help(pylab)'.
"""
if Application.initialized():
app = Application.instance()
try:
import_all_status = app.pylab_import_all
except AttributeError:
import_all_status = True
else:
import_all_status = True
self.shell.enable_pylab(parameter_s, import_all=import_all_status)
|
bsd-3-clause
|
amaggi/bda
|
chapter_02/ex_05.py
|
1
|
2121
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.misc import comb
from scipy.stats import uniform, beta
from scipy.integrate import trapz
N_THROWS = 10
NPTS = 100
k_vals = np.arange(N_THROWS+1)
def Pr_y_is_k_given_theta(k, theta):
# coin toss probability
return comb(N_THROWS, k) * theta**k * (1-theta)**(N_THROWS-k)
def Pr_y_is_k(k, theta):
# integrate over all values of theta
y = Pr_y_is_k_given_theta(k, theta)
return trapz(y, theta)
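# For reference: with a uniform prior on [0, 1] the marginal (prior predictive)
# probability is
#     Pr(y = k) = integral_0^1 Pr(y = k | theta) d(theta) = 1/(N_THROWS + 1)
# for every k, which is exactly the flat `expected` array below. For a
# non-uniform prior p(theta), the marginal would instead be
#     Pr(y = k) = integral_0^1 Pr(y = k | theta) p(theta) d(theta).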
# prior for theta
#theta_prior = uniform(0, 1)
theta_prior = beta(1, 2)
theta = np.linspace(theta_prior.ppf(0), theta_prior.ppf(1), NPTS)
Pr_y = np.empty(N_THROWS+1, dtype=float)
for k in xrange(N_THROWS+1):
Pr_y[k] = Pr_y_is_k(k, theta)
expected = np.ones(N_THROWS+1) * 1/np.float(N_THROWS+1)
#plt.figure()
fig, axes = plt.subplots(1, 3)
fig.set_size_inches(15, 5)
plt.sca(axes[0])
plt.plot(k_vals, Pr_y_is_k_given_theta(k_vals, 0.2), 'b', label='theta=0.2')
plt.plot(k_vals, Pr_y_is_k_given_theta(k_vals, 0.8), 'r', label='theta=0.8')
plt.plot(k_vals, Pr_y_is_k_given_theta(k_vals, 0.4), 'g', label='theta=0.4')
plt.legend()
plt.xlabel('k')
plt.ylabel('Pr(y=k|theta)')
plt.sca(axes[1])
plt.plot(theta, Pr_y_is_k_given_theta(0, theta), 'b', label='k=0')
plt.plot(theta, Pr_y_is_k_given_theta(N_THROWS/2, theta), 'r',
label='k=%d'%(N_THROWS/2))
plt.plot(theta, Pr_y_is_k_given_theta(N_THROWS, theta), 'g',
label='k=%d'%(N_THROWS))
plt.legend()
plt.xlabel('theta')
plt.ylabel('Pr(y=k|theta)')
plt.sca(axes[2])
plt.plot(k_vals, Pr_y, 'b', label='computed')
plt.plot(k_vals, expected, 'r', label='expected')
plt.xlabel('k')
plt.ylabel('Pr(y=k)')
plt.legend()
#plt.show()
plt.close()
Pr_theta = np.empty(N_THROWS+1, dtype=object)
for k in xrange(N_THROWS+1):
Pr_y[k] = Pr_y_is_k(k, theta)
Pr_theta[k] = theta_prior.pdf(theta) * Pr_y_is_k_given_theta(k, theta) *\
(N_THROWS+1)
print trapz(Pr_theta[k], theta)
fig, axes = plt.subplots(1, 2)
plt.sca(axes[0])
for k in xrange(N_THROWS+1):
plt.plot(theta, Pr_theta[k])
plt.xlabel('theta')
plt.ylabel('P(theta|k)')
plt.show()
plt.close()
|
gpl-2.0
|
stephenslab/dsc2
|
src/yhat_sqldf.py
|
1
|
5997
|
import inspect
from contextlib import contextmanager
from pandas.io.sql import to_sql, read_sql
from sqlalchemy import create_engine
import re
from warnings import catch_warnings, filterwarnings
from sqlalchemy.exc import DatabaseError, ResourceClosedError
from sqlalchemy.pool import NullPool
from sqlalchemy.event import listen
__all__ = ['PandaSQL', 'PandaSQLException', 'sqldf']
class PandaSQLException(Exception):
pass
class PandaSQL:
def __init__(self, db_uri='sqlite:///:memory:', persist=False):
"""
Initialize with a specific database.
:param db_uri: SQLAlchemy-compatible database URI.
:param persist: keep tables in database between different calls on the same object of this class.
"""
self.engine = create_engine(db_uri, poolclass=NullPool)
if self.engine.name == 'sqlite':
listen(self.engine, 'connect', self._set_text_factory)
if self.engine.name not in ('sqlite', 'postgresql'):
raise PandaSQLException(
'Currently only sqlite and postgresql are supported.')
self.persist = persist
self.loaded_tables = set()
if self.persist:
self._conn = self.engine.connect()
self._init_connection(self._conn)
def __call__(self, query, env=None, names=None):
"""
Execute the SQL query.
Automatically creates tables mentioned in the query from dataframes before executing.
:param query: SQL query string, which can reference pandas dataframes as SQL tables.
:param env: Variables environment - a dict mapping table names to pandas dataframes.
If not specified use local and global variables of the caller.
:return: Pandas dataframe with the result of the SQL query.
"""
if env is None:
env = get_outer_frame_variables()
if names is None:
names = extract_table_names(query)
with self.conn as conn:
for table_name in names:
if table_name not in env:
# don't raise error because the table may be already in the database
continue
if self.persist and table_name in self.loaded_tables:
# table was loaded before using the same instance, don't do it again
continue
self.loaded_tables.add(table_name)
write_table(env[table_name], table_name, conn)
try:
result = read_sql(query, conn)
except DatabaseError as ex:
raise PandaSQLException(ex)
except ResourceClosedError:
# query returns nothing
result = None
return result
@property
@contextmanager
def conn(self):
if self.persist:
# the connection is created in __init__, so just return it
yield self._conn
# no cleanup needed
else:
# create the connection
conn = self.engine.connect()
self._init_connection(conn)
try:
yield conn
finally:
# cleanup - close connection on exit
conn.close()
def _init_connection(self, conn):
if self.engine.name == 'postgresql':
conn.execute('set search_path to pg_temp')
@staticmethod
def _set_text_factory(dbapi_con, connection_record):
dbapi_con.text_factory = str
def get_outer_frame_variables():
""" Get a dict of local and global variables of the first outer frame from another file. """
cur_filename = inspect.getframeinfo(inspect.currentframe()).filename
outer_frame = next(f
for f in inspect.getouterframes(inspect.currentframe())
if f.filename != cur_filename)
variables = {}
variables.update(outer_frame.frame.f_globals)
variables.update(outer_frame.frame.f_locals)
return variables
def extract_table_names(query):
""" Extract table names from an SQL query. """
# a good old fashioned regex. turns out this worked better than actually parsing the code
tables_blocks = re.findall(r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)', query,
re.IGNORECASE)
tables = [
tbl for block in tables_blocks for tbl in re.findall(r'\w+', block)
]
return set(tables)
def write_table(df, tablename, conn):
""" Write a dataframe to the database. """
with catch_warnings():
filterwarnings(
'ignore',
message=
'The provided table name \'%s\' is not found exactly as such in the database'
% tablename)
to_sql(df,
name=tablename,
con=conn,
index=not any(name is None for name in df.index.names)
) # load index into db if all levels are named
def sqldf(query, env=None, names=None, db_uri='sqlite:///:memory:'):
"""
Query pandas data frames using sql syntax
This function is meant for backward compatibility only. New users are encouraged to use the PandaSQL class.
Parameters
----------
query: string
a sql query using DataFrames as tables
env: locals() or globals()
variable environment; locals() or globals() in your function
allows sqldf to access the variables in your python environment
db_uri: string
SQLAlchemy-compatible database URI
Returns
-------
result: DataFrame
returns a DataFrame with your query's result
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({
"x": range(100),
"y": range(100)
})
>>> from pandasql import sqldf
>>> sqldf("select * from df;", globals())
>>> sqldf("select * from df;", locals())
>>> sqldf("select avg(x) from df;", locals())
"""
    return PandaSQL(db_uri)(query, env, set([x for x in names if x]) if names else None)
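# A minimal usage sketch of the class-based interface above (the dataframe name
# 'df' and the query text are illustrative only):
#
#     import pandas as pd
#     df = pd.DataFrame({'x': range(10), 'y': range(10)})
#     pdsql = PandaSQL(persist=True)  # keep loaded tables between calls
#     out = pdsql('SELECT x, y FROM df WHERE x > 5', env=locals(), names=['df'])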
|
mit
|
larsmans/scikit-learn
|
benchmarks/bench_tree.py
|
297
|
3617
|
"""
To run this, you'll need to have the following installed:
* scikit-learn
It runs two benchmarks.
First, we fix a training set, increase the number of
samples to classify, and plot the number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
konstantint/matplotlib-venn
|
matplotlib_venn/__init__.py
|
1
|
2781
|
'''
Venn diagram plotting routines.
Copyright 2012, Konstantin Tretyakov.
http://kt.era.ee/
Licensed under MIT license.
This package contains routines for plotting area-weighted two- and three-circle venn diagrams.
There are four main functions here: :code:`venn2`, :code:`venn2_circles`, :code:`venn3`, :code:`venn3_circles`.
The :code:`venn2` and :code:`venn2_circles` accept as their only required argument a 3-element list of subset sizes:
subsets = (Ab, aB, AB)
That is, for example, subsets[0] contains the size of the subset (A and not B), and
subsets[2] contains the size of the set (A and B), etc.
Similarly, the functions :code:`venn3` and :code:`venn3_circles` require a 7-element list:
subsets = (Abc, aBc, ABc, abC, AbC, aBC, ABC)
The functions :code:`venn2_circles` and :code:`venn3_circles` simply draw two or three circles respectively,
such that their intersection areas correspond to the desired set intersection sizes.
Note that for a three-circle venn diagram it is not possible to achieve exact correspondence, although in
most cases the picture will still provide a decent indication.
The functions :code:`venn2` and :code:`venn3` draw diagram as a collection of separate colored patches with text labels.
The functions :code:`venn2_circles` and :code:`venn3_circles` return the list of Circle patches that may be tuned further
to your liking.
The functions :code:`venn2` and :code:`venn3` return an object of class :code:`Venn2` or :code:`Venn3` respectively,
which give access to constituent patches and text elements.
Example::
from matplotlib import pyplot as plt
import numpy as np
from matplotlib_venn import venn3, venn3_circles
plt.figure(figsize=(4,4))
v = venn3(subsets=(1, 1, 1, 1, 1, 1, 1), set_labels = ('A', 'B', 'C'))
v.get_patch_by_id('100').set_alpha(1.0)
v.get_patch_by_id('100').set_color('white')
v.get_label_by_id('100').set_text('Unknown')
v.get_label_by_id('A').set_text('Set "A"')
c = venn3_circles(subsets=(1, 1, 1, 1, 1, 1, 1), linestyle='dashed')
c[0].set_lw(1.0)
c[0].set_ls('dotted')
plt.title("Sample Venn diagram")
plt.annotate('Unknown set', xy=v.get_text_by_id('100').get_position() - np.array([0, 0.05]), xytext=(-70,-70),
ha='center', textcoords='offset points', bbox=dict(boxstyle='round,pad=0.5', fc='gray', alpha=0.1),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.5',color='gray'))
'''
from matplotlib_venn._venn2 import venn2, venn2_circles
from matplotlib_venn._venn3 import venn3, venn3_circles
from matplotlib_venn._util import venn2_unweighted, venn3_unweighted
__all__ = ['venn2', 'venn2_circles', 'venn3', 'venn3_circles', 'venn2_unweighted', 'venn3_unweighted']
__version__ = '0.11.6'
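# A two-set counterpart to the three-set example in the module docstring above
# (subset sizes are arbitrary illustration values):
#
#     from matplotlib import pyplot as plt
#     from matplotlib_venn import venn2, venn2_circles
#     plt.figure(figsize=(4, 4))
#     v = venn2(subsets=(3, 2, 1), set_labels=('A', 'B'))  # (Ab, aB, AB)
#     c = venn2_circles(subsets=(3, 2, 1), linestyle='dashed')
#     plt.title("Sample two-set Venn diagram")
#     plt.show()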
|
mit
|
markslwong/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/estimator.py
|
1
|
55289
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly names given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
      `features`, `labels`, or `predictions` provided. Most commonly, a dict of
      predictions is given but no pred_name is specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
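# For reference, a `metrics` argument of the recommended `MetricSpec` form
# described above might look like (names and keys are illustrative only):
#
#     {'accuracy': metric_spec.MetricSpec(
#          metric_fn=metrics_lib.streaming_accuracy,
#          prediction_key='classes')}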
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = summary_io.SummaryWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
else:
logging.warn('Skipping summary for %s, must be a float or np.float32.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use `Estimator`.
"""
__metaclass__ = abc.ABCMeta
  # Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
        continue training a previously saved model. If `None`, the model_dir in
        `config` will be used if set. If both are set, they must be the same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
logging.info('Using config: %s', str(vars(self._config)))
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
    fit in memory at the same time, or when the model is taking a long time
    to converge and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
        key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
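  # Note: the (value_op, update_op) pairs unpacked above follow the streaming
  # metric convention used by tf.metrics / tf.contrib.metrics, e.g.
  #     value_op, update_op = tf.metrics.accuracy(labels, predictions)
  # where update_op accumulates state on every session run and value_op reads
  # the current aggregate.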
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
all_hooks.extend(hooks)
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
summary_io.SummaryWriterCache.clear()
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
        * `mode`: Optional. Specifies if this is training, evaluation or
          prediction. See `ModeKeys`.
        * `params`: Optional `dict` of hyperparameters. Will receive what
          is passed to Estimator in the `params` parameter. This allows
          configuring Estimators from hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
      Supports the following signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an
        estimator to continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
        raise ValueError('Estimator\'s model_fn (%s) does not include a '
                         'params argument, but params (%s) were passed.' %
                         (model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
  def _call_model_fn(self, features, labels, mode):
    """Calls model function, passing only the arguments the model_fn accepts.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
features, labels = self._feature_engineering_fn(features, labels)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
else:
saver_for_restore = saver.Saver(sharded=True)
with tf_session.Session('') as session:
saver_for_restore.restore(session, checkpoint_path)
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
resources.initialize_resources(resources.shared_resources()),
data_flow_ops.tables_initializer())
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
return export_dir
# For the duration of the x, y deprecation on Estimator, allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name='score')
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
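# Minimal usage sketch (an illustration, not part of the library): a toy
# linear model written against the legacy
# `(features, labels) -> (predictions, loss, train_op)` model_fn signature
# documented above. The names `_toy_model_fn`, `_toy_input_fn` and the
# feature key 'x' are made up for this example.
if __name__ == '__main__':
  import tensorflow as tf
  def _toy_model_fn(features, labels):
    # Fit y = w * x with a single scalar weight.
    w = tf.get_variable('w', initializer=0.0)
    predictions = w * features['x']
    loss = tf.reduce_mean(tf.square(predictions - labels))
    train_op = tf.train.GradientDescentOptimizer(0.05).minimize(
        loss, global_step=tf.contrib.framework.get_global_step())
    return predictions, loss, train_op
  def _toy_input_fn():
    return ({'x': tf.constant([1.0, 2.0, 3.0, 4.0])},
            tf.constant([2.0, 4.0, 6.0, 8.0]))
  toy_estimator = Estimator(model_fn=_toy_model_fn)
  toy_estimator.fit(input_fn=_toy_input_fn, steps=200)
  print(list(toy_estimator.predict(input_fn=_toy_input_fn)))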
|
apache-2.0
|
samchrisinger/osf.io
|
scripts/analytics/links.py
|
55
|
1227
|
# -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from .utils import plot_dates, mkdirp
link_collection = database['privatelink']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'features')
mkdirp(FIG_PATH)
def analyze_view_only_links():
dates = [
record['date_created']
for record in link_collection.find({}, {'date_created': True})
]
if not dates:
return
fig = plot_dates(dates)
plt.title('view-only links ({} total)'.format(len(dates)))
plt.savefig(os.path.join(FIG_PATH, 'view-only-links.png'))
plt.close()
def analyze_view_only_links_anonymous():
dates = [
record['date_created']
for record in link_collection.find(
{'anonymous': True},
{'date_created': True},
)
]
if not dates:
return
fig = plot_dates(dates)
plt.title('anonymous view-only links ({} total)'.format(len(dates)))
plt.savefig(os.path.join(FIG_PATH, 'view-only-links-anonymous.png'))
plt.close()
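# A rough, self-contained stand-in (not used above) for what `plot_dates`
# from `.utils` presumably does -- histogram a list of datetimes on a date
# axis. The real helper is not shown in this file, so treat this purely as a
# sketch of the idea.
def _plot_dates_sketch(dates, bins=20):
    import matplotlib.dates as mdates
    fig, ax = plt.subplots()
    ax.hist(mdates.date2num(dates), bins=bins)
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
    fig.autofmt_xdate()
    return fig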
def main():
analyze_view_only_links()
analyze_view_only_links_anonymous()
if __name__ == '__main__':
main()
|
apache-2.0
|
Cocuyo-Labs-Team/ccytsk
|
ccytsk/core/database/fake.py
|
1
|
7867
|
import random
import os
import csv
import datetime
import pandas as pd
import copy
from orm import ForeignKey, ColumnFake
from ccytsk.core.fake.mfaker import MFaker
class SchemaFaker(object):
def __init__(self, schema, locale=None):
self.schema = schema
self.data = {}
self.fkey_data = {}
self.value_from_union_data = {}
self.serials = {}
self.set_idx = {}
self.timelapses = {}
self.faker = MFaker.get_faker(locale=locale)
def get_colum_fake_value(self, column, row=None):
try:
if column.fake is None:
return None
elif column.fake.name == 'default':
return column.default
elif column.fake.name == 'fkey':
if (
not isinstance(column.fkey, ForeignKey)
or '%s.%s' % (column.fkey.table, column.fkey.column) not in self.fkey_data
):
return None
return random.choice(self.fkey_data['%s.%s' % (column.fkey.table, column.fkey.column)])
elif column.fake.name == 'serial':
if column.id not in self.serials:
self.serials[column.id] = 0
self.serials[column.id] += 1
return self.serials[column.id]
elif column.fake.name == 'timelapse':
if column.id not in self.timelapses:
self.timelapses[column.id] = dict(
index=-1,
data=[day.strftime(column.fake.format if column.fake.format else '%Y-%m-%d') for day in pd.date_range(column.fake.data[0], column.fake.data[1])]
)
self.timelapses[column.id]['index'] += 1
return None if self.timelapses[column.id]['index'] >= len(self.timelapses[column.id]['data']) else self.timelapses[column.id]['data'][self.timelapses[column.id]['index']]
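                # For reference: pd.date_range('2024-01-01', '2024-01-03')
                # expands to the three days 2024-01-01 .. 2024-01-03
                # (endpoints inclusive), each then formatted with
                # column.fake.format (default '%Y-%m-%d').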
elif column.fake.name == 'set':
if isinstance(column.fake.data, list) and len(column.fake.data) > 0:
if column.id not in self.set_idx:
self.set_idx[column.id] = -1
self.set_idx[column.id] += 1
if column.unique:
return None if self.set_idx[column.id] >= len(column.fake.data) else column.fake.data[self.set_idx[column.id]]
else:
return column.fake.data[self.set_idx[column.id] % len(column.fake.data)]
else:
return None
elif column.fake.name == 'value_from_union':
column_values = []
for c in column.fake.data:
column_values += (self.fkey_data[c] if c in self.fkey_data else self.value_from_union_data[c])
return random.choice(column_values)
elif column.fake.name == 'eval':
return eval(column.fake.data, dict(row=row, fkey_data=self.fkey_data))
else:
fake_data = copy.deepcopy(column.fake.data)
if row is not None and isinstance(fake_data, dict):
for k in fake_data:
if fake_data[k] in row:
fake_data[k] = row[fake_data[k]]
value = MFaker.get_value(self.faker, column.fake.name, fake_data)
if value is not None:
if isinstance(value, datetime.datetime):
return value.strftime(column.fake.format if column.fake.format else '%Y-%m-%d %H:%M:%S')
return value
except Exception as e:
raise Exception('FakeDataGenerationError: %s on %s' % (str(e), column.id))
def generate_table_data(self, table, nrows):
data = []
for i in xrange(1, nrows + 1):
record = {}
for column in table.columns:
record[column.name] = (
self.fkey_data[column.id][i-1]
if column.id in self.fkey_data
and len(self.fkey_data[column.id]) >= i
else self.get_colum_fake_value(column=column, row=record)
)
data.append(record)
return data
def generate_schema_data(self, default_nrows=1, **kwargs):
for table in self.schema.tables:
if table.has_fkey:
for column in table.columns:
try:
if isinstance(column.fkey, ForeignKey):
fkey_ref = '%s.%s' % (column.fkey.table, column.fkey.column)
if fkey_ref not in self.fkey_data:
fkey_ref_column = self.schema.get_column(fkey_ref)
fkey_ref_table = self.schema.get_table(column.fkey.table)
nrows = (
int(kwargs['%s_nrows' % fkey_ref_table.name])
if '%s_nrows' % fkey_ref_table.name in kwargs
else fkey_ref_table.fake.nrows if fkey_ref_table.fake and fkey_ref_table.fake.nrows else int(default_nrows)
)
self.fkey_data[fkey_ref] = [
self.get_colum_fake_value(fkey_ref_column) for _ in xrange(1, nrows + 1)
]
except Exception as e:
raise Exception('FakeDataGenerationError: %s on %s' % (str(e), column.id))
for table in self.schema.tables:
for column in table.columns:
try:
if isinstance(column.fake, ColumnFake) and column.fake.name == 'value_from_union':
for fkey_ref in column.fake.data:
if fkey_ref not in self.fkey_data and fkey_ref not in self.value_from_union_data:
fkey_ref_column = self.schema.get_column(fkey_ref)
fkey_ref_table = self.schema.get_table('.'.join(fkey_ref_column.split('.')[:-1]))
nrows = (
int(kwargs['%s_nrows' % fkey_ref_table.name])
if '%s_nrows' % fkey_ref_table.name in kwargs
else fkey_ref_table.fake.nrows if fkey_ref_table.fake and fkey_ref_table.fake.nrows else int(default_nrows)
)
self.value_from_union_data[fkey_ref] = [
self.get_colum_fake_value(fkey_ref_column) for _ in xrange(1, nrows + 1)
]
except Exception as e:
raise Exception('FakeDataGenerationError: %s on %s' % (str(e), column.id))
for table in self.schema.tables:
nrows = (
int(kwargs['%s_nrows' % table.name])
if '%s_nrows' % table.name in kwargs
else table.fake.nrows if table.fake and table.fake.nrows else int(default_nrows)
)
self.data[table.name] = self.generate_table_data(table=table, nrows=nrows)
def save_data_to_files(self, directory):
copy_queries = []
for table in self.schema.tables:
if len(self.data[table.name]) == 0:
continue
filename = os.path.join(directory, '%s.csv' % table.name)
with open(filename, 'wb') as output_file:
dict_writer = csv.DictWriter(output_file, [c.name for c in table.columns])
dict_writer.writerows(self.data[table.name])
copy_queries.append("copy %s from '%s' delimiter ',' null '';" % (table.name, filename))
return copy_queries
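# Typical call sequence (a sketch; building the `schema` object itself is
# done with the orm module and is not shown here, and the table name 'users'
# is made up):
#
#     faker = SchemaFaker(schema)
#     faker.generate_schema_data(default_nrows=10, users_nrows=100)
#     copy_queries = faker.save_data_to_files('/tmp/fake_data')
#
# Per-table row counts are overridden via '<table_name>_nrows' keyword
# arguments, and save_data_to_files() returns the COPY statements for
# loading the generated CSV files.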
|
bsd-3-clause
|
scikit-xray/scikit-xray
|
doc/sphinxext/plot_generator.py
|
8
|
10150
|
"""
Sphinx plugin to run example scripts and create a gallery page.
Taken from the seaborn project, which in turn lightly modified it
from the mpld3 project.
"""
from __future__ import division
import os
import os.path as op
import re
import glob
import token
import tokenize
import shutil
import json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import image
RST_TEMPLATE = """
.. _{sphinx_tag}:
{docstring}
.. image:: {img_file}
**Python source code:** :download:`[download source: {fname}]<{fname}>`
.. literalinclude:: {fname}
:lines: {end_line}-
"""
INDEX_TEMPLATE = """
.. raw:: html
<style type="text/css">
.figure {{
position: relative;
float: left;
margin: 10px;
width: 180px;
height: 200px;
}}
.figure img {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure:hover img {{
-webkit-filter: blur(3px);
-moz-filter: blur(3px);
-o-filter: blur(3px);
-ms-filter: blur(3px);
filter: blur(3px);
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure span {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
background: #000;
color: #fff;
visibility: hidden;
opacity: 0;
z-index: 100;
}}
.figure p {{
position: absolute;
top: 45%;
width: 170px;
font-size: 110%;
}}
.figure:hover span {{
visibility: visible;
opacity: .4;
}}
.caption {{
        position: absolute;
width: 180px;
top: 170px;
text-align: center !important;
}}
</style>
.. _{sphinx_tag}:
Example gallery
===============
{toctree}
{contents}
.. raw:: html
<div style="clear: both"></div>
"""
def create_thumbnail(infile, thumbfile,
width=300, height=300,
cx=0.5, cy=0.5, border=4):
baseout, extout = op.splitext(thumbfile)
im = image.imread(infile)
rows, cols = im.shape[:2]
x0 = int(cx * cols - .5 * width)
y0 = int(cy * rows - .5 * height)
xslice = slice(x0, x0 + width)
yslice = slice(y0, y0 + height)
thumb = im[yslice, xslice]
thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
thumb[:, :border, :3] = thumb[:, -border:, :3] = 0
dpi = 100
fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
frameon=False, xticks=[], yticks=[])
ax.imshow(thumb, aspect='auto', resample=True,
interpolation='bilinear')
fig.savefig(thumbfile, dpi=dpi)
return fig
def indent(s, N=4):
"""indent a string"""
return s.replace('\n', '\n' + N * ' ')
class ExampleGenerator(object):
"""Tools for generating an example page from a file"""
def __init__(self, filename, target_dir):
self.filename = filename
self.target_dir = target_dir
self.thumbloc = .5, .5
self.extract_docstring()
with open(filename, "r") as fid:
self.filetext = fid.read()
outfilename = op.join(target_dir, self.rstfilename)
# Only actually run it if the output RST file doesn't
# exist or it was modified less recently than the example
if (not op.exists(outfilename)
or (op.getmtime(outfilename) < op.getmtime(filename))):
self.exec_file()
else:
print("skipping {0}".format(self.filename))
@property
def dirname(self):
return op.split(self.filename)[0]
@property
def fname(self):
return op.split(self.filename)[1]
@property
def modulename(self):
return op.splitext(self.fname)[0]
@property
def pyfilename(self):
return self.modulename + '.py'
@property
def rstfilename(self):
return self.modulename + ".rst"
@property
def htmlfilename(self):
return self.modulename + '.html'
@property
def pngfilename(self):
pngfile = self.modulename + '.png'
return "_images/" + pngfile
@property
def thumbfilename(self):
pngfile = self.modulename + '_thumb.png'
return pngfile
@property
def sphinxtag(self):
return self.modulename
@property
def pagetitle(self):
return self.docstring.strip().split('\n')[0].strip()
@property
def plotfunc(self):
match = re.search(r"sns\.(.+plot)\(", self.filetext)
if match:
return match.group(1)
match = re.search(r"sns\.(.+map)\(", self.filetext)
if match:
return match.group(1)
match = re.search(r"sns\.(.+Grid)\(", self.filetext)
if match:
return match.group(1)
return ""
def extract_docstring(self):
""" Extract a module-level docstring
"""
lines = open(self.filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(lines.__iter__().next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs,
# extract the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')
).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
thumbloc = None
for i, line in enumerate(docstring.split("\n")):
m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
if m:
thumbloc = float(m.group(1)), float(m.group(2))
break
if thumbloc is not None:
self.thumbloc = thumbloc
docstring = "\n".join([l for l in docstring.split("\n")
if not l.startswith("_thumb")])
self.docstring = docstring
self.short_desc = first_par
self.end_line = erow + 1 + start_row
def exec_file(self):
print("running {0}".format(self.filename))
plt.close('all')
my_globals = {'pl': plt,
'plt': plt}
execfile(self.filename, my_globals)
fig = plt.gcf()
fig.canvas.draw()
pngfile = op.join(self.target_dir, self.pngfilename)
thumbfile = op.join("example_thumbs", self.thumbfilename)
self.html = "<img src=../%s>" % self.pngfilename
fig.savefig(pngfile, dpi=75, bbox_inches="tight")
cx, cy = self.thumbloc
create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)
def toctree_entry(self):
return " ./%s\n\n" % op.splitext(self.htmlfilename)[0]
def contents_entry(self):
return (".. raw:: html\n\n"
" <div class='figure align-center'>\n"
" <a href=./{0}>\n"
" <img src=../_static/{1}>\n"
" <span class='figure-label'>\n"
" <p>{2}</p>\n"
" </span>\n"
" </a>\n"
" </div>\n\n"
"\n\n"
"".format(self.htmlfilename,
self.thumbfilename,
self.plotfunc))
def main(app):
static_dir = op.join(app.builder.srcdir, '_static')
target_dir = op.join(app.builder.srcdir, 'examples')
image_dir = op.join(app.builder.srcdir, 'examples/_images')
thumb_dir = op.join(app.builder.srcdir, "example_thumbs")
source_dir = op.abspath(op.join(app.builder.srcdir,
'..', 'examples'))
if not op.exists(static_dir):
os.makedirs(static_dir)
if not op.exists(target_dir):
os.makedirs(target_dir)
if not op.exists(image_dir):
os.makedirs(image_dir)
if not op.exists(thumb_dir):
os.makedirs(thumb_dir)
if not op.exists(source_dir):
os.makedirs(source_dir)
banner_data = []
toctree = ("\n\n"
".. toctree::\n"
" :hidden:\n\n")
contents = "\n\n"
# Write individual example files
for filename in glob.glob(op.join(source_dir, "*.py")):
ex = ExampleGenerator(filename, target_dir)
banner_data.append({"title": ex.pagetitle,
"url": op.join('examples', ex.htmlfilename),
"thumb": op.join(ex.thumbfilename)})
shutil.copyfile(filename, op.join(target_dir, ex.pyfilename))
output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
docstring=ex.docstring,
end_line=ex.end_line,
fname=ex.pyfilename,
img_file=ex.pngfilename)
with open(op.join(target_dir, ex.rstfilename), 'w') as f:
f.write(output)
toctree += ex.toctree_entry()
contents += ex.contents_entry()
if len(banner_data) < 10:
banner_data = (4 * banner_data)[:10]
# write index file
index_file = op.join(target_dir, 'index.rst')
with open(index_file, 'w') as index:
index.write(INDEX_TEMPLATE.format(sphinx_tag="example_gallery",
toctree=toctree,
contents=contents))
def setup(app):
app.connect('builder-inited', main)
return {'parallel_read_safe': True, 'parallel_write_safe': True}
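# For reference, a sketch of the kind of example script this generator
# expects: a module docstring whose first line becomes the page title, an
# optional `_thumb: .x, .y` line fixing where the thumbnail is cropped, then
# the plotting code. The plot below is invented.
#
#     """
#     Simple sine curve
#     =================
#
#     _thumb: .5, .5
#     """
#     import numpy as np
#     import matplotlib.pyplot as plt
#
#     x = np.linspace(0, 2 * np.pi, 200)
#     plt.plot(x, np.sin(x))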
|
bsd-3-clause
|
mahajrod/MACE
|
scripts/draw_boxplots.py
|
1
|
5821
|
#!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", action="store", dest="input", required=True,
help="Input file with two columns containing label in the first one and filename in the second."
"Boxplots will be drawn in the same order as labels")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix", required=True,
help="Prefix of output files")
parser.add_argument("-d", "--dpi", action="store", dest="dpi", type=int, default=300,
help="Dpi of figure")
parser.add_argument("--figure_height", action="store", dest="figure_height", type=float, default=6.0,
help="Height of figure in inches. Default: 6")
parser.add_argument("--figure_width_per_sample", action="store", dest="figure_width_per_sample", type=float, default=1,
help="Per sample width of figure in inches. Default: 1")
parser.add_argument("-e", "--output_formats", action="store", dest="output_formats", type=lambda s: s.split(","),
default=("png", ),
                    help="Comma-separated list of formats (supported by matplotlib) for the "
                         "output figure. Default: png")
parser.add_argument("-l", "--title", action="store", dest="title", default="Variant density",
help="Suptitle of figure. Default: 'Variant density'")
parser.add_argument("-w", "--window_size", action="store", dest="window_size", required=True, type=float,
                    help="Size of the windows used for counts.")
parser.add_argument("-m", "--multiplicator", action="store", dest="multiplicator", default=1000, type=float,
                    help="Multiplicator for variant counts. "
                         "Default: 1000, i.e. variant counts will be scaled to per 1 kbp")
parser.add_argument("--ymin", action="store", dest="ymin", type=float, default=-0.1,
help="Minimum limit for Y axis . Default: -0.1")
parser.add_argument("--ymax", action="store", dest="ymax", type=float, default=None,
help="Maximum limit for Y axis. Default: not set")
parser.add_argument("--yticklist", action="store", dest="yticklist", type=lambda s: list(map(float, s.split(","))),
default=[0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.25, 1.5, 2, 3, 4, 5],
                    help="Comma-separated tick list for Y axis. "
                         "Default: 0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.25,1.5,2,3,4,5")
parser.add_argument("--rotation", action="store", dest="rotation", type=float, default=90,
help="Rotation angle for X labels. Default: 90")
parser.add_argument("--horizontal_lines", action="store", dest="horizontal_lines",
                    type=lambda s: list(map(float, s.split(","))),
help="Comma-separated list of y-coordinates to draw horizontal lines. "
"Default: not set")
"""
parser.add_argument("-q", "--figure_width", action="store", dest="figure_width", default=12, type=int,
help="Width of figure in inches. Default: 12")
parser.add_argument("-u", "--figure_height_scale_factor", action="store", dest="figure_height_scale_factor",
default=0.5, type=float,
help="Figure height scale factor. Figure height is calculated in inches as "
"int(figure_scale_factor * scaffold_number * sample_number). Default: 0.5")
"""
parser.add_argument("--subplots_adjust_left", action="store", dest="subplots_adjust_left", type=float,
help="Adjust left border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--subplots_adjust_top", action="store", dest="subplots_adjust_top", type=float,
help="Adjust top border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--subplots_adjust_right", action="store", dest="subplots_adjust_right", type=float,
help="Adjust right border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--subplots_adjust_bottom", action="store", dest="subplots_adjust_bottom", type=float,
help="Adjust bottom border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--only_count", action="store_true", dest="only_count", default=False,
help="Only count variants, do not draw them. Default: False")
args = parser.parse_args()
with open(args.input, "r") as in_fd:
file_dict = OrderedDict([line.strip().split("\t") for line in in_fd ])
df_dict = OrderedDict({})
for entry in file_dict:
df_dict[entry] = pd.read_csv(file_dict[entry], sep="\t", index_col=["CHROM",])
df_dict[entry]["density"] = df_dict[entry]["All"] / args.window_size * args.multiplicator
fig, ax = plt.subplots(figsize=(args.figure_width_per_sample * len(file_dict), args.figure_height), dpi=args.dpi)
plt.xticks(rotation=args.rotation)
plt.boxplot([df_dict[entry]["density"] for entry in df_dict], labels=list(df_dict.keys()))
plt.yticks(args.yticklist)
if args.horizontal_lines:
for ycoord in args.horizontal_lines:
plt.axhline(y=ycoord, color="red", linestyle="--", linewidth=0.5)
plt.ylim(ymax=args.ymax, ymin=args.ymin)
plt.subplots_adjust(left=args.subplots_adjust_left, right=args.subplots_adjust_right,
top=args.subplots_adjust_top, bottom=args.subplots_adjust_bottom)
plt.title(args.title)
for ext in args.output_formats:
plt.savefig("{0}.{1}".format(args.output_prefix, ext))
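# Sketch of the expected inputs (file names and numbers are invented):
# the -i/--input file maps a label to a per-sample counts file, one
# tab-separated pair per line, e.g.
#
#     sampleA<TAB>sampleA.variant_counts.tsv
#     sampleB<TAB>sampleB.variant_counts.tsv
#
# and each counts file is a tab-separated table with at least a CHROM column
# (used as the index) and an 'All' column of per-window variant counts, e.g.
#
#     CHROM   All
#     chr1    12
#     chr1    7
#     chr2    20
#
# With --window_size 100000 and --multiplicator 1000, a window count of 12
# becomes a density of 12 / 100000 * 1000 = 0.12 variants per kbp.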
|
apache-2.0
|
ericd/redeem
|
redeem/Autotune_2.py
|
2
|
135061
|
"""
Autotune for Redeem
Author: Elias Bakken
email: elias(dot)bakken(at)gmail(dot)com
Website: http://www.thing-printer.com
License: GNU GPL v3: http://www.gnu.org/copyleft/gpl.html
Redeem is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Redeem is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Redeem. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division, print_function
import time
import logging
import numpy as np
from threading import Thread
try:
from Gcode import Gcode
from Util import Util
except ImportError:
from redeem.Gcode import Gcode
from redeem.Util import Util
class Autotune_2:
def __init__(self, heater, temp=200.0, cycles=5, g=None, printer=None):
self.heater = heater
        self.steady_temperature = temp  # Steady-state starting temperature
self.cycles = cycles
self.g = g
self.printer = printer
self.ambient_temp = 22.2
self.output_step = 10.0 # Degrees to step
self.stable_start_seconds = 10
self.sleep = 0.1
self.stable_temp = 30.0
self.pre_calibrate_temp = 200.0
self.E = 1.0 # Hysteresis
        self.tuning_algorithm = "ZN"  # "ZN" = Ziegler-Nichols, "TL" = Tyreus-Luyben
self.plot_temps = []
def cancel(self):
self.running = False
def send_temperatures(self):
while self.running:
m105 = Gcode({"message": "M105", "prot": self.g.prot})
self.printer.processor.execute(m105)
answer = m105.get_answer()
m105.set_answer(answer[2:]) # strip away the "ok"
self.printer.reply(m105)
self.plot_temps.append("({}, {:10.4f})".format(time.time(), self.heater.get_temperature_raw() ))
time.sleep(1)
logging.debug(self.plot_temps)
def run(self):
""" Start the PID autotune loop """
# Reset found peaks
self.running = True
# Start sending thread
self.t = Thread(target=self.send_temperatures)
self.t.start()
# Enable on-off control
self.has_onoff_control = self.heater.onoff_control
self.heater.onoff_control = True
# Set the standard parameters
self.old_ok_range = self.heater.ok_range
self.heater.P = 0.5
self.heater.I = 0.0
self.heater.D = 0.0
self.heater.ok_range = 0.5
self.d = self.bias = 0.5
if False:
self.heater.max_power = 0.708521336912
max_heat_rate = []
self.heater.set_target_temperature(250)
while self.heater.get_temperature() < 250:
time.sleep(1)
self.heater.set_target_temperature(0)
self.running = False
self.t.join()
return
# Run pre-calibration
self._pre_calibrate()
# Start stepping temperatures
logging.debug("Starting cycles")
self._tune()
logging.debug("Tuning data: "+str(self.temps))
# Smooth the data using hanning window
self.smooth_temps = Util.smooth(np.array(self.temps))
# Discover peaks
peaks = Util.detect_peaks(self.smooth_temps)
valleys = Util.detect_peaks(self.smooth_temps, valley=True)
logging.debug("Found peaks: "+str(peaks))
logging.debug("Found valleys: "+str(valleys))
# Calculate the new PID values
self.calculate_PID(self.smooth_temps, self.times, peaks, valleys)
# Set the new PID settings
self.heater.ok_range = self.old_ok_range
self.heater.P = self.Kp
self.heater.I = self.Ki
self.heater.D = self.Kd
# Clean shit up
self.heater.onoff_control = self.has_onoff_control
self.running = False
self.t.join()
def _pre_calibrate(self):
logging.debug("Starting pre-calibrate")
# Wait for temperature to reach < 40 deg.
self.heater.set_target_temperature(0)
while self.heater.get_temperature() > self.stable_temp:
time.sleep(1)
# Get the noise band from the thermistor
self.noise_band = self.heater.get_noise_magnitude()
        # Rev B has a very low noise floor, so if 0 is returned,
        # set it to 0.5
        self.noise_band = self.noise_band or 0.5
logging.debug("Found noise magnitude: "+str(self.noise_band))
current_temp = self.heater.get_temperature()
#self.ambient_temp = current_temp
# Set the heater at 25% max power
self.heater.max_power = 0.25
heatup_temps = []
# Start heating at 25%
dead_time = 0
stop_temp = current_temp + 2.0*self.noise_band
self.heater.set_target_temperature(self.pre_calibrate_temp)
while self.heater.get_temperature_raw() < stop_temp:
time.sleep(0.1)
dead_time += 0.1
heatup_temps.append( "({}, {:10.4f})".format(time.time(), self.heater.get_temperature_raw()) )
logging.debug("Found dead time: "+str(dead_time))
# Wait for heatup curve to establish
stop_time = 2.0*dead_time
while stop_time > 0:
time.sleep(1)
heatup_temps.append("({}, {:10.4f})".format(time.time(), self.heater.get_temperature_raw() ))
stop_time -= 1
# (5) Record slope of heat up curve
delta_temps = []
delta_times = []
delta_time = np.minimum(np.maximum(dead_time*4.0, 10.0), 30.0)
self.delta_time = delta_time
logging.debug("Starting delta measurements, time: "+str(delta_time))
while delta_time > 0:
delta_temps.append(self.heater.get_temperature_raw())
delta_times.append(time.time())
time.sleep(0.1)
delta_time -= 0.1
heatup_temps.append("({}, {:10.4f})".format(time.time(), self.heater.get_temperature_raw() ))
logging.debug("Stopping delta measurements")
#logging.debug("Heatup temps: "+str(heatup_temps))
# (6) Calculate heat-up rate
heat_rate = (delta_temps[-1]-delta_temps[0])/(delta_times[-1]-delta_times[0])
logging.debug("heat up rate at 25%: "+str(heat_rate)+" deg/s")
# (7) Calculate max heat rate
self.max_heat_rate = heat_rate*4.0# * 1.16
logging.debug("Max heat rate: "+str(self.max_heat_rate)+" deg/s")
# (8) Estimate cutoff point
self.cutoff_band = self.max_heat_rate*dead_time
logging.debug("Cutoff band: "+str(self.cutoff_band)+" deg")
# (9) Raise temp until cutoff.
cutoff_temp = self.pre_calibrate_temp - self.cutoff_band
self.heater.max_power = 1.0
cutoff_temps = []
cutoff_times = []
logging.debug("Cutoff temp: "+str(cutoff_temp)+ " deg")
while self.heater.get_temperature_raw() < cutoff_temp:
cutoff_temps.append(self.heater.get_temperature_raw())
cutoff_times.append(time.time())
time.sleep(0.1)
# (10) Calculate slope in degrees/second, store as setpoint_heating_rate
self.setpoint_heating_rate = (cutoff_temps[-1]-cutoff_temps[-20])/(cutoff_times[-1]-cutoff_times[-20])
logging.debug("Found setpoint heating rate: "+str(self.setpoint_heating_rate))
if self.setpoint_heating_rate > self.max_heat_rate:
self.max_heat_rate = self.setpoint_heating_rate
logging.debug("Updated max heat rate to: "+str(self.setpoint_heating_rate))
# (11) Set power to zero
self.heater.set_target_temperature(0)
logging.debug("Disabling heater and looking for peak")
# (12) Find temp peak
highest_temp = self.heater.get_temperature_raw()
new_temp = highest_temp
while new_temp >= highest_temp:
time.sleep(0.1)
highest_temp = new_temp
new_temp = self.heater.get_temperature_raw()
logging.debug("Found max peak: "+str(highest_temp)+" deg")
# (13) Adding dead time for kicks
dead_time = highest_temp-20
while self.heater.get_temperature_raw() > dead_time:
time.sleep(0.1)
# (14) Record cooling rates
logging.debug("Started recording cooling rates")
cooling_temps = []
cooling_times = []
cooldown_temps = []
# Get 120 seconds of cooling data
for temp in range(1200):
cooling_temps.append(self.heater.get_temperature_raw())
cooling_times.append(time.time())
time.sleep(0.1)
cooldown_temps.append("({}, {:10.4f})".format(time.time(), self.heater.get_temperature_raw() ))
temps = ",".join(cooldown_temps)
logging.debug("Cooling temps: "+str(temps))
diffs = np.array([(cooling_temps[200+(i*200)]-cooling_temps[0+(i*200)]) for i in range(5)])
times = np.array([(cooling_times[200+(i*200)]-cooling_times[0+(i*200)]) for i in range(5)])
slopes = abs(diffs/times)
temp_deltas = [cooling_temps[100+(i*200)]-self.ambient_temp for i in range(5)]
# Wait until we are below cutoff-temp, so we can get some traction
while self.heater.get_temperature_raw() > cutoff_temp - 20.0:
time.sleep(1)
# (15) Record setpoint cooling rate
self.cooling_rate = slopes[0]
logging.debug("Cooling rate: "+str(self.cooling_rate)+ " deg/s")
logging.debug("Diffs: "+str(diffs)+ " deg")
logging.debug("Times: "+str(times)+ " s")
logging.debug("Cooling rates: "+str(slopes)+ " deg/s")
logging.debug("Deltas: "+str(temp_deltas)+ " deg")
# (16) Calculate heat_loss_constant
self.heat_loss_constant = [ slopes[n]/temp_deltas[n] for n in range(len(slopes))]
logging.debug("Heat loss constant: "+str(self.heat_loss_constant))
# (17) Calculate heat_loss_K
self.heat_loss_k = np.average(self.heat_loss_constant)
logging.debug("Heat loss K: "+str(self.heat_loss_k))
# (18) This is Python, no need to delete squat.
# (19) Calculate gain skew
self.gain_skew = np.sqrt(self.setpoint_heating_rate/self.cooling_rate)
logging.debug("Gain skew: "+str(self.gain_skew))
logging.debug("Pre calibrate done")
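    # At this point pre-calibration has measured: noise_band (thermistor
    # noise), the heater dead time, max_heat_rate (full-power heating rate
    # extrapolated from the 25%-power ramp), cutoff_band (how early to cut
    # power so the overshoot lands near the setpoint), heat_loss_k (cooling
    # rate per degree above ambient) and gain_skew (sqrt of the heating to
    # cooling rate ratio). _tune() below uses these to choose the relay
    # powers for the limit cycles.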
def _tune(self):
# (1) Calculate rate of heat loss in degrees/second at desired setpoint using heat loss model,
setpoint_loss = self.heat_loss_k * (self.steady_temperature - self.ambient_temp)
logging.debug("Setpoint loss: "+str(setpoint_loss))
# (2) Calculate setpoint heater power requirement,
self.setpoint_power = setpoint_loss / self.max_heat_rate
logging.debug("Setpoint_power: "+str(self.setpoint_power))
# (3) Calculate high-cycle power
self.high_cycle_power = self.setpoint_power*(1.0+1.0/(self.gain_skew**2))
logging.debug("High-cycle_power: "+str(self.high_cycle_power))
# (4) Check if high-cycle power exceeds max_PWM
if self.high_cycle_power > 1.0:
# notify user the heater is too weak to cycle effectively at the chosen setpoint,
# and change setpoint_power=max_PWM/2, ignore gain_skew, and use high-cycle power = max_PWM.
# TODO: fix this
            logging.warning("High cycle power exceeds max. Setting to 1.0")
self.high_cycle_power = 1.0
# (5) Apply max heater power until reaching temp=setpoint - cutoff_band
cutoff_temp = self.steady_temperature - self.cutoff_band
self.heater.max_power = 1.0
self.heater.set_target_temperature(self.steady_temperature)
logging.debug("Cutoff temp: "+str(cutoff_temp)+ " deg")
while self.heater.get_temperature_raw() < cutoff_temp:
time.sleep(0.1)
logging.debug("Cutoff temp reached")
self.heater.set_target_temperature(0)
logging.debug("Disabling heater and looking for peak")
highest_temp = self.heater.get_temperature_raw()
new_temp = highest_temp
while new_temp >= highest_temp:
time.sleep(0.1)
highest_temp = new_temp
new_temp = self.heater.get_temperature_raw()
logging.debug("Found max peak: "+str(highest_temp)+" deg")
# (6) Apply setpoint_power heater power and hold until stable
self.heater.max_power = self.setpoint_power
# Set temp to something above the desired. setpoint power should enforce this.
self.heater.set_target_temperature(230)
while self.heater.get_noise_magnitude(300) > 1.0:
time.sleep(1)
logging.debug("Stable temp reached")
# (7) Replace the tuning setpoint with this stable temp
self.steady_temperature = self.heater.get_temperature()
# Set the heater power same as fall time
self.heater.max_power = self.high_cycle_power
self.temps = []
self.times = []
for cycle in range(self.cycles):
logging.debug("Doing cycle: "+str(cycle))
# (8) Turn off heater and wait until temp<(setpoint-E)
self.heater.set_target_temperature(self.steady_temperature - self.output_step)
while self.heater.get_temperature_raw() > self.steady_temperature - self.E:
self.temps.append(self.heater.get_temperature_raw())
self.times.append(time.time())
time.sleep(0.1)
# (9) Turn on heater at high-cycle power and wait until temp>(setpoint+E)
self.heater.set_target_temperature(self.steady_temperature + self.output_step)
while self.heater.get_temperature_raw() < self.steady_temperature + self.E:
self.temps.append(self.heater.get_temperature_raw())
self.times.append(time.time())
time.sleep(0.1)
# (10) Repeat steps 8,9 cycling power
logging.debug("Cycles completed")
self.heater.set_target_temperature(0)
smooth = Util.smooth(np.array(self.temps))
peaks = Util.detect_peaks(smooth)
diff = np.diff(smooth[peaks[-2:]])
logging.debug("Difference between last two peaks: "+str(diff)+" deg. C.")
self.heater.max_power = 1.0
def calculate_PID(self, temps, times, peaks, valleys):
abs_max = temps[peaks[-2]]
abs_min = temps[valleys[-1]]
# (12) Calculate amplitude response
a_single=(abs_max-abs_min)/2.0
d_single=(self.high_cycle_power)/2.0
# (13) Geometric mean
#d_single = np.sqrt((self.setpoint_power-0)*(self.high_cycle_power-self.setpoint_power))
# Calculate the oscillation period of the peaks
Pu = (times[peaks[-2]]-times[peaks[-3]])
# Calculate the ultimate gain
Ku=(4.0/np.pi) * (d_single / np.sqrt(a_single**2+self.E**2))
print("Temperature diff: "+str(abs_max-abs_min)+" deg. C")
print("Oscillation period: "+str(Pu)+" seconds")
print("Ultimate gain: "+str(Ku))
# Tyreus-Luyben:
if self.tuning_algorithm == "TL":
Kc = 0.45*Ku
Ti = 2.2*Pu
Td = Pu/6.3
        # Ziegler-Nichols
elif self.tuning_algorithm == "ZN":
Kc = 0.6*Ku
Ti = Pu/2.0
Td = Pu/8.0
# Convert from Standard to Ideal form
self.Kp = Kc
self.Ki = Kc / Ti
self.Kd = Kc * Td
self.Ku = Ku
self.Pu = Pu
self.max_temp = abs_max
self.min_temp = abs_min
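    # Worked example with invented numbers: if the relay cycling measures
    # abs_max = 205.0, abs_min = 195.0 with high_cycle_power = 0.8 and
    # E = 1.0, then a_single = 5.0, d_single = 0.4 and
    #     Ku = (4/pi) * 0.4 / sqrt(5.0**2 + 1.0**2) ~= 0.0999.
    # With an oscillation period Pu = 40 s the "ZN" branch gives
    #     Kc = 0.6 * Ku ~= 0.0599, Ti = Pu/2 = 20 s, Td = Pu/8 = 5 s,
    # which converts to Kp ~= 0.0599, Ki = Kc/Ti ~= 0.0030 and
    # Kd = Kc*Td ~= 0.2996.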
if __name__ == '__main__':
import matplotlib.pyplot as plt
data = np.array([198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.7486086056315, 198.7486086056315, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.23926929228196, 198.23926929228196, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.11271769876066, 198.11271769876066, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.11271769876066, 198.11271769876066, 198.11271769876066, 197.98647629017267, 197.98647629017267, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.11271769876066, 198.11271769876066, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 198.11271769876066, 198.11271769876066, 198.11271769876066, 197.860543531522, 197.860543531522, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.98647629017267, 197.98647629017267,
197.73491789878778, 197.73491789878778, 197.73491789878778, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.48458196924713, 197.48458196924713, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.2354565250131, 197.2354565250131, 197.11134403855397, 197.11134403855397, 197.11134403855397, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 197.11134403855397, 197.11134403855397, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.74079002890164, 196.74079002890164, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.74079002890164, 196.74079002890164, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.74079002890164, 196.74079002890164, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.49522585722917, 196.49522585722917, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.49522585722917, 196.49522585722917, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.12905905512292, 196.12905905512292, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.37288106250492, 196.37288106250492, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.00757907152695, 196.00757907152695, 196.2508259252321, 196.2508259252321, 196.2508259252321, 195.76547428893474, 195.76547428893474, 196.00757907152695, 196.00757907152695, 196.00757907152695,
195.88638460334346, 195.88638460334346, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 195.88638460334346, 195.88638460334346, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.12905905512292, 196.12905905512292, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 195.88638460334346, 195.88638460334346, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.12905905512292, 196.12905905512292, 196.00757907152695, 196.00757907152695, 196.00757907152695, 195.76547428893474, 195.76547428893474, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.00757907152695, 196.00757907152695, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.12905905512292, 196.12905905512292, 196.00757907152695, 196.00757907152695, 196.00757907152695, 195.88638460334346, 195.88638460334346, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.12905905512292, 196.12905905512292, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.2508259252321, 196.2508259252321, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.74079002890164, 196.74079002890164, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.74079002890164,
196.74079002890164, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.2354565250131, 197.2354565250131, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.609597878822, 197.609597878822, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.609597878822, 197.609597878822, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 198.11271769876066, 198.11271769876066, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.860543531522, 197.860543531522, 197.98647629017267, 197.98647629017267, 197.98647629017267, 198.23926929228196, 198.23926929228196, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.4933092296128, 198.4933092296128, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.7486086056315, 198.7486086056315, 199.0051801062051, 199.0051801062051, 199.0051801062051,
198.87673454026168, 198.87673454026168, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.39245079934784, 199.39245079934784, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.5221911576557, 199.5221911576557, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.7048101689802, 200.7048101689802, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.57205801974663, 200.57205801974663, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.97134633753387, 200.97134633753387, 200.83790567772365, 200.83790567772365, 200.83790567772365,
200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.7048101689802, 200.7048101689802, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.57205801974663, 200.57205801974663, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.83790567772365, 200.83790567772365, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.7048101689802, 200.7048101689802, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.30757670067032, 200.30757670067032, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.39245079934784, 199.39245079934784,
199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.6208006992352, 198.6208006992352, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.36613261681248, 198.36613261681248, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.23926929228196, 198.23926929228196, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.860543531522, 197.860543531522, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.609597878822, 197.609597878822, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.35986867835635, 197.35986867835635, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.35986867835635, 197.35986867835635, 197.11134403855397, 197.11134403855397, 197.11134403855397, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.61786170941832, 196.61786170941832, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.37288106250492, 196.37288106250492, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292,
196.12905905512292, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.40443479213207, 195.40443479213207, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.28464766272617, 195.28464766272617, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.04590455098526, 195.04590455098526, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 195.1651380178859, 195.1651380178859, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.68984828443138, 194.68984828443138, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.68984828443138, 194.68984828443138, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.68984828443138, 194.68984828443138, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.68984828443138, 194.68984828443138, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123,
194.92694596428123, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.28464766272617, 195.28464766272617, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.88638460334346, 195.88638460334346, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.88638460334346, 195.88638460334346, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.12905905512292, 196.12905905512292, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.12905905512292, 196.12905905512292, 196.37288106250492,
196.37288106250492, 196.37288106250492, 196.2508259252321, 196.2508259252321, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.49522585722917, 196.49522585722917, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.9875297586907, 196.9875297586907, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.2354565250131, 197.2354565250131, 196.9875297586907, 196.9875297586907, 196.9875297586907, 197.11134403855397, 197.11134403855397, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.35986867835635, 197.35986867835635, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.48458196924713, 197.48458196924713, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.98647629017267, 197.98647629017267, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.23926929228196, 198.23926929228196, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.4933092296128, 198.4933092296128, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.87673454026168, 198.87673454026168, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315,
198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.5221911576557, 199.5221911576557, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.91338589547337, 199.91338589547337, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.78265702431617, 200.04444765455628, 200.04444765455628, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.30757670067032, 200.30757670067032, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.43964745191022, 200.43964745191022, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.7048101689802, 200.7048101689802, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 201.10513395354656, 201.10513395354656, 201.10513395354656, 201.10513395354656, 201.10513395354656, 200.97134633753387, 200.97134633753387, 200.97134633753387, 201.10513395354656, 201.10513395354656, 201.10513395354656, 201.10513395354656, 201.10513395354656,
200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 201.10513395354656, 201.10513395354656, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.7048101689802, 200.7048101689802, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.7048101689802, 200.7048101689802, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 201.10513395354656, 201.10513395354656, 201.10513395354656, 200.83790567772365, 200.83790567772365, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.43964745191022, 200.43964745191022, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.30757670067032, 200.30757670067032, 200.30757670067032, 199.78265702431617, 199.78265702431617, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603,
199.65225934091603, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.65225934091603, 199.65225934091603, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.39245079934784, 199.39245079934784, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.13394691827176, 199.13394691827176, 199.0051801062051, 199.0051801062051, 199.0051801062051, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.23926929228196, 198.23926929228196, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.11271769876066, 198.11271769876066, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.860543531522, 197.860543531522, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.609597878822, 197.609597878822, 197.609597878822, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 196.9875297586907, 196.9875297586907, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.74079002890164, 196.74079002890164, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.37288106250492, 196.37288106250492, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.37288106250492, 196.37288106250492, 196.12905905512292, 196.12905905512292, 196.12905905512292,
196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.12905905512292, 196.12905905512292, 196.12905905512292, 195.76547428893474, 195.76547428893474, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.76547428893474, 195.76547428893474, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.28464766272617, 195.28464766272617, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.28464766272617, 195.28464766272617, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.68984828443138, 194.68984828443138, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.5717066395054, 194.5717066395054, 194.68984828443138, 194.68984828443138,
194.68984828443138, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.68984828443138, 194.68984828443138, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.28464766272617, 195.28464766272617, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.52450072169233, 195.52450072169233, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.76547428893474, 195.76547428893474, 196.00757907152695, 196.00757907152695, 196.00757907152695, 195.76547428893474, 195.76547428893474, 195.76547428893474,
196.12905905512292, 196.12905905512292, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.2508259252321, 196.2508259252321, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.2508259252321, 196.2508259252321, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.2508259252321, 196.2508259252321, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.49522585722917, 196.49522585722917, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.74079002890164, 196.74079002890164, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.9875297586907, 196.9875297586907, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.2354565250131, 197.2354565250131, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.609597878822, 197.609597878822, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.609597878822, 197.609597878822, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.609597878822, 197.609597878822, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.98647629017267, 197.98647629017267, 197.98647629017267, 198.11271769876066, 198.11271769876066, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.6208006992352, 198.6208006992352, 198.6208006992352,
198.4933092296128, 198.4933092296128, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.87673454026168, 198.87673454026168, 198.87673454026168, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.13394691827176, 199.13394691827176, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.39245079934784, 199.39245079934784, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.39245079934784, 199.39245079934784, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 200.04444765455628, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.17584401440791, 200.17584401440791, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.83790567772365, 200.83790567772365, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.83790567772365, 200.83790567772365, 200.97134633753387, 200.97134633753387, 200.97134633753387, 201.10513395354656, 201.10513395354656, 201.10513395354656, 201.10513395354656, 201.10513395354656, 200.97134633753387, 200.97134633753387, 200.97134633753387,
201.2392703446106, 201.2392703446106, 201.10513395354656, 201.10513395354656, 201.10513395354656, 201.2392703446106, 201.2392703446106, 200.97134633753387, 200.97134633753387, 200.97134633753387, 201.10513395354656, 201.10513395354656, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 201.2392703446106, 201.2392703446106, 201.2392703446106, 201.10513395354656, 201.10513395354656, 200.83790567772365, 200.83790567772365, 200.83790567772365, 201.2392703446106, 201.2392703446106, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.83790567772365, 200.83790567772365, 200.97134633753387, 200.97134633753387, 200.97134633753387, 201.10513395354656, 201.10513395354656, 201.10513395354656, 201.10513395354656, 201.10513395354656, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.83790567772365, 200.83790567772365, 201.10513395354656, 201.10513395354656, 201.10513395354656, 200.83790567772365, 200.83790567772365, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.7048101689802, 200.7048101689802, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.7048101689802, 200.7048101689802, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.7048101689802, 200.7048101689802, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.57205801974663, 200.57205801974663, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.17584401440791, 200.17584401440791, 199.91338589547337, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.17584401440791, 200.17584401440791,
200.04444765455628, 200.04444765455628, 200.04444765455628, 199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.65225934091603, 199.65225934091603, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.39245079934784, 199.39245079934784, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.26303660311532, 199.26303660311532, 199.13394691827176, 199.13394691827176, 199.13394691827176, 198.87673454026168, 198.87673454026168, 199.0051801062051, 199.0051801062051, 199.0051801062051, 198.87673454026168, 198.87673454026168, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.87673454026168, 198.87673454026168, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.4933092296128, 198.4933092296128, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.23926929228196, 198.23926929228196, 198.23926929228196, 197.98647629017267, 197.98647629017267, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.609597878822, 197.609597878822, 197.609597878822, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 196.86401223541463, 196.86401223541463, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.49522585722917, 196.49522585722917, 196.49522585722917,
196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.37288106250492, 196.37288106250492, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.2508259252321, 196.2508259252321, 196.2508259252321, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.64484677604037, 195.64484677604037, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.76547428893474, 195.76547428893474, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.1651380178859, 195.1651380178859, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.5717066395054, 194.5717066395054, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.68984828443138, 194.68984828443138, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.5717066395054, 194.5717066395054,
194.45383477106168, 194.45383477106168, 194.45383477106168, 194.68984828443138, 194.68984828443138, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.80826096883465, 194.80826096883465, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.5717066395054, 194.5717066395054, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.5717066395054, 194.5717066395054, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.28464766272617, 195.28464766272617, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.64484677604037, 195.64484677604037, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.64484677604037, 195.64484677604037, 195.64484677604037,
195.64484677604037, 195.64484677604037, 195.76547428893474, 195.76547428893474, 195.76547428893474, 196.00757907152695, 196.00757907152695, 196.00757907152695, 195.88638460334346, 195.88638460334346, 196.00757907152695, 196.00757907152695, 196.00757907152695, 195.88638460334346, 195.88638460334346, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.2508259252321, 196.2508259252321, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.74079002890164, 196.74079002890164, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 197.11134403855397, 197.11134403855397, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.73491789878778, 197.73491789878778, 197.860543531522, 197.860543531522, 197.860543531522, 198.11271769876066, 198.11271769876066, 197.98647629017267, 197.98647629017267, 197.98647629017267, 198.11271769876066, 198.11271769876066, 198.11271769876066,
198.11271769876066, 198.11271769876066, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.6208006992352, 198.6208006992352, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.4933092296128, 198.4933092296128, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.87673454026168, 198.87673454026168, 198.87673454026168, 199.0051801062051, 199.0051801062051, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.13394691827176, 199.13394691827176, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.65225934091603, 199.65225934091603, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.65225934091603, 199.65225934091603, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.57205801974663, 200.57205801974663, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.7048101689802, 200.7048101689802, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.7048101689802, 200.7048101689802, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802,
200.7048101689802, 200.7048101689802, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 201.10513395354656, 201.10513395354656, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 201.10513395354656, 201.10513395354656, 201.10513395354656, 201.10513395354656, 201.10513395354656, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.7048101689802, 200.7048101689802, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.97134633753387, 200.97134633753387, 200.97134633753387, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.83790567772365, 200.97134633753387, 200.97134633753387, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.57205801974663, 200.57205801974663, 200.57205801974663, 200.43964745191022, 200.43964745191022, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.57205801974663, 200.57205801974663, 200.43964745191022, 200.43964745191022, 200.43964745191022, 200.57205801974663, 200.57205801974663, 200.7048101689802, 200.7048101689802, 200.7048101689802, 200.30757670067032, 200.30757670067032, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.17584401440791, 200.17584401440791, 200.30757670067032, 200.30757670067032, 200.30757670067032,
200.30757670067032, 200.30757670067032, 200.30757670067032, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.65225934091603, 199.65225934091603, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.6208006992352, 198.6208006992352, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 197.860543531522, 197.860543531522, 197.860543531522, 197.609597878822, 197.609597878822, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.2354565250131, 197.2354565250131, 196.86401223541463, 196.86401223541463, 196.86401223541463, 197.2354565250131, 197.2354565250131, 196.86401223541463, 196.86401223541463, 196.86401223541463,
196.86401223541463, 196.86401223541463, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.49522585722917, 196.49522585722917, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.37288106250492, 196.37288106250492, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 194.92694596428123, 194.92694596428123, 194.92694596428123, 195.1651380178859, 195.1651380178859, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 195.04590455098526, 195.04590455098526, 195.04590455098526, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 194.80826096883465,
194.80826096883465, 194.80826096883465, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.92694596428123, 194.92694596428123, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.92694596428123, 194.92694596428123, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 195.04590455098526, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 195.04590455098526, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 195.04590455098526, 194.80826096883465, 194.80826096883465, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.40443479213207, 195.40443479213207, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233,
195.76547428893474, 195.76547428893474, 195.76547428893474, 195.64484677604037, 195.64484677604037, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.64484677604037, 195.64484677604037, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.12905905512292, 196.12905905512292, 195.88638460334346, 195.88638460334346, 195.88638460334346, 196.00757907152695, 196.00757907152695, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.37288106250492, 196.37288106250492, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.9875297586907, 196.9875297586907, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.9875297586907, 196.9875297586907, 196.9875297586907, 197.2354565250131, 197.2354565250131, 196.9875297586907, 196.9875297586907, 196.9875297586907, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.35986867835635, 197.35986867835635, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.35986867835635, 197.35986867835635, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.609597878822, 197.609597878822, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.860543531522, 197.860543531522, 198.11271769876066, 198.11271769876066, 198.11271769876066, 197.860543531522, 197.860543531522, 198.11271769876066,
198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.23926929228196, 198.23926929228196, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.7486086056315, 198.7486086056315, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.87673454026168, 198.87673454026168, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 199.0051801062051, 199.0051801062051, 199.0051801062051, 198.87673454026168, 198.87673454026168, 198.87673454026168, 199.13394691827176, 199.13394691827176, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.26303660311532, 199.26303660311532, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557])
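# NOTE: the hard-coded array above is immediately overwritten by the reassignment of `data` below,
# so only the second set of values takes effect in this script.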
data = np.array([197.860543531522, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.23926929228196, 198.23926929228196, 197.860543531522, 197.860543531522, 197.860543531522, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 198.11271769876066, 198.11271769876066, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.860543531522, 197.860543531522, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.860543531522, 197.860543531522, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.860543531522, 197.860543531522, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.860543531522, 197.860543531522, 197.860543531522, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.860543531522, 197.860543531522, 197.609597878822, 197.609597878822, 197.609597878822, 197.48458196924713, 197.48458196924713, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.2354565250131, 197.2354565250131, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 196.86401223541463, 196.86401223541463, 197.11134403855397, 197.11134403855397, 197.11134403855397, 196.9875297586907, 196.9875297586907, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463,
196.61786170941832, 196.61786170941832, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.74079002890164, 196.74079002890164, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.61786170941832, 196.61786170941832, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.37288106250492, 196.37288106250492, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.12905905512292, 196.12905905512292, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.76547428893474, 195.76547428893474, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.64484677604037, 195.64484677604037, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.52450072169233, 195.52450072169233, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.64484677604037, 195.64484677604037, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.40443479213207, 195.40443479213207, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233,
195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.40443479213207, 195.40443479213207, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.64484677604037, 195.64484677604037, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.88638460334346, 195.88638460334346, 195.88638460334346, 196.00757907152695, 196.00757907152695, 195.88638460334346, 195.88638460334346, 195.88638460334346, 196.12905905512292, 196.12905905512292, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.00757907152695, 196.00757907152695, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492,
196.37288106250492, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.61786170941832, 196.61786170941832, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.86401223541463, 196.86401223541463, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.86401223541463, 196.86401223541463, 196.86401223541463, 197.11134403855397, 197.11134403855397, 196.9875297586907, 196.9875297586907, 196.9875297586907, 197.2354565250131, 197.2354565250131, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.2354565250131, 197.2354565250131, 196.9875297586907, 196.9875297586907, 196.9875297586907, 197.2354565250131, 197.2354565250131, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.35986867835635, 197.35986867835635, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.73491789878778, 197.73491789878778, 198.23926929228196, 198.23926929228196, 198.23926929228196, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 198.11271769876066, 198.11271769876066, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.23926929228196, 198.23926929228196,
198.23926929228196, 198.4933092296128, 198.4933092296128, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.7486086056315, 198.7486086056315, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.87673454026168, 198.87673454026168, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.7486086056315, 198.7486086056315, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 199.0051801062051, 199.0051801062051, 199.0051801062051, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.39245079934784, 199.39245079934784, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.39245079934784, 199.39245079934784, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.65225934091603, 199.65225934091603, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.65225934091603, 199.65225934091603, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.65225934091603, 199.65225934091603, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.5221911576557, 199.5221911576557, 199.91338589547337, 199.91338589547337, 199.91338589547337, 200.17584401440791, 200.17584401440791, 200.17584401440791, 199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.78265702431617, 200.04444765455628, 200.04444765455628, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.91338589547337, 200.17584401440791, 200.17584401440791, 200.17584401440791, 199.78265702431617, 199.78265702431617, 200.04444765455628, 200.04444765455628, 200.04444765455628,
199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.78265702431617, 199.78265702431617, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.5221911576557, 199.5221911576557, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.5221911576557, 199.5221911576557, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.39245079934784, 199.39245079934784, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.13394691827176, 199.13394691827176, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.13394691827176, 199.13394691827176, 199.0051801062051, 199.0051801062051, 199.0051801062051, 198.87673454026168, 198.87673454026168, 199.0051801062051, 199.0051801062051, 199.0051801062051, 198.87673454026168, 198.87673454026168, 199.0051801062051, 199.0051801062051, 199.0051801062051, 198.87673454026168, 198.87673454026168, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.6208006992352, 198.6208006992352, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.11271769876066, 198.11271769876066, 198.11271769876066, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.860543531522, 197.860543531522,
197.73491789878778, 197.73491789878778, 197.73491789878778, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.11134403855397, 197.11134403855397, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.11134403855397, 197.11134403855397, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.37288106250492, 196.37288106250492, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.00757907152695, 196.00757907152695, 196.00757907152695, 195.88638460334346, 195.88638460334346, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.52450072169233, 195.52450072169233, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.40443479213207, 195.40443479213207, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.28464766272617, 195.28464766272617, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.04590455098526, 195.04590455098526, 194.92694596428123, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 195.1651380178859, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465,
194.80826096883465, 194.68984828443138, 194.68984828443138, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.33623142460067, 194.33623142460067, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.5717066395054, 194.5717066395054, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.33623142460067, 194.33623142460067, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.33623142460067, 194.33623142460067, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.33623142460067, 194.33623142460067, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.5717066395054, 194.5717066395054, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054,
194.45383477106168, 194.45383477106168, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.28464766272617, 195.28464766272617, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.28464766272617, 195.28464766272617, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.76547428893474, 195.76547428893474, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 196.00757907152695, 196.00757907152695, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321,
196.2508259252321, 196.2508259252321, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.98647629017267, 197.98647629017267, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.860543531522, 197.860543531522, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 197.98647629017267, 197.98647629017267, 197.98647629017267, 198.23926929228196, 198.23926929228196, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352,
198.6208006992352, 198.6208006992352, 198.6208006992352, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 199.13394691827176, 199.13394691827176, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.39245079934784, 199.39245079934784, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.13394691827176, 199.13394691827176, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.65225934091603, 199.65225934091603, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.78265702431617, 199.78265702431617, 200.04444765455628, 200.04444765455628, 200.04444765455628, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.04444765455628, 200.04444765455628, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.17584401440791, 200.17584401440791, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.43964745191022, 200.43964745191022, 200.30757670067032, 200.30757670067032, 200.30757670067032, 200.17584401440791, 200.17584401440791,
200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.30757670067032, 200.30757670067032, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.30757670067032, 200.30757670067032, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.17584401440791, 200.17584401440791, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 200.04444765455628, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.5221911576557, 199.5221911576557, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.26303660311532, 199.26303660311532, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.39245079934784, 199.39245079934784, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.13394691827176, 199.13394691827176, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.13394691827176, 199.13394691827176, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 198.87673454026168, 198.87673454026168, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.87673454026168, 198.87673454026168, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.36613261681248, 198.36613261681248, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.36613261681248, 198.36613261681248, 198.36613261681248,
198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.11271769876066, 198.11271769876066, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.73491789878778, 197.73491789878778, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.2354565250131, 197.2354565250131, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.2354565250131, 197.2354565250131, 197.2354565250131, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.37288106250492, 196.37288106250492, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.12905905512292, 196.12905905512292, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.12905905512292, 196.12905905512292, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.12905905512292, 196.12905905512292, 196.12905905512292, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.28464766272617, 195.28464766272617, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.28464766272617, 195.28464766272617, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.80826096883465,
194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.45383477106168, 194.45383477106168, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.45383477106168, 194.45383477106168, 193.9850200979796, 193.9850200979796, 193.9850200979796, 194.10182532165618, 194.10182532165618, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.86847846175363, 193.86847846175363, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 194.10182532165618, 194.10182532165618, 194.10182532165618, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.75219919984693, 193.75219919984693, 193.75219919984693, 193.75219919984693, 193.75219919984693, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.86847846175363, 193.86847846175363, 193.75219919984693, 193.75219919984693, 193.75219919984693, 193.86847846175363, 193.86847846175363, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.9850200979796, 193.9850200979796, 194.10182532165618, 194.10182532165618, 194.10182532165618, 193.86847846175363, 193.86847846175363, 193.9850200979796, 193.9850200979796,
193.9850200979796, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.21889535404324, 194.21889535404324, 194.21889535404324, 193.9850200979796, 193.9850200979796, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.10182532165618, 194.10182532165618, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.33623142460067, 194.33623142460067, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.45383477106168, 194.45383477106168, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.92694596428123, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 195.04590455098526, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.64484677604037,
195.64484677604037, 195.64484677604037, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.64484677604037, 195.64484677604037, 195.76547428893474, 195.76547428893474, 195.76547428893474, 196.00757907152695, 196.00757907152695, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.76547428893474, 196.12905905512292, 196.12905905512292, 195.88638460334346, 195.88638460334346, 195.88638460334346, 196.00757907152695, 196.00757907152695, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.61786170941832, 196.61786170941832, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.74079002890164, 196.74079002890164, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.9875297586907, 196.9875297586907, 196.86401223541463, 196.86401223541463, 196.86401223541463, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.35986867835635, 197.35986867835635, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.35986867835635, 197.35986867835635, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267,
197.98647629017267, 197.98647629017267, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.36613261681248, 198.36613261681248, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.36613261681248, 198.36613261681248, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.36613261681248, 198.36613261681248, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.7486086056315, 198.7486086056315, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.7486086056315, 198.7486086056315, 198.7486086056315, 199.0051801062051, 199.0051801062051, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.5221911576557, 199.5221911576557, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.78265702431617, 199.78265702431617, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 200.04444765455628, 199.91338589547337, 199.91338589547337, 200.17584401440791, 200.17584401440791, 200.17584401440791, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 199.91338589547337,
199.91338589547337, 199.91338589547337, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 200.17584401440791, 200.17584401440791, 200.17584401440791, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.65225934091603, 199.65225934091603, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.26303660311532, 199.26303660311532, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.13394691827176, 199.13394691827176, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.39245079934784, 199.39245079934784, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.87673454026168, 198.7486086056315, 198.7486086056315, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.36613261681248, 198.36613261681248,
198.36613261681248, 198.36613261681248, 198.36613261681248, 198.11271769876066, 198.11271769876066, 198.11271769876066, 198.4933092296128, 198.4933092296128, 198.11271769876066, 198.11271769876066, 198.11271769876066, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.860543531522, 197.860543531522, 197.860543531522, 197.609597878822, 197.609597878822, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.48458196924713, 197.48458196924713, 197.609597878822, 197.609597878822, 197.609597878822, 197.35986867835635, 197.35986867835635, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.35986867835635, 197.35986867835635, 196.9875297586907, 196.9875297586907, 196.9875297586907, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.86401223541463, 196.86401223541463, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 195.88638460334346, 195.88638460334346, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 195.04590455098526, 195.04590455098526, 195.04590455098526,
195.04590455098526, 195.04590455098526, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.45383477106168, 194.45383477106168, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.21889535404324, 194.21889535404324, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.86847846175363, 194.10182532165618, 194.10182532165618, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.86847846175363, 193.86847846175363, 193.86847846175363, 193.9850200979796,
193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 193.9850200979796, 193.9850200979796, 194.10182532165618, 194.10182532165618, 194.10182532165618, 193.86847846175363, 193.86847846175363, 194.21889535404324, 194.21889535404324, 194.21889535404324, 193.9850200979796, 193.9850200979796, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.33623142460067, 194.33623142460067, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.33623142460067, 194.33623142460067, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.45383477106168, 194.45383477106168, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.68984828443138, 194.68984828443138, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.68984828443138, 194.68984828443138, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.92694596428123, 194.92694596428123, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 194.80826096883465, 194.80826096883465, 194.80826096883465, 195.1651380178859, 195.1651380178859, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.1651380178859, 195.1651380178859, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.40443479213207, 195.40443479213207, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.40443479213207,
195.40443479213207, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.40443479213207, 195.40443479213207, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.76547428893474, 195.76547428893474, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.00757907152695, 196.00757907152695, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.49522585722917, 196.49522585722917, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.61786170941832, 196.61786170941832, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.9875297586907, 196.9875297586907, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 196.9875297586907, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.609597878822, 197.609597878822, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.73491789878778, 197.73491789878778, 197.73491789878778,
197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.860543531522, 197.98647629017267, 197.98647629017267, 198.23926929228196, 198.23926929228196, 198.23926929228196, 197.98647629017267, 197.98647629017267, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.36613261681248, 198.36613261681248, 198.4933092296128, 198.4933092296128, 198.4933092296128, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.7486086056315, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.0051801062051, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.5221911576557, 199.5221911576557, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.26303660311532, 199.26303660311532, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.26303660311532, 199.26303660311532, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.65225934091603, 199.65225934091603, 199.78265702431617, 199.78265702431617, 199.78265702431617, 200.04444765455628, 200.04444765455628, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 200.04444765455628, 199.91338589547337,
199.91338589547337, 199.91338589547337, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 199.91338589547337, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 199.91338589547337, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 200.04444765455628, 199.91338589547337, 199.91338589547337, 199.91338589547337, 200.04444765455628, 200.04444765455628, 199.91338589547337, 199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.91338589547337, 199.91338589547337, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.91338589547337, 199.91338589547337, 199.65225934091603, 199.65225934091603, 199.65225934091603, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.78265702431617, 199.65225934091603, 199.65225934091603, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.5221911576557, 199.65225934091603, 199.65225934091603, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.5221911576557, 199.5221911576557, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.39245079934784, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.26303660311532, 199.26303660311532, 199.26303660311532, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 199.13394691827176, 198.7486086056315, 198.7486086056315, 199.0051801062051, 199.0051801062051, 199.0051801062051, 198.7486086056315, 198.7486086056315,
198.87673454026168, 198.87673454026168, 198.87673454026168, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.36613261681248, 198.36613261681248, 198.11271769876066, 198.11271769876066, 198.11271769876066, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.860543531522, 197.860543531522, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.73491789878778, 197.609597878822, 197.609597878822, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.35986867835635, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 197.11134403855397, 196.9875297586907, 196.9875297586907, 197.11134403855397, 197.11134403855397, 197.11134403855397, 196.86401223541463, 196.86401223541463, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.61786170941832, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.49522585722917, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.2508259252321, 196.2508259252321, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.00757907152695, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.76547428893474, 195.76547428893474, 195.76547428893474, 195.64484677604037, 195.64484677604037, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.52450072169233, 195.52450072169233, 195.28464766272617, 195.28464766272617,
195.28464766272617, 195.40443479213207, 195.40443479213207, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.1651380178859, 195.1651380178859, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.68984828443138, 194.68984828443138, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.33623142460067, 194.33623142460067, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.45383477106168, 194.45383477106168, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.33623142460067, 194.33623142460067, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.33623142460067, 194.33623142460067, 194.33623142460067, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 193.9850200979796, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 194.10182532165618, 193.9850200979796, 193.9850200979796, 193.9850200979796, 194.33623142460067, 194.33623142460067,
194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.21889535404324, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.21889535404324, 194.21889535404324, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.45383477106168, 194.45383477106168, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.45383477106168, 194.45383477106168, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.68984828443138, 194.68984828443138, 194.33623142460067, 194.33623142460067, 194.33623142460067, 194.5717066395054, 194.5717066395054, 194.45383477106168, 194.45383477106168, 194.45383477106168, 194.33623142460067, 194.33623142460067, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.68984828443138, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.5717066395054, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.80826096883465, 194.80826096883465, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 194.92694596428123, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.04590455098526, 195.04590455098526, 195.04590455098526, 195.28464766272617, 195.28464766272617, 195.1651380178859, 195.1651380178859, 195.1651380178859, 195.40443479213207, 195.40443479213207, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.28464766272617, 195.40443479213207, 195.40443479213207,
195.40443479213207, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.52450072169233, 195.64484677604037, 195.64484677604037, 195.40443479213207, 195.40443479213207, 195.40443479213207, 195.52450072169233, 195.52450072169233, 195.64484677604037, 195.64484677604037, 195.64484677604037, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 195.88638460334346, 196.00757907152695, 196.00757907152695, 195.76547428893474, 195.76547428893474, 195.76547428893474, 196.00757907152695, 196.00757907152695, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.12905905512292, 196.00757907152695, 196.00757907152695, 196.00757907152695, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.2508259252321, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.37288106250492, 196.49522585722917, 196.49522585722917, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.74079002890164, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 196.86401223541463, 197.11134403855397, 197.11134403855397, 197.11134403855397, 196.9875297586907, 196.9875297586907, 196.9875297586907, 197.11134403855397, 197.11134403855397, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.2354565250131, 197.35986867835635, 197.35986867835635, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.48458196924713, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822, 197.609597878822,
197.73491789878778, 197.73491789878778, 197.73491789878778, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 197.98647629017267, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.11271769876066, 198.11271769876066, 198.11271769876066, 197.98647629017267, 197.98647629017267, 198.23926929228196, 198.23926929228196, 198.23926929228196, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.36613261681248, 198.4933092296128, 198.4933092296128, 198.6208006992352, 198.6208006992352, 198.6208006992352, 198.4933092296128, 198.4933092296128, 198.7486086056315, 198.7486086056315, 198.7486086056315, 198.6208006992352, 198.6208006992352, 198.87673454026168, 198.87673454026168, 198.87673454026168])
#cooling_data = np.array([106.5000, 106.5000, 105.5330, 105.5330, 104.5670, 104.5670, 104.5670, 103.6000, 103.6000, 102.6330, 102.6330, 101.6670, 101.6670, 100.7000, 100.7000, 99.7333, 99.7333, 98.7667, 98.7667, 97.8000, 97.8000, 96.8333, 95.8667, 95.8667, 94.9000, 94.9000, 93.9333, 93.9333, 92.9667, 92.9667, 92.0000, 92.0000, 91.0333, 91.0333, 91.0333, 90.0667, 90.0667, 89.1000, 89.1000, 88.1333, 88.1333, 88.1333, 87.1667, 87.1667, 86.2000, 86.2000, 86.2000, 85.2333, 85.2333, 84.2667, 84.2667, 83.3000, 83.3000, 83.3000, 82.3333, 82.3333, 82.3333, 81.3667, 81.3667, 80.4000, 80.4000, 80.4000, 79.4333, 79.4333, 79.4333, 78.4667, 78.4667, 77.5000, 77.5000, 77.5000, 77.5000, 76.5333, 76.5333, 75.5667, 75.5667, 75.5667, 74.6000, 74.6000, 74.6000, 74.6000, 73.6333, 73.6333, 73.6333, 72.6667, 72.6667, 72.6667, 71.7000, 71.7000, 71.7000, 71.7000, 70.7333, 70.7333, 70.7333, 69.7667, 69.7667, 69.7667, 69.7667, 68.8000, 68.8000, 68.8000, 67.8333, 67.8333, 67.8333, 67.8333, 66.8667, 66.8667, 66.8667, 66.8667, 65.9000, 65.9000, 65.9000, 65.9000, 64.9333, 64.9333, 64.9333, 63.9667, 63.9667, 63.9667, 63.9667, 63.9667, 63.0000, 63.0000, 63.0000, 63.0000, 62.0333, 62.0333, 62.0333, 62.0333, 61.0667, 61.0667, 61.0667, 61.0667, 60.1000, 60.1000, 60.1000, 60.1000, 60.1000, 59.1333])
times = np.array(range(len(data)))*0.1
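# The long array above looks like a recorded process-temperature trace; the 0.1
# factor here assumes one sample every 0.1 s, matching MyClass.sleep below.
# MyClass is a minimal stub standing in for whatever controller object Autotune
# normally wraps; judging from this script it only needs a `sleep` attribute
# (that is an assumption based on how it is used here, not a documented API).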
class MyClass:
    def __init__(self):
        self.sleep = 0.1
tune = Autotune(MyClass())
tune.high_cycle_power = 0.540748960518
tune.setpoint_power = 0.375799612552
tune.tuning_algorithm = "ZN"
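# "ZN" presumably selects the classic Ziegler-Nichols tuning rules; the
# high_cycle_power and setpoint_power values above are assumed to be taken
# from the run that produced the recorded trace.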
peaks = Util.detect_peaks(data)
data = Util.smooth(data)
peaks = Util.detect_peaks(data, show=True)
valleys = Util.detect_peaks(data, valley=True, show=True)
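# The trace is smoothed and peaks/valleys are re-detected on the smoothed data
# (show=True also plots the detected extrema) before calling calculate_PID,
# which presumably derives Kp/Ki/Kd from the oscillation amplitude and period.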
tune.calculate_PID(data, times, peaks, valleys)
print(tune.Kp)
print(tune.Ki)
print(tune.Kd)
|
gpl-3.0
|
PythonCharmers/orange3
|
Orange/widgets/classify/owclassificationtreegraph.py
|
2
|
18846
|
import sys
import numpy
from sklearn.tree._tree import TREE_LEAF
from Orange.widgets.classify.owtreeviewer2d import *
from Orange.data import Table
from Orange.classification.tree import TreeClassifier
from Orange.preprocess.transformation import Indicator
from Orange.widgets.utils.colorpalette import ColorPaletteDlg
from Orange.widgets.settings import \
    Setting, ContextSetting, ClassValuesContextHandler
from Orange.widgets import gui
class OWClassificationTreeGraph(OWTreeViewer2D):
    name = "Classification Tree Viewer"
    description = "Graphical visualization of a classification tree."
    icon = "icons/ClassificationTree.svg"
    settingsHandler = ClassValuesContextHandler()
    target_class_index = ContextSetting(0)
    color_settings = Setting(None)
    selected_color_settings_index = Setting(0)
    inputs = [("Classification Tree", TreeClassifier, "ctree")]
    outputs = [("Data", Table)]
    def __init__(self):
        super().__init__()
        self.domain = None
        self.classifier = None
        self.dataset = None
        self.clf_dataset = None
        self.scene = TreeGraphicsScene(self)
        self.scene_view = TreeGraphicsView(self.scene)
        self.scene_view.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)
        self.mainArea.layout().addWidget(self.scene_view)
        self.toggle_zoom_slider()
        self.scene.selectionChanged.connect(self.update_selection)
        box = gui.widgetBox(self.controlArea, "Nodes", addSpace=True)
        self.target_combo = gui.comboBox(
            box, self, "target_class_index", orientation=0, items=[],
            label="Target class", callback=self.toggle_target_class,
            contentsLength=8)
        gui.separator(box)
        gui.button(box, self, "Set Colors", callback=self.set_colors)
        dlg = self.create_color_dialog()
        self.scene.colorPalette = dlg.getDiscretePalette("colorPalette")
        gui.rubber(self.controlArea)
def sendReport(self):
if self.tree:
tclass = str(self.targetCombo.currentText())
tsize = "%i nodes, %i leaves" % (orngTree.countNodes(self.tree),
orngTree.countLeaves(self.tree))
else:
tclass = tsize = "N/A"
self.reportSettings(
"Information",
[("Target class", tclass),
("Line widths",
["Constant", "Proportion of all instances",
"Proportion of parent's instances"][self.line_width_method]),
("Tree size", tsize)])
super().sendReport()
def set_colors(self):
dlg = self.create_color_dialog()
if dlg.exec_():
self.color_settings = dlg.getColorSchemas()
self.selected_color_settings_index = dlg.selectedSchemaIndex
self.scene.colorPalette = dlg.getDiscretePalette("colorPalette")
self.scene.update()
self.toggle_node_color()
def create_color_dialog(self):
c = ColorPaletteDlg(self, "Color Palette")
c.createDiscretePalette("colorPalette", "Discrete Palette")
c.setColorSchemas(self.color_settings,
self.selected_color_settings_index)
return c
def set_node_info(self):
for node in self.scene.nodes():
node.set_rect(QRectF())
self.update_node_info(node)
w = max([n.rect().width() for n in self.scene.nodes()] + [0])
if w > self.max_node_width:
w = self.max_node_width
for node in self.scene.nodes():
node.set_rect(QRectF(node.rect().x(), node.rect().y(),
w, node.rect().height()))
self.scene.fix_pos(self.root_node, 10, 10)
def update_node_info(self, node):
distr = node.get_distribution()
total = int(node.num_instances())
if self.target_class_index:
tabs = distr[self.target_class_index - 1]
text = ""
else:
modus = node.majority()
tabs = distr[modus]
text = self.domain.class_vars[0].values[modus] + "<br/>"
if tabs > 0.999:
text += "100%, {}/{}".format(total, total)
else:
text += "{:2.1f}%, {}/{}".format(100 * tabs,
int(total * tabs), total)
if not node.is_leaf():
attribute = self.domain.attributes[node.attribute()]
if isinstance(attribute.compute_value, Indicator):
attribute = attribute.compute_value.variable
text += "<hr/>{}".format(attribute.name)
node.setHtml('<p style="line-height: 120%; margin-bottom: 0">'
'{}</p>'.
format(text))
def activate_loaded_settings(self):
if not self.tree:
return
super().activate_loaded_settings()
self.set_node_info()
self.toggle_node_color()
def toggle_node_size(self):
self.set_node_info()
self.scene.update()
self.scene_view.repaint()
def toggle_node_color(self):
palette = self.scene.colorPalette
for node in self.scene.nodes():
distr = node.get_distribution()
total = numpy.sum(distr)
if self.target_class_index:
p = distr[self.target_class_index - 1] / total
color = palette[self.target_class_index].light(200 - 100 * p)
else:
modus = node.majority()
p = distr[modus] / (total or 1)
color = palette[int(modus)].light(400 - 300 * p)
node.backgroundBrush = QBrush(color)
self.scene.update()
def toggle_target_class(self):
self.toggle_node_color()
self.set_node_info()
self.scene.update()
def ctree(self, clf=None):
self.clear()
self.closeContext()
self.classifier = clf
if clf is None:
self.info.setText('No tree.')
self.tree = None
self.root_node = None
self.dataset = None
else:
self.tree = clf.skl_model.tree_
self.domain = clf.domain
self.dataset = getattr(clf, "instances", None)
if self.dataset is not None and self.dataset.domain != self.domain:
self.clf_dataset = \
Table.from_table(self.classifier.domain, self.dataset)
else:
self.clf_dataset = self.dataset
self.target_combo.clear()
self.target_combo.addItem("None")
self.target_combo.addItems(self.domain.class_vars[0].values)
self.target_class_index = 0
self.openContext(self.domain.class_var)
self.root_node = self.walkcreate(self.tree, 0, None)
self.info.setText(
'{} nodes, {} leaves'.
format(self.tree.node_count,
numpy.count_nonzero(
self.tree.children_left == TREE_LEAF)))
self.scene.fix_pos(self.root_node, self._HSPACING, self._VSPACING)
self.activate_loaded_settings()
self.scene_view.centerOn(self.root_node.x(), self.root_node.y())
self.update_node_tooltips()
self.scene.update()
self.send("Data", None)
def walkcreate(self, tree, node_id, parent=None):
node = ClassificationTreeNode(tree, self.domain, parent, None,
self.scene, i=node_id)
if parent:
parent.graph_add_edge(
GraphicsEdge(None, self.scene, node1=parent, node2=node))
left_child_index = tree.children_left[node_id]
right_child_index = tree.children_right[node_id]
if left_child_index != TREE_LEAF:
self.walkcreate(tree, node_id=left_child_index, parent=node)
if right_child_index != TREE_LEAF:
self.walkcreate(tree, node_id=right_child_index, parent=node)
return node
def node_tooltip(self, node):
if node.i > 0:
text = " AND<br/>".join(
"%s %s %s" % (n, s, v) for n, s, v in node.rule())
else:
text = "Root"
return text
def update_selection(self):
if self.dataset is None or self.classifier is None or self.tree is None:
return
items = [item for item in self.scene.selectedItems()
if isinstance(item, ClassificationTreeNode)]
selected_leaves = [_leaf_indices(self.tree, item.node_id)
for item in items]
if selected_leaves:
selected_leaves = numpy.unique(numpy.hstack(selected_leaves))
all_leaves = _leaf_indices(self.tree, 0)
if len(selected_leaves) > 0:
ind = numpy.searchsorted(all_leaves, selected_leaves, side="left")
leaf_samples = _assign_samples(self.tree, self.clf_dataset.X)
leaf_samples = [leaf_samples[i] for i in ind]
indices = numpy.hstack(leaf_samples)
else:
indices = []
if len(indices):
data = self.dataset[indices]
else:
data = None
self.send("Data", data)
class PieChart(QGraphicsRectItem):
def __init__(self, dist, r, parent, scene):
super().__init__(parent, scene)
self.dist = dist
self.r = r
# noinspection PyPep8Naming
def setR(self, r):
self.prepareGeometryChange()
self.r = r
def boundingRect(self):
return QRectF(-self.r, -self.r, 2*self.r, 2*self.r)
def paint(self, painter, option, widget=None):
dist_sum = sum(self.dist)
start_angle = 0
colors = self.scene().colorPalette
for i in range(len(self.dist)):
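            # QPainter angles are specified in 1/16ths of a degree, hence the factor of 16.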
angle = self.dist[i] * 16 * 360. / dist_sum
if angle == 0:
continue
painter.setBrush(QBrush(colors[i]))
painter.setPen(QPen(colors[i]))
painter.drawPie(-self.r, -self.r, 2 * self.r, 2 * self.r,
int(start_angle), int(angle))
start_angle += angle
painter.setPen(QPen(Qt.white))
painter.setBrush(QBrush())
painter.drawEllipse(-self.r, -self.r, 2 * self.r, 2 * self.r)
def _subnode_range(tree, node_id):
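    # Return the [start, stop) range of node ids spanned by the subtree rooted at
    # node_id; this assumes the depth-first node numbering produced by sklearn's
    # tree builder, so a subtree occupies a contiguous block of ids.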
right = left = node_id
if tree.children_left[left] == TREE_LEAF:
assert tree.children_right[node_id] == TREE_LEAF
return node_id, node_id
else:
left = tree.children_left[left]
        # run down to the rightmost node
while tree.children_right[right] != TREE_LEAF:
right = tree.children_right[right]
return left, right + 1
def _leaf_indices(tree, node_id):
start, stop = _subnode_range(tree, node_id)
if start == stop:
# leaf
return numpy.array([node_id], dtype=int)
else:
isleaf = tree.children_left[start: stop] == TREE_LEAF
assert numpy.flatnonzero(isleaf).size > 0
return start + numpy.flatnonzero(isleaf)
def _assign_samples(tree, X):
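    # Recursively partition the row indices of X over the tree's leaves, following
    # sklearn's convention that samples with feature value <= threshold go to the
    # left child.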
def assign(node_id, indices):
if tree.children_left[node_id] == TREE_LEAF:
return [indices]
else:
feature_idx = tree.feature[node_id]
thresh = tree.threshold[node_id]
column = X[indices, feature_idx]
leftmask = column <= thresh
leftind = assign(tree.children_left[node_id], indices[leftmask])
rightind = assign(tree.children_right[node_id], indices[~leftmask])
return list.__iadd__(leftind, rightind)
N, _ = X.shape
items = numpy.arange(N, dtype=int)
leaf_indices = assign(0, items)
return leaf_indices
class ClassificationTreeNode(GraphicsNode):
def __init__(self, tree, domain, parent=None, parent_item=None,
scene=None, i=0, distr=None):
super().__init__(tree, parent, parent_item, scene)
self.distribution = distr
self.tree = tree
self.domain = domain
self.i = i
self.node_id = i
self.parent = parent
self.pie = PieChart(self.get_distribution(), 8, self, scene)
fm = QFontMetrics(self.document().defaultFont())
self.attr_text_w = fm.width(str(self.attribute() if self.attribute()
else ""))
self.attr_text_h = fm.lineSpacing()
self.line_descent = fm.descent()
self._rect = None
def get_distribution(self):
"""
:return: Distribution of class values.
"""
if self.is_leaf():
counts = self.tree.value[self.node_id]
else:
leaf_ind = _leaf_indices(self.tree, self.node_id)
values = self.tree.value[leaf_ind]
counts = numpy.sum(values, axis=0)
assert counts.shape[0] == 1, "n_outputs > 1 "
counts = counts[0]
counts_sum = numpy.sum(counts)
if counts_sum > 0:
counts /= counts_sum
return counts
def num_instances(self):
"""
:return: Number of instances in a particular node.
"""
return self.tree.n_node_samples[self.i]
def split_condition(self):
"""
:return: split condition to reach a particular node.
"""
if self.i > 0:
attribute = self.domain.attributes[self.attribute()]
parent_attr = self.domain.attributes[self.parent.attribute()]
parent_attr_cv = parent_attr.compute_value
is_left_child = self.tree.children_left[self.parent.i] == self.i
if isinstance(parent_attr_cv, Indicator) and \
hasattr(parent_attr_cv.variable, "values"):
values = parent_attr_cv.variable.values
return values[abs(parent_attr_cv.value - is_left_child)] \
if len(values) == 2 \
else "≠ " * is_left_child + values[parent_attr_cv.value]
else:
thresh = self.tree.threshold[self.parent.i]
return "%s %s" % ([">", "<="][is_left_child],
attribute.str_val(thresh))
else:
return ""
def rule(self):
"""
:return:
Rule to reach node as list of tuples (attr index, sign, threshold)
"""
# TODO: this is easily extended to Classification Rules-compatible form
return self.rulew(i=self.i)
def rulew(self, i=0):
"""
:param i:
Index of current node.
:return:
Rule to reach node i, represented as list of tuples (attr name,
sign, threshold)
"""
if i > 0:
parent_attr = self.domain.attributes[self.parent.attribute()]
parent_attr_cv = parent_attr.compute_value
is_left_child = self.tree.children_left[self.parent.i] == i
pr = self.parent.rule()
if isinstance(parent_attr_cv, Indicator) and \
hasattr(parent_attr_cv.variable, "values"):
values = parent_attr_cv.variable.values
attr_name = parent_attr_cv.variable.name
sign = ["=", "≠"][is_left_child * (len(values) != 2)]
value = values[abs(parent_attr_cv.value -
is_left_child * (len(values) == 2))]
else:
attr_name = parent_attr.name
sign = [">", "<="][is_left_child]
value = "%.3f" % self.tree.threshold[self.parent.i]
pr.append((attr_name, sign, value))
return pr
else:
return []
def is_leaf(self):
"""
:return: Node is leaf
"""
return self.tree.children_left[self.node_id] < 0 and \
self.tree.children_right[self.node_id] < 0
def attribute(self):
"""
:return: Node attribute index.
"""
return self.tree.feature[self.node_id]
def majority(self):
"""
:return:
Majority class at node.
"""
return numpy.argmax(self.get_distribution())
def update_contents(self):
self.prepareGeometryChange()
self.setTextWidth(-1)
self.setTextWidth(self.document().idealWidth())
self.droplet.setPos(self.rect().center().x(), self.rect().height())
self.droplet.setVisible(bool(self.branches))
self.pie.setPos(self.rect().right(), self.rect().center().y())
fm = QFontMetrics(self.document().defaultFont())
self.attr_text_w = fm.width(str(self.attribute() if self.attribute()
else ""))
self.attr_text_h = fm.lineSpacing()
self.line_descent = fm.descent()
def rect(self):
if self._rect and self._rect.isValid():
return self._rect
else:
return QRectF(QPointF(0, 0), self.document().size()).\
adjusted(0, 0, 8, 0) | \
(getattr(self, "_rect") or QRectF(0, 0, 1, 1))
def set_rect(self, rect):
self.prepareGeometryChange()
rect = QRectF() if rect is None else rect
self._rect = rect
self.setTextWidth(-1)
self.update_contents()
self.update()
def boundingRect(self):
if hasattr(self, "attr"):
attr_rect = QRectF(QPointF(0, -self.attr_text_h),
QSizeF(self.attr_text_w, self.attr_text_h))
else:
attr_rect = QRectF(0, 0, 1, 1)
rect = self.rect().adjusted(-5, -5, 5, 5)
return rect | attr_rect
def paint(self, painter, option, widget=None):
rect = self.rect()
if self.isSelected():
option.state ^= QStyle.State_Selected
painter.setFont(self.document().defaultFont())
draw_text = str(self.split_condition())
painter.drawText(QPointF(4, -self.line_descent - 1), draw_text)
painter.save()
painter.setBrush(self.backgroundBrush)
if self.isSelected():
painter.setPen(QPen(QBrush(Qt.black), 2))
else:
painter.setPen(QPen(Qt.gray))
if self.is_leaf():
painter.drawRect(rect.adjusted(-3, 0, 0, 0))
else:
painter.drawRoundedRect(rect.adjusted(-3, 0, 0, 0), 4, 4)
painter.restore()
painter.setClipRect(rect)
return QGraphicsTextItem.paint(self, painter, option, widget)
if __name__ == "__main__":
from Orange.classification.tree import TreeLearner
a = QApplication(sys.argv)
ow = OWClassificationTreeGraph()
data = Table("iris")
clf = TreeLearner(max_depth=3)(data)
clf.instances = data
ow.ctree(clf)
ow.show()
ow.raise_()
a.exec_()
ow.saveSettings()
|
gpl-3.0
|
ningchi/scikit-learn
|
examples/cluster/plot_mini_batch_kmeans.py
|
265
|
4081
|
"""
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair each cluster center
# with its closest counterpart.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
|
bsd-3-clause
|
ningchi/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
15
|
49665
|
from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, [(), ()]), 0)
assert_equal(accuracy_score(y1, y2, normalize=False), 1)
assert_equal(accuracy_score(y1, y1, normalize=False), 2)
assert_equal(accuracy_score(y2, y2, normalize=False), 2)
assert_equal(accuracy_score(y2, [(), ()], normalize=False), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
@ignore_warnings
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
    # Duplicate values with precision-recall require different
    # processing than when computing the AUC of a ROC, because the
    # precision-recall curve is a decreasing curve
    # The following situation corresponds to a perfect
    # test statistic, so the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
    # Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've correctly
    # sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
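        # Matthews correlation coefficient:
        # MCC = (tp*tn - fp*fn) / sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))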
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[4, 1, 2, 3], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
    # Check that pathological cases do not produce NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
@ignore_warnings # sequence of sequences is deprecated
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
make_ml = make_multilabel_classification
_, y_true_ll = make_ml(n_features=1, n_classes=n_classes, random_state=0,
n_samples=n_samples)
_, y_pred_ll = make_ml(n_features=1, n_classes=n_classes, random_state=1,
n_samples=n_samples)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
lb = MultiLabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, [(), ()]), 1)
assert_equal(zero_one_loss(y2, [tuple(), (10, )]), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, [(), ()]), 0.75)
assert_equal(hamming_loss(y1, [tuple(), (10, )]), 0.625)
assert_almost_equal(hamming_loss(y2, [tuple(), (10, )],
classes=np.arange(11)), 0.1818, 2)
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, [(), ()]), 0)
# |y3 inter y4 | = [0, 1, 1]
# |y3 union y4 | = [2, 1, 3]
y3 = [(0,), (1,), (3,)]
y4 = [(4,), (4,), (5, 6)]
assert_almost_equal(jaccard_similarity_score(y3, y4), 0)
# |y5 inter y6 | = [0, 1, 1]
# |y5 union y6 | = [2, 1, 3]
y5 = [(0,), (1,), (2, 3)]
y6 = [(1,), (1,), (2, 0)]
assert_almost_equal(jaccard_similarity_score(y5, y6), (1 + 1 / 3) / 3)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true_ll = [(0,), (1,), (2, 3)]
y_pred_ll = [(1,), (1,), (2, 0)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
#tp = [0, 1, 1, 0]
#fn = [1, 0, 0, 1]
#fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
        # Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
        # Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true_ll = [(1,), (2,), (2, 3)]
y_pred_ll = [(4,), (4,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(1, 5)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
        # Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true_ll = [(1,), (0,), (2, 1,)]
y_pred_ll = [tuple(), (3,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
@ignore_warnings # sequence of sequences is deprecated
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
SEQ = 'multilabel-sequences'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(SEQ, [[2, 3], [1], [3]]),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(SEQ, SEQ): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(IND, SEQ): None,
(MC, SEQ): None,
(BIN, SEQ): None,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(SEQ, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(SEQ, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(SEQ, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, SEQ, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
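    # Multiclass hinge loss: mean over samples of max(0, 1 - s_true + max_other),
    # where max_other is the largest decision value among the non-true classes
    # (that largest value is picked out explicitly below).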
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, 0.24],
[-2.36, -0.79, -0.27, 0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
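    # The Brier score is the mean squared difference between the predicted
    # probability and the binary outcome.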
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
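# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original test module): the
# Brier score exercised above is just the mean squared difference between the
# predicted positive-class probability and the binary outcome, so it can be
# re-derived by hand. The helper below is local to this sketch and is not
# collected as a test.
def _manual_brier_score(y_true, y_prob):
    """Mean of (p_i - y_i)**2 over all samples."""
    y_true = np.asarray(y_true, dtype=float)
    y_prob = np.asarray(y_prob, dtype=float)
    return np.mean((y_prob - y_true) ** 2)
# e.g. _manual_brier_score([0, 1, 1, 0, 1, 1], [0.1, 0.8, 0.9, 0.3, 1., 0.95])
# agrees with brier_score_loss on the same inputs (approximately 0.0254).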
|
bsd-3-clause
|
carrillo/scikit-learn
|
examples/linear_model/plot_omp.py
|
385
|
2263
|
"""
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |w|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
|
bsd-3-clause
|
sirrice/pygg
|
tests/pygg_tests.py
|
2
|
15120
|
import unittest
import io
import pandas
import tempfile
import os.path
import pygg
import pandas.util.testing as pdt
class IPythonTests(unittest.TestCase):
"""Test IPython integration"""
def setUp(self):
"""Setup IPython tests, skipping if IPython isn't present"""
try:
import IPython
except ImportError:
self.skipTest("Couldn't import IPython")
def testSizing(self):
"""Test that computing R sizes works properly"""
self.assertAlmostEqual(pygg.size_r_img_inches(width=800, height=800),
(pygg.R_IMAGE_SIZE, pygg.R_IMAGE_SIZE))
self.assertAlmostEqual(pygg.size_r_img_inches(width=400, height=400),
(pygg.R_IMAGE_SIZE, pygg.R_IMAGE_SIZE))
self.assertAlmostEqual(pygg.size_r_img_inches(width=800, height=400),
(pygg.R_IMAGE_SIZE, pygg.R_IMAGE_SIZE / 2.))
self.assertAlmostEqual(pygg.size_r_img_inches(width=400, height=800),
(pygg.R_IMAGE_SIZE, pygg.R_IMAGE_SIZE * 2.))
def testIPython(self):
"""Test that gg_ipython returns a IPython formatted Image"""
p = pygg.ggplot('diamonds', pygg.aes(x='carat', y='price'))
p += pygg.geom_point()
img = pygg.gg_ipython(p, data=None)
self.assertIsNotNone(img.data)
self.assertEqual(img.format, "jpeg")
self.assertEqual(img.width, pygg.IPYTHON_IMAGE_SIZE)
self.assertEqual(img.height, pygg.IPYTHON_IMAGE_SIZE)
img = pygg.gg_ipython(p, data=None, width=600, height=400)
self.assertEqual(img.width, 600)
self.assertEqual(img.height, 400)
img = pygg.gg_ipython(p, data=None, width=600)
self.assertEqual(img.width, 600)
self.assertEqual(img.height, 600)
class TestUnits(unittest.TestCase):
"""Basic unit testing for pygg"""
def testIsDataFrame(self):
"""Test that is_pandas_df works"""
df = pandas.read_csv(io.StringIO(IRIS_DATA_CSV))
self.assertTrue(pygg.is_pandas_df(df))
self.assertTrue(pygg.is_pandas_df(df[0:1]))
self.assertFalse(pygg.is_pandas_df(df.SepalLength))
self.assertFalse(pygg.is_pandas_df(1))
self.assertFalse(pygg.is_pandas_df(1.0))
self.assertFalse(pygg.is_pandas_df([]))
self.assertFalse(pygg.is_pandas_df([1]))
self.assertFalse(pygg.is_pandas_df({}))
self.assertFalse(pygg.is_pandas_df({'a': 1}))
def check_me(self, stmt, expectation):
self.assertEqual(stmt.r.replace(" ", ""), expectation)
def testDataPyWithDF(self):
df = pandas.DataFrame({'a': [1, 2], 'b': [3, 4]})
datao = pygg.data_py(df)
dffile, expr = datao.fname, str(datao)
iodf = pandas.read_csv(dffile)
pdt.assert_frame_equal(df, iodf)
def testDataPyLoadStmtPlain(self):
df = pandas.DataFrame({'a': [1, 2], 'b': [3, 4]})
datao = pygg.data_py(df)
dffile, expr = datao.fname, str(datao)
self.assertEqual(expr,
'data = read.csv("{}",sep=",")'.format(dffile))
def testDataPyLoadStmtArgs(self):
df = pandas.DataFrame({'a': [1, 2], 'b': [3, 4]})
datao = pygg.data_py(df, 1, kwd=2)
dffile, expr = datao.fname, str(datao)
expected = 'data = read.csv("{}",1,kwd=2,sep=",")'.format(dffile)
self.assertEqual(expr, expected)
def testDataPyWithDict(self):
src = {'a': [1, 2], 'b': [3, 4]}
datao = pygg.data_py(src)
dffile, expr = datao.fname, str(datao)
iodf = pandas.read_csv(dffile)
pdt.assert_frame_equal(pandas.DataFrame(src), iodf)
def testDataPyWithListOfDict(self):
src = [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}]
datao = pygg.data_py(src)
dffile, expr = datao.fname, str(datao)
iodf = pandas.read_csv(dffile)
pdt.assert_frame_equal(pandas.DataFrame({'a': [1, 2], 'b': [3, 4]}),
pandas.read_csv(dffile))
def testDataPyWithString(self):
src = "my.csv"
datao = pygg.data_py(src)
dffile, expr = datao.fname, str(datao)
self.assertEqual(dffile, src)
self.assertEqual(expr, 'data = read.csv("{}",sep=",")'.format(src))
def testGGStatementToR(self):
"""Test that GGStatement converts to R properly"""
self.check_me(pygg.geom_point(), "geom_point()")
self.check_me(pygg.geom_point(size=1.0), "geom_point(size=1.0)")
self.check_me(pygg.geom_point(size=1.0, alpha=2.0),
"geom_point(alpha=2.0,size=1.0)")
def testGGStatementsToR(self):
"""Test that GGStatement converts to R properly"""
self.check_me(pygg.geom_point(), "geom_point()")
self.check_me(pygg.geom_bar(), "geom_bar()")
self.check_me(pygg.geom_point() + pygg.geom_bar(),
"geom_point()+geom_bar()")
self.check_me(pygg.geom_bar() + pygg.geom_point(),
"geom_bar()+geom_point()")
def testPython2RTypes(self):
"""Test GGStatement converts many python types properly"""
self.check_me(pygg.geom_point(a=1), "geom_point(a=1)")
self.check_me(pygg.geom_point(a=None), "geom_point(a=NA)")
self.check_me(pygg.geom_point(a=1.0), "geom_point(a=1.0)")
self.check_me(pygg.geom_point(a=1e-2), "geom_point(a=0.01)")
self.check_me(pygg.geom_point(a="foo"), 'geom_point(a=foo)')
self.check_me(pygg.geom_point(a=pygg.esc("foo")), 'geom_point(a="foo")')
self.check_me(pygg.geom_point(a=True), 'geom_point(a=TRUE)')
self.check_me(pygg.geom_point(a=False), 'geom_point(a=FALSE)')
self.check_me(pygg.geom_point(a=[1, 2]), 'geom_point(a=c(1,2))')
self.check_me(pygg.geom_point(a={'list1': 1, 'list2': 2}),
'geom_point(a=list(list1=1,list2=2))')
self.check_me(pygg.geom_point(1, a=2.0, b=[3, 4],
c={'list1': pygg.esc('s1'), 'list2': 2}),
'geom_point(1,a=2.0,b=c(3,4),c=list(list1="s1",list2=2))')
def testPython2RStringEsc(self):
"""Test GGStatement escapes strings properly"""
self.check_me(pygg.geom_point(a="b"), 'geom_point(a=b)')
self.check_me(pygg.geom_point(a='b'), 'geom_point(a=b)')
self.check_me(pygg.geom_point(a="'b'"), 'geom_point(a=\'b\')')
self.check_me(pygg.geom_point(a='"b"'), 'geom_point(a="b")')
self.check_me(pygg.geom_point(a={'k': pygg.esc("v")}),
'geom_point(a=list(k="v"))')
self.check_me(pygg.geom_point(a=[pygg.esc("a"), pygg.esc("b")]),
'geom_point(a=c("a","b"))')
class TestIntegration(unittest.TestCase):
"""Basic unit testing for pygg"""
def testE2E(self):
"""Test end-to-end creation of figures with outputs to pdf and png"""
p = pygg.ggplot('diamonds', pygg.aes(x='carat', y='price', color='clarity'))
p += pygg.geom_point(alpha=0.5, size = .75)
p += pygg.scale_x_log10()
p += pygg.theme_bw()
self.check_ggsave(p, None, ext=".pdf")
self.check_ggsave(p, None, ext=".png")
self.check_ggsave(p, None, ext=".jpg")
def testPandasDF(self):
data = pandas.read_csv(io.StringIO(IRIS_DATA_CSV))
self.assertIsInstance(data, pandas.DataFrame)
p = pygg.ggplot('data',
pygg.aes(x='SepalLength', y='PetalLength', color='Name'))
p += pygg.geom_point()
p += pygg.geom_smooth()
p += pygg.ggtitle(pygg.esc('Test title'))
self.check_ggsave(p, data)
def testPandasDFggplot(self):
data = pandas.read_csv(io.StringIO(IRIS_DATA_CSV))
self.assertIsInstance(data, pandas.DataFrame)
p = pygg.ggplot(data,
pygg.aes(x='SepalLength', y='PetalLength', color='Name'))
p += pygg.geom_point()
p += pygg.geom_smooth()
p += pygg.ggtitle(pygg.esc('Test title'))
self.check_ggsave(p)
def testBasicDataggplot(self):
data = dict(x=list(range(10)), y=list(range(10)))
p = pygg.ggplot(data, pygg.aes(x='x', y='y'))
p += pygg.geom_point()
p += pygg.geom_smooth()
p += pygg.ggtitle(pygg.esc('Test title'))
self.check_ggsave(p)
    def testListOfDictDataggplot(self):
data = [dict(x=x, y=y) for x, y in zip(list(range(10)), list(range(10)))]
p = pygg.ggplot(data, pygg.aes(x='x', y='y'))
p += pygg.geom_point()
p += pygg.geom_smooth()
p += pygg.ggtitle(pygg.esc('Test title'))
self.check_ggsave(p)
def testLimits(self):
p = pygg.ggplot('diamonds', pygg.aes(x='carat', y='price', color='clarity'))
p += pygg.geom_point(alpha=0.5, size = .75)
p += pygg.scale_x_log10(limits=[1, 2])
self.check_ggsave(p, None)
def testFacets1(self):
p = pygg.ggplot('diamonds', pygg.aes(x='carat', y='price'))
p += pygg.geom_point()
p += pygg.facet_grid("clarity~.")
self.check_ggsave(p, None)
def testFacets2(self):
p = pygg.ggplot('diamonds', pygg.aes(x='carat', y='price'))
p += pygg.geom_point()
p += pygg.facet_wrap("~clarity")
self.check_ggsave(p, None)
def testLibs(self):
p = pygg.ggplot('diamonds', pygg.aes(x='carat', y='price'))
p += pygg.geom_point()
tmpfile = tempfile.NamedTemporaryFile(suffix='.pdf').name
pygg.ggsave(tmpfile, p, data=None, libs=['grid'], quiet=True)
self.assertTrue(os.path.exists(tmpfile))
self.assertTrue(os.path.getsize(tmpfile) > 0)
def testNativeRDataset(self):
p = pygg.ggplot('diamonds', pygg.aes(x='carat', y='carat')) + pygg.geom_point()
self.check_ggsave(p, None)
def check_ggsave(self, plotobj, data=None, ext='.pdf'):
tmpfile = tempfile.NamedTemporaryFile(suffix=ext).name
pygg.ggsave(tmpfile, plotobj, data=data, quiet=True)
self.assertTrue(os.path.exists(tmpfile))
self.assertTrue(os.path.getsize(tmpfile) > 0)
def testBadGGPlotFails(self):
p = pygg.ggplot('diamonds', pygg.aes(x='MISSING')) + pygg.geom_point()
with self.assertRaises(ValueError):
tmpfile = tempfile.NamedTemporaryFile(suffix=".png").name
pygg.ggsave(tmpfile, p, data=None, quiet=True)
IRIS_DATA_CSV = """SepalLength,SepalWidth,PetalLength,PetalWidth,Name
5.1,3.5,1.4,0.2,Iris-setosa
4.9,3.0,1.4,0.2,Iris-setosa
4.7,3.2,1.3,0.2,Iris-setosa
4.6,3.1,1.5,0.2,Iris-setosa
5.0,3.6,1.4,0.2,Iris-setosa
5.4,3.9,1.7,0.4,Iris-setosa
4.6,3.4,1.4,0.3,Iris-setosa
5.0,3.4,1.5,0.2,Iris-setosa
4.4,2.9,1.4,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
5.4,3.7,1.5,0.2,Iris-setosa
4.8,3.4,1.6,0.2,Iris-setosa
4.8,3.0,1.4,0.1,Iris-setosa
4.3,3.0,1.1,0.1,Iris-setosa
5.8,4.0,1.2,0.2,Iris-setosa
5.7,4.4,1.5,0.4,Iris-setosa
5.4,3.9,1.3,0.4,Iris-setosa
5.1,3.5,1.4,0.3,Iris-setosa
5.7,3.8,1.7,0.3,Iris-setosa
5.1,3.8,1.5,0.3,Iris-setosa
5.4,3.4,1.7,0.2,Iris-setosa
5.1,3.7,1.5,0.4,Iris-setosa
4.6,3.6,1.0,0.2,Iris-setosa
5.1,3.3,1.7,0.5,Iris-setosa
4.8,3.4,1.9,0.2,Iris-setosa
5.0,3.0,1.6,0.2,Iris-setosa
5.0,3.4,1.6,0.4,Iris-setosa
5.2,3.5,1.5,0.2,Iris-setosa
5.2,3.4,1.4,0.2,Iris-setosa
4.7,3.2,1.6,0.2,Iris-setosa
4.8,3.1,1.6,0.2,Iris-setosa
5.4,3.4,1.5,0.4,Iris-setosa
5.2,4.1,1.5,0.1,Iris-setosa
5.5,4.2,1.4,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
5.0,3.2,1.2,0.2,Iris-setosa
5.5,3.5,1.3,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
4.4,3.0,1.3,0.2,Iris-setosa
5.1,3.4,1.5,0.2,Iris-setosa
5.0,3.5,1.3,0.3,Iris-setosa
4.5,2.3,1.3,0.3,Iris-setosa
4.4,3.2,1.3,0.2,Iris-setosa
5.0,3.5,1.6,0.6,Iris-setosa
5.1,3.8,1.9,0.4,Iris-setosa
4.8,3.0,1.4,0.3,Iris-setosa
5.1,3.8,1.6,0.2,Iris-setosa
4.6,3.2,1.4,0.2,Iris-setosa
5.3,3.7,1.5,0.2,Iris-setosa
5.0,3.3,1.4,0.2,Iris-setosa
7.0,3.2,4.7,1.4,Iris-versicolor
6.4,3.2,4.5,1.5,Iris-versicolor
6.9,3.1,4.9,1.5,Iris-versicolor
5.5,2.3,4.0,1.3,Iris-versicolor
6.5,2.8,4.6,1.5,Iris-versicolor
5.7,2.8,4.5,1.3,Iris-versicolor
6.3,3.3,4.7,1.6,Iris-versicolor
4.9,2.4,3.3,1.0,Iris-versicolor
6.6,2.9,4.6,1.3,Iris-versicolor
5.2,2.7,3.9,1.4,Iris-versicolor
5.0,2.0,3.5,1.0,Iris-versicolor
5.9,3.0,4.2,1.5,Iris-versicolor
6.0,2.2,4.0,1.0,Iris-versicolor
6.1,2.9,4.7,1.4,Iris-versicolor
5.6,2.9,3.6,1.3,Iris-versicolor
6.7,3.1,4.4,1.4,Iris-versicolor
5.6,3.0,4.5,1.5,Iris-versicolor
5.8,2.7,4.1,1.0,Iris-versicolor
6.2,2.2,4.5,1.5,Iris-versicolor
5.6,2.5,3.9,1.1,Iris-versicolor
5.9,3.2,4.8,1.8,Iris-versicolor
6.1,2.8,4.0,1.3,Iris-versicolor
6.3,2.5,4.9,1.5,Iris-versicolor
6.1,2.8,4.7,1.2,Iris-versicolor
6.4,2.9,4.3,1.3,Iris-versicolor
6.6,3.0,4.4,1.4,Iris-versicolor
6.8,2.8,4.8,1.4,Iris-versicolor
6.7,3.0,5.0,1.7,Iris-versicolor
6.0,2.9,4.5,1.5,Iris-versicolor
5.7,2.6,3.5,1.0,Iris-versicolor
5.5,2.4,3.8,1.1,Iris-versicolor
5.5,2.4,3.7,1.0,Iris-versicolor
5.8,2.7,3.9,1.2,Iris-versicolor
6.0,2.7,5.1,1.6,Iris-versicolor
5.4,3.0,4.5,1.5,Iris-versicolor
6.0,3.4,4.5,1.6,Iris-versicolor
6.7,3.1,4.7,1.5,Iris-versicolor
6.3,2.3,4.4,1.3,Iris-versicolor
5.6,3.0,4.1,1.3,Iris-versicolor
5.5,2.5,4.0,1.3,Iris-versicolor
5.5,2.6,4.4,1.2,Iris-versicolor
6.1,3.0,4.6,1.4,Iris-versicolor
5.8,2.6,4.0,1.2,Iris-versicolor
5.0,2.3,3.3,1.0,Iris-versicolor
5.6,2.7,4.2,1.3,Iris-versicolor
5.7,3.0,4.2,1.2,Iris-versicolor
5.7,2.9,4.2,1.3,Iris-versicolor
6.2,2.9,4.3,1.3,Iris-versicolor
5.1,2.5,3.0,1.1,Iris-versicolor
5.7,2.8,4.1,1.3,Iris-versicolor
6.3,3.3,6.0,2.5,Iris-virginica
5.8,2.7,5.1,1.9,Iris-virginica
7.1,3.0,5.9,2.1,Iris-virginica
6.3,2.9,5.6,1.8,Iris-virginica
6.5,3.0,5.8,2.2,Iris-virginica
7.6,3.0,6.6,2.1,Iris-virginica
4.9,2.5,4.5,1.7,Iris-virginica
7.3,2.9,6.3,1.8,Iris-virginica
6.7,2.5,5.8,1.8,Iris-virginica
7.2,3.6,6.1,2.5,Iris-virginica
6.5,3.2,5.1,2.0,Iris-virginica
6.4,2.7,5.3,1.9,Iris-virginica
6.8,3.0,5.5,2.1,Iris-virginica
5.7,2.5,5.0,2.0,Iris-virginica
5.8,2.8,5.1,2.4,Iris-virginica
6.4,3.2,5.3,2.3,Iris-virginica
6.5,3.0,5.5,1.8,Iris-virginica
7.7,3.8,6.7,2.2,Iris-virginica
7.7,2.6,6.9,2.3,Iris-virginica
6.0,2.2,5.0,1.5,Iris-virginica
6.9,3.2,5.7,2.3,Iris-virginica
5.6,2.8,4.9,2.0,Iris-virginica
7.7,2.8,6.7,2.0,Iris-virginica
6.3,2.7,4.9,1.8,Iris-virginica
6.7,3.3,5.7,2.1,Iris-virginica
7.2,3.2,6.0,1.8,Iris-virginica
6.2,2.8,4.8,1.8,Iris-virginica
6.1,3.0,4.9,1.8,Iris-virginica
6.4,2.8,5.6,2.1,Iris-virginica
7.2,3.0,5.8,1.6,Iris-virginica
7.4,2.8,6.1,1.9,Iris-virginica
7.9,3.8,6.4,2.0,Iris-virginica
6.4,2.8,5.6,2.2,Iris-virginica
6.3,2.8,5.1,1.5,Iris-virginica
6.1,2.6,5.6,1.4,Iris-virginica
7.7,3.0,6.1,2.3,Iris-virginica
6.3,3.4,5.6,2.4,Iris-virginica
6.4,3.1,5.5,1.8,Iris-virginica
6.0,3.0,4.8,1.8,Iris-virginica
6.9,3.1,5.4,2.1,Iris-virginica
6.7,3.1,5.6,2.4,Iris-virginica
6.9,3.1,5.1,2.3,Iris-virginica
5.8,2.7,5.1,1.9,Iris-virginica
6.8,3.2,5.9,2.3,Iris-virginica
6.7,3.3,5.7,2.5,Iris-virginica
6.7,3.0,5.2,2.3,Iris-virginica
6.3,2.5,5.0,1.9,Iris-virginica
6.5,3.0,5.2,2.0,Iris-virginica
6.2,3.4,5.4,2.3,Iris-virginica
5.9,3.0,5.1,1.8,Iris-virginica
"""
if __name__ == '__main__':
unittest.main()
|
mit
|
sinhrks/pandas-ml
|
pandas_ml/skaccessors/test/test_naive_bayes.py
|
1
|
1236
|
#!/usr/bin/env python
import sklearn.datasets as datasets
import sklearn.naive_bayes as nb
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestNaiveBayes(tm.TestCase):
def test_objectmapper(self):
df = pdml.ModelFrame([])
self.assertIs(df.naive_bayes.GaussianNB, nb.GaussianNB)
self.assertIs(df.naive_bayes.MultinomialNB, nb.MultinomialNB)
self.assertIs(df.naive_bayes.BernoulliNB, nb.BernoulliNB)
def test_Classifications(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
models = ['GaussianNB', 'MultinomialNB', 'BernoulliNB']
for model in models:
mod1 = getattr(df.naive_bayes, model)()
mod2 = getattr(nb, model)()
df.fit(mod1)
mod2.fit(iris.data, iris.target)
result = df.predict(mod1)
expected = mod2.predict(iris.data)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
bsd-3-clause
|
vybstat/scikit-learn
|
sklearn/tree/tests/test_export.py
|
130
|
9950
|
"""
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
    for finding in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
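# ---------------------------------------------------------------------------
# Illustrative note (added; not part of the original tests): the dot source
# produced by export_graphviz can be rendered to an image, assuming the
# optional `graphviz` Python package and the Graphviz binaries are available.
# The output file name below is arbitrary.
#
#     import graphviz
#     dot_data = StringIO()
#     export_graphviz(DecisionTreeClassifier().fit(X, y), out_file=dot_data)
#     graphviz.Source(dot_data.getvalue()).render("tree_example", format="png")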
|
bsd-3-clause
|
ifarup/website
|
imt6131/code/12_anisotropic_diff/12_anisotropic_diff.py
|
1
|
1125
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
im = plt.imread('lena.png')
im = np.sum(im, 2) / 3.
dt = .25
eps = 1e-5
kappa = 1e4
gx, gy = np.gradient(im)
gnormsq = gx**2 + gy**2
gnorm = np.sqrt(gnormsq)
v1x = np.zeros(np.shape(gx))
v1y = np.zeros(np.shape(gx))
v2x = np.zeros(np.shape(gx))
v2y = np.zeros(np.shape(gx))
lambda1 = np.zeros(np.shape(gx))
lambda2 = np.zeros(np.shape(gx))
v1x[...] = 1
v1y[...] = 0
v2x[...] = 0
v2y[...] = 1
lambda1[...] = 1
lambda2[...] = 1
ind = (gnormsq != 0)
v1x[ind] = gx[ind] / gnorm[ind]
v1y[ind] = gy[ind] / gnorm[ind]
v2x[ind] = v1y[ind]
v2y[ind] = -v1x[ind]
lambda1[ind] = 1 / (1 + kappa * gnormsq[ind])
lambda2[ind] = 1
D11 = lambda1 * v1x**2 + lambda2 * v2x**2
D22 = lambda1 * v1y**2 + lambda2 * v2y**2
D12 = lambda1 * v1x * v1y + lambda2 * v2x * v2y
for i in range(100):
plt.imsave('im_%04d.png' % i, im, cmap=plt.cm.gray, vmin=0, vmax=1)
gx, gy = np.gradient(im)
gxx, tmp = np.gradient(D11 * gx + D12 * gy)
tmp, gyy = np.gradient(D12 * gx + D22 * gy)
tv = gxx + gyy
im[1:-1, 1:-1] = im[1:-1, 1:-1] + dt * tv[1:-1, 1:-1]
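# ---------------------------------------------------------------------------
# Added note (not in the original script): the loop above is an explicit
# Euler integration of the tensor diffusion equation
#
#     dI/dt = div(D grad I),    D = lambda1 * v1 v1^T + lambda2 * v2 v2^T,
#
# where v1 = grad I / |grad I| points across edges and v2 is perpendicular
# to it. With lambda1 = 1 / (1 + kappa * |grad I|^2) and lambda2 = 1,
# diffusion is suppressed across strong edges but unhindered along them,
# which is what makes the smoothing edge-preserving. Note that D is built
# once from the initial image; a fully nonlinear variant would recompute it
# at every iteration.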
|
gpl-3.0
|
aleksandr-bakanov/astropy
|
astropy/modeling/powerlaws.py
|
3
|
15873
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Power law model variants
"""
import numpy as np
from .core import Fittable1DModel
from .parameters import Parameter, InputParameterError
from astropy.units import Quantity
__all__ = ['PowerLaw1D', 'BrokenPowerLaw1D', 'SmoothlyBrokenPowerLaw1D',
'ExponentialCutoffPowerLaw1D', 'LogParabola1D']
class PowerLaw1D(Fittable1DModel):
"""
One dimensional power law model.
Parameters
----------
amplitude : float
Model amplitude at the reference point
x_0 : float
Reference point
alpha : float
Power law index
See Also
--------
BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``):
.. math:: f(x) = A (x / x_0) ^ {-\\alpha}
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=1)
alpha = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, alpha):
"""One dimensional power law model function"""
xx = x / x_0
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha):
"""One dimensional power law derivative with respect to parameters"""
xx = x / x_0
d_amplitude = xx ** (-alpha)
d_x_0 = amplitude * alpha * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
return [d_amplitude, d_x_0, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit['x'],
'amplitude': outputs_unit['y']}
class BrokenPowerLaw1D(Fittable1DModel):
"""
One dimensional power law model with a break.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for x < x_break.
alpha_2 : float
Power law index for x > x_break.
See Also
--------
PowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha_1`
for ``alpha_1`` and :math:`\\alpha_2` for ``alpha_2``):
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A (x / x_{break}) ^ {-\\alpha_1} & : x < x_{break} \\\\
A (x / x_{break}) ^ {-\\alpha_2} & : x > x_{break} \\\\
\\end{array}
\\right.
"""
amplitude = Parameter(default=1)
x_break = Parameter(default=1)
alpha_1 = Parameter(default=1)
alpha_2 = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law model function"""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law derivative with respect to parameters"""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
d_amplitude = xx ** (-alpha)
d_x_break = amplitude * alpha * d_amplitude / x_break
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_alpha_1 = np.where(x < x_break, d_alpha, 0)
d_alpha_2 = np.where(x >= x_break, d_alpha, 0)
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2]
@property
def input_units(self):
if self.x_break.unit is None:
return None
else:
return {'x': self.x_break.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_break': inputs_unit['x'],
'amplitude': outputs_unit['y']}
class SmoothlyBrokenPowerLaw1D(Fittable1DModel):
"""One dimensional smoothly broken power law model.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for ``x << x_break``.
alpha_2 : float
Power law index for ``x >> x_break``.
delta : float
Smoothness parameter.
See Also
--------
BrokenPowerLaw1D
Notes
-----
Model formula (with :math:`A` for ``amplitude``, :math:`x_b` for
``x_break``, :math:`\\alpha_1` for ``alpha_1``,
:math:`\\alpha_2` for ``alpha_2`` and :math:`\\Delta` for
``delta``):
.. math::
f(x) = A \\left( \\frac{x}{x_b} \\right) ^ {-\\alpha_1}
\\left\\{
\\frac{1}{2}
\\left[
1 + \\left( \\frac{x}{x_b}\\right)^{1 / \\Delta}
\\right]
\\right\\}^{(\\alpha_1 - \\alpha_2) \\Delta}
The change of slope occurs between the values :math:`x_1`
and :math:`x_2` such that:
.. math::
\\log_{10} \\frac{x_2}{x_b} = \\log_{10} \\frac{x_b}{x_1}
\\sim \\Delta
At values :math:`x \\lesssim x_1` and :math:`x \\gtrsim x_2` the
model is approximately a simple power law with index
:math:`\\alpha_1` and :math:`\\alpha_2` respectively. The two
power laws are smoothly joined at values :math:`x_1 < x < x_2`,
hence the :math:`\\Delta` parameter sets the "smoothness" of the
slope change.
The ``delta`` parameter is bounded to values greater than 1e-3
(corresponding to :math:`x_2 / x_1 \\gtrsim 1.002`) to avoid
overflow errors.
The ``amplitude`` parameter is bounded to positive values since
this model is typically used to represent positive quantities.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling import models
x = np.logspace(0.7, 2.3, 500)
f = models.SmoothlyBrokenPowerLaw1D(amplitude=1, x_break=20,
alpha_1=-2, alpha_2=2)
plt.figure()
plt.title("amplitude=1, x_break=20, alpha_1=-2, alpha_2=2")
f.delta = 0.5
plt.loglog(x, f(x), '--', label='delta=0.5')
f.delta = 0.3
plt.loglog(x, f(x), '-.', label='delta=0.3')
f.delta = 0.1
plt.loglog(x, f(x), label='delta=0.1')
plt.axis([x.min(), x.max(), 0.1, 1.1])
plt.legend(loc='lower center')
plt.grid(True)
plt.show()
"""
amplitude = Parameter(default=1, min=0)
x_break = Parameter(default=1)
alpha_1 = Parameter(default=-2)
alpha_2 = Parameter(default=2)
delta = Parameter(default=1, min=1.e-3)
@amplitude.validator
def amplitude(self, value):
if np.any(value <= 0):
raise InputParameterError(
"amplitude parameter must be > 0")
@delta.validator
def delta(self, value):
if np.any(value < 0.001):
raise InputParameterError(
"delta parameter must be >= 0.001")
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2, delta):
"""One dimensional smoothly broken power law model function"""
# Pre-calculate `x/x_b`
xx = x / x_break
# Initialize the return value
f = np.zeros_like(xx, subok=False)
if isinstance(amplitude, Quantity):
return_unit = amplitude.unit
amplitude = amplitude.value
else:
return_unit = None
# The quantity `t = (x / x_b)^(1 / delta)` can become quite
# large. To avoid overflow errors we will start by calculating
# its natural logarithm:
logt = np.log(xx) / delta
# When `t >> 1` or `t << 1` we don't actually need to compute
# the `t` value since the main formula (see docstring) can be
# significantly simplified by neglecting `1` or `t`
# respectively. In the following we will check whether `t` is
# much greater, much smaller, or comparable to 1 by comparing
# the `logt` value with an appropriate threshold.
threshold = 30 # corresponding to exp(30) ~ 1e13
i = logt > threshold
if (i.max()):
# In this case the main formula reduces to a simple power
# law with index `alpha_2`.
f[i] = amplitude * xx[i] ** (-alpha_2) \
/ (2. ** ((alpha_1 - alpha_2) * delta))
i = logt < -threshold
if (i.max()):
# In this case the main formula reduces to a simple power
# law with index `alpha_1`.
f[i] = amplitude * xx[i] ** (-alpha_1) \
/ (2. ** ((alpha_1 - alpha_2) * delta))
i = np.abs(logt) <= threshold
if (i.max()):
# In this case the `t` value is "comparable" to 1, hence we
            # will evaluate the whole formula.
t = np.exp(logt[i])
r = (1. + t) / 2.
f[i] = amplitude * xx[i] ** (-alpha_1) \
* r ** ((alpha_1 - alpha_2) * delta)
if return_unit:
return Quantity(f, unit=return_unit, copy=False)
else:
return f
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2, delta):
"""One dimensional smoothly broken power law derivative with respect
to parameters"""
# Pre-calculate `x_b` and `x/x_b` and `logt` (see comments in
# SmoothlyBrokenPowerLaw1D.evaluate)
xx = x / x_break
logt = np.log(xx) / delta
# Initialize the return values
f = np.zeros_like(xx)
d_amplitude = np.zeros_like(xx)
d_x_break = np.zeros_like(xx)
d_alpha_1 = np.zeros_like(xx)
d_alpha_2 = np.zeros_like(xx)
d_delta = np.zeros_like(xx)
threshold = 30 # (see comments in SmoothlyBrokenPowerLaw1D.evaluate)
i = logt > threshold
if (i.max()):
f[i] = amplitude * xx[i] ** (-alpha_2) \
/ (2. ** ((alpha_1 - alpha_2) * delta))
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * alpha_2 / x_break
d_alpha_1[i] = f[i] * (-delta * np.log(2))
d_alpha_2[i] = f[i] * (-np.log(xx[i]) + delta * np.log(2))
d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))
i = logt < -threshold
if (i.max()):
f[i] = amplitude * xx[i] ** (-alpha_1) \
/ (2. ** ((alpha_1 - alpha_2) * delta))
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * alpha_1 / x_break
d_alpha_1[i] = f[i] * (-np.log(xx[i]) - delta * np.log(2))
d_alpha_2[i] = f[i] * delta * np.log(2)
d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))
i = np.abs(logt) <= threshold
if (i.max()):
t = np.exp(logt[i])
r = (1. + t) / 2.
f[i] = amplitude * xx[i] ** (-alpha_1) \
* r ** ((alpha_1 - alpha_2) * delta)
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * (alpha_1 - (alpha_1 - alpha_2) * t / 2. / r) / x_break
d_alpha_1[i] = f[i] * (-np.log(xx[i]) + delta * np.log(r))
d_alpha_2[i] = f[i] * (-delta * np.log(r))
d_delta[i] = f[i] * (alpha_1 - alpha_2) \
* (np.log(r) - t / (1. + t) / delta * np.log(xx[i]))
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2, d_delta]
@property
def input_units(self):
if self.x_break.unit is None:
return None
else:
return {'x': self.x_break.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_break': inputs_unit['x'],
'amplitude': outputs_unit['y']}
class ExponentialCutoffPowerLaw1D(Fittable1DModel):
"""
One dimensional power law model with an exponential cutoff.
Parameters
----------
amplitude : float
Model amplitude
x_0 : float
Reference point
alpha : float
Power law index
x_cutoff : float
Cutoff point
See Also
--------
PowerLaw1D, BrokenPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``):
.. math:: f(x) = A (x / x_0) ^ {-\\alpha} \\exp (-x / x_{cutoff})
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=1)
alpha = Parameter(default=1)
x_cutoff = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, alpha, x_cutoff):
"""One dimensional exponential cutoff power law model function"""
xx = x / x_0
return amplitude * xx ** (-alpha) * np.exp(-x / x_cutoff)
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, x_cutoff):
"""One dimensional exponential cutoff power law derivative with respect to parameters"""
xx = x / x_0
xc = x / x_cutoff
d_amplitude = xx ** (-alpha) * np.exp(-xc)
d_x_0 = alpha * amplitude * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_x_cutoff = amplitude * x * d_amplitude / x_cutoff ** 2
return [d_amplitude, d_x_0, d_alpha, d_x_cutoff]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit['x'],
'x_cutoff': inputs_unit['x'],
'amplitude': outputs_unit['y']}
class LogParabola1D(Fittable1DModel):
"""
One dimensional log parabola model (sometimes called curved power law).
Parameters
----------
amplitude : float
Model amplitude
x_0 : float
Reference point
alpha : float
Power law index
beta : float
Power law curvature
See Also
--------
PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha`` and :math:`\\beta` for ``beta``):
.. math:: f(x) = A \\left(\\frac{x}{x_{0}}\\right)^{- \\alpha - \\beta \\log{\\left (\\frac{x}{x_{0}} \\right )}}
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=1)
alpha = Parameter(default=1)
beta = Parameter(default=0)
@staticmethod
def evaluate(x, amplitude, x_0, alpha, beta):
"""One dimensional log parabola model function"""
xx = x / x_0
exponent = -alpha - beta * np.log(xx)
return amplitude * xx ** exponent
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, beta):
"""One dimensional log parabola derivative with respect to parameters"""
xx = x / x_0
log_xx = np.log(xx)
exponent = -alpha - beta * log_xx
d_amplitude = xx ** exponent
d_beta = -amplitude * d_amplitude * log_xx ** 2
d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0)
d_alpha = -amplitude * d_amplitude * log_xx
return [d_amplitude, d_x_0, d_alpha, d_beta]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit['x'],
'amplitude': outputs_unit['y']}
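# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the astropy source). It only
# checks PowerLaw1D against its closed-form expression
# f(x) = amplitude * (x / x_0) ** (-alpha); the parameter values are arbitrary.
if __name__ == '__main__':
    _pl = PowerLaw1D(amplitude=10.0, x_0=1.0, alpha=2.0)
    _x = np.array([1.0, 2.0, 5.0])
    # Model evaluation and the explicit formula should give the same values:
    # [10.0, 2.5, 0.4]
    print(_pl(_x))
    print(10.0 * (_x / 1.0) ** (-2.0))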
|
bsd-3-clause
|
bryantbiggs/luther-02
|
src/aggregate.py
|
2
|
7563
|
'''
Combine data sources on fuzzy-matched titles to create final data set
'''
from multiprocessing import Pool, cpu_count
from functools import partial
import pandas as pd
import numpy as np
from fuzzywuzzy import process
from src.my_aws import S3
KEY_OMDB_TOR = 'OMDB_Torrents.csv'
KEY_NUM = 'TheNumbers_budgets.csv'
KEY_FINAL = 'Final_Data.csv'
BUCKET = 'movie-torrents'
def fuzzy_match(search_title, match_titles):
'''
    Perform fuzzy match on provided title (search_title)
Args:
search_title (str): Movie title to match up with
Returns:
dict: Dictionary of title searched, title it most closely
matches and the percent that they match up
'''
match_title, match_percent = process.extractOne(
search_title, match_titles)
return {'RowTitle': search_title,
'FuzzTitle': match_title,
'FuzzPercent': match_percent}
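# Illustrative note (added): with fuzzywuzzy installed, a call such as
#     fuzzy_match('The Matrix Reloaded', ['The Matrix', 'The Matrix Reloaded'])
# would be expected to return a dict along the lines of
#     {'RowTitle': 'The Matrix Reloaded',
#      'FuzzTitle': 'The Matrix Reloaded',
#      'FuzzPercent': 100}
# (the exact score depends on the fuzzywuzzy version). Scores of 95 and above
# are treated as a confident match in Aggregate.make_fuzzy_match() below.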
class Aggregate():
'''
Combine data sources on fuzzy-matched titles and
create final data set
'''
def __init__(self):
'''
Get a S3 connection object and pull down both data sources
upon instantiation
'''
self.s3_obj = S3()
self.tor_data = self.s3_obj.get_data(KEY_OMDB_TOR, BUCKET)
self.num_data = self.s3_obj.get_data(KEY_NUM, BUCKET)
self.final_data = None
def make_fuzzy_match(self):
'''
        Fuzzy match Torrent data titles against `The Numbers` titles,
        keeping only matches at or above a 95% threshold, to prep the
        Torrent data to be merged with `The Numbers` data
Args:
none
Returns:
pd.dataframe: Dataframe of torrent data with fuzzy
matched title to join on
'''
torrent_titles = self.tor_data['Title'].tolist()
numbers_titles = self.num_data['title'].tolist()
# Map fuzz search over available cpus
num_cpus = cpu_count()
worker_pool = Pool(num_cpus)
fuzz_results = worker_pool.map(
partial(fuzzy_match, match_titles=numbers_titles), torrent_titles)
# fuzz_results = worker_pool.map(fuzzy_match, torrent_titles)
worker_pool.close()
worker_pool.join()
# Put results into a dataframe and rename columns
fuzz_data = pd.DataFrame(fuzz_results)
fuzz_data = fuzz_data[['RowTitle', 'FuzzTitle', 'FuzzPercent']]
fuzz_data.columns = ['Title', 'FuzzTitle', 'FuzzPercent']
# Append to torrent dataframe and drop duplicate titles
merge_df = pd.merge(self.tor_data, fuzz_data, how='inner', on='Title')
merge_df.drop_duplicates(subset='Title', inplace=True)
# Drop rows where match was below 95%
merge_df = merge_df[merge_df['FuzzPercent'] >= 95]
self.tor_data = merge_df
return merge_df
def clean_fuzz_data(self):
'''
        Clean up data prior to merging with `The Numbers` data
Args:
none
Returns:
pd.dataframe: Dataframe of Torrent data ready to be merged
with `The Numbers` data
'''
tor = self.tor_data
# remove where no torrent counts were received from any source
tor['CheckTup'] = list(zip(tor['Kat_Count'].tolist(),
tor['Pirate_Count'].tolist(),
tor['Extra_Count'].tolist(),
tor['Torrentz_Count'].tolist(),
tor['Torrentz_Ver_Count'].tolist(),
tor['Zoogle_Ver_Count'].tolist()))
tor = tor[tor['CheckTup'] != ('Fail', 'Fail', 'Fail', 'Fail',
'Fail', 'Fail')].reset_index(drop=True)
# Drop temp column for checking
del tor['CheckTup']
# replace Fail, None, N and NaN with 0 - remove >, and <
int_cols = ['Metascore', 'Runtime', 'imdbRating', 'imdbVotes',
'Kat_Count', 'Pirate_Count', 'Extra_Count',
'Torrentz_Count', 'Torrentz_Ver_Count', 'Zoogle_Ver_Count']
for col in int_cols:
tor[col] = tor[col].replace(['Fail', 'None', 'N', 'NaN'], '0')
tor[col] = tor[col].apply(lambda x: str(x).replace(
'>', '').replace('<', '').replace(',', ''))
tor[col] = tor[col].replace(np.nan, 0)
tor[col] = tor[col].fillna(value=0)
tor[col] = pd.to_numeric(tor[col],
errors='coerce',
downcast='integer')
tor[col] = tor[col].fillna(value=0)
# fill in remaining NaN's with blanks
tor.fillna(value='', inplace=True)
self.tor_data = tor
return tor
def merge_data_sources(self):
'''
Merge Torrent data set with `The Numbers` data set
Args:
none
Returns:
pd.dataframe: Dataframe of resultant data set
'''
self.num_data.columns = ['FuzzTitle', 'ReleaseDate',
'ProductionBudget', 'DomesticBudget',
'WorldGross']
# merge data frames
data_df = pd.merge(self.tor_data, self.num_data,
how='left', on='FuzzTitle')
data_df = data_df.drop_duplicates(subset='imdbID')
self.final_data = data_df
return data_df
def prepare_final_data(self):
'''
Final preparations to final data set prior to
upload to S3
Args:
none
Returns:
pd.dataframe: Dataframe of final data set
'''
data_df = self.final_data
# Clean up dates
data_df['Released'] = pd.to_datetime(data_df['Released'])
data_df['ReleaseDate'] = pd.to_datetime(data_df['ReleaseDate'])
data_df.loc[data_df['Released'].isnull(
), 'Released'] = data_df['ReleaseDate']
# Drop columns no longer needed
del data_df['ReleaseDate']
del data_df['FuzzTitle']
del data_df['FuzzPercent']
# sum torrent counts
data_df['Total_Torrents'] = data_df[['Kat_Count',
'Pirate_Count',
'Extra_Count',
'Torrentz_Count',
'Torrentz_Ver_Count',
'Zoogle_Ver_Count']].sum(axis=1)
self.final_data = data_df
return data_df
def put_data_s3(self):
'''
Upload clean data to S3 storage
Args:
none
Returns:
none
'''
self.s3_obj.put_data(self.final_data, KEY_FINAL, BUCKET)
def export_data(self, write_file):
'''
Export data to csv file with the provided name/location (write_file)
Args:
write_file (str): Full file path of where to save csv file
Returns:
none
'''
self.final_data.to_csv(write_file, sep=',', index=False)
def aggregate_data(self):
'''
        Aggregate both data sources, clean, and then upload to S3
Args:
none
Returns:
none
'''
self.make_fuzzy_match()
self.clean_fuzz_data()
self.merge_data_sources()
self.prepare_final_data()
self.put_data_s3()
if __name__ == '__main__':
AGG = Aggregate()
AGG.aggregate_data()
|
mit
|
johankaito/fufuka
|
microblog/flask/venv/lib/python2.7/site-packages/scipy/signal/waveforms.py
|
17
|
14814
|
# Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
        ramp. `width` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
"""
t, w = asarray(t), asarray(width)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
    if t.dtype.char in 'fFdD':
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
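# Worked example (added note): with the default width=1 the rising-ramp branch
# covers the whole period, so on [0, 2*pi) the output is simply tmod/pi - 1:
#
#     sawtooth(0.0)          -> -1.0
#     sawtooth(np.pi)        ->  0.0
#     sawtooth(1.5 * np.pi)  ->  0.5
#
# and the waveform wraps back to -1 at t = 2*pi.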
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> plt.figure()
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
    if t.dtype.char in 'fFdD':
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
    # on the interval duty*2*pi to 2*pi the function is -1
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
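# Worked example (added note): for the default 'linear' method the phase
# computed below is
#
#     phase(t) = 2*pi * (f0*t + 0.5*beta*t**2),    beta = (f1 - f0) / t1,
#
# so the instantaneous frequency d(phase)/dt / (2*pi) = f0 + (f1 - f0)*t/t1
# sweeps linearly from f0 at t=0 to f1 at t=t1. For instance,
#
#     t = np.linspace(0, 1, 8000)
#     y = chirp(t, f0=1, t1=1, f1=6, method='linear')
#
# starts near 1 cycle per unit time and ends near 6.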
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array_like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees, Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
|
apache-2.0
|
jblackburne/scikit-learn
|
examples/covariance/plot_outlier_detection.py
|
24
|
4318
|
"""
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates three
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which assumes that the
  data are Gaussian distributed and performs better than the One-Class SVM
  in that case;
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
  hence better suited to high-dimensional settings, even though it performs
  quite well in the examples below.
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
rng = np.random.RandomState(42)
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
"Isolation Forest": IsolationForest(max_samples=n_samples,
contamination=outliers_fraction,
random_state=rng)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
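# Editorial aside (not part of the original example): the docstring above notes
# that, when the contamination fraction is known, the raw decision_function
# scores can be thresholded directly instead of relying on predict().  A
# minimal sketch of that idea on made-up scores:
demo_scores = np.array([0.9, 0.8, 0.7, -0.5, 0.6, -1.2, 0.75, 0.65])
demo_threshold = stats.scoreatpercentile(demo_scores, 100 * outliers_fraction)
demo_labels = np.where(demo_scores >= demo_threshold, 1, -1)  # 1: inlier, -1: outlier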
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
plt.figure(figsize=(10.8, 3.6))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
scores_pred = clf.decision_function(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
y_pred = clf.predict(X)
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 3, i + 1)
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11),
loc='lower right')
subplot.set_title("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.92, 0.1, 0.26)
plt.show()
|
bsd-3-clause
|
dingocuster/scikit-learn
|
benchmarks/bench_plot_approximate_neighbors.py
|
244
|
6011
|
"""
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, accuracy of LSHForest queries are measured for various
hyper-parameters and index sizes.
Second, the speed-up of LSHForest queries compared to the brute-force
method for exact nearest neighbors is measured for the
aforementioned settings. In general, the speed-up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
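def demo_recall(approx_idx, exact_idx):
    """Editorial illustration (not part of the original benchmark): the
    per-query accuracy above is simply the fraction of approximate neighbor
    indices that also appear among the exact neighbor indices."""
    return np.in1d(approx_idx, exact_idx).mean()
# e.g. demo_recall([3, 7, 42], [3, 42, 99]) == 2.0 / 3.0 (two of three found)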
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
|
bsd-3-clause
|
EconomicSL/housing-model
|
src/main/resources/calibration/code/IncomeDistByTenure.py
|
2
|
5549
|
# -*- coding: utf-8 -*-
"""
Code to read English Housing Survey data about income for different tenures and plot a histogram of it.
@author: daniel, Adrian Carro
"""
import Datasets as ds
import matplotlib.pyplot as plt
# Read data
rawData = ds.EHSinterview()
# Data filtering according to the following fields:
# - Prevten: Previous Tenure (new household; owned outright; buying with a mortgage; owned, unknown if outright or
# mortgage; council; housing association; privately rented; rented, landlord unknown; household reference person
# resident 3 years or more; does not apply; no answer)
# - agehrpx: Age of HRP (no answer; does not apply; 95 or over)
# - lenresb: Length of residence (less than 1 year; one year; two years; 3-4 years; 5-9 years; 10-19 years;
# 20-29 years; 30+ years; does not apply; no answer)
# - tenure4: Tenure Group 4 (no answer; owners; social sector; private renters; does not apply)
# - HYEARGRx: Household gross annual income (inc. income from all adult household members). An extension of the gross
# income of the HRP and any partner. This variable represents the household gross income of ALL adults living within
# the household (£100,000 or more)
#
# First, select only these 5 columns, and only rows where Prevten does not assume the values "does not apply",
# "no answer" nor "household reference person resident 3 years or more"
data = rawData[['Prevten', 'agehrpx', 'lenresb', 'tenure4', 'HYEARGRx']][(rawData['Prevten'] != "does not apply") &
(rawData['Prevten'] != "no answer") &
(rawData['Prevten'] != "household reference "
"person resident 3 "
"years or more")]
# data = rawData[['Prevten', 'agehrpx', 'lenresb', 'tenure4', 'HYEARGRx']][(rawData['Prevten'] > 0) &
# (rawData['Prevten'] < 9)]
# Replace the label "£100,000 or more" with the value 100000
data["HYEARGRx"].replace(to_replace=".*more$", value=100000, inplace=True, regex=True)
# TODO: Old and devoid of much sense code, to be kept for now and decided upon later
# # Transform labels at lenresb column into (average) values, filtering out "does not apply" and "no answer"
# data = data[(data["lenresb"] != "does not apply") & (data["lenresb"] != "no answer")]
# dictionary = {"less than 1 year": 0.5,
# "one year": 1,
# "two years": 2,
# "3-4 years": 3.5,
# "5-9 years": 7,
# "10-19 years": 15,
# "20-29 years": 25,
# "30+ years": 30}
# data["lenresb"].replace(dictionary, inplace=True)
#
# # Transform "95 or over" label at agehrpx into 95
# data["agehrpx"].replace({"95 or over": 95}, inplace=True)
#
# # Add new column with age of Household Repr. Person at moment of last move (current age - length of residence)
# data["ageAtMove"] = data['agehrpx'] - data['lenresb']
#
# # Transform labels at lenresb column into original value codes
# dictionary = {"new household": 1,
# "owned outright": 2,
# "buying with a mortgage": 3,
# "owned, unknown if outright or mortgage": 4,
# "council": 5,
# "housing association": 6,
# "privately rented": 7,
# "rented, landlord unknown": 8}
# data["Prevten"].replace(dictionary, inplace=True)
#
# # Transform labels at tenure4 column into original value codes
# dictionary = {"no answer": -8,
# "owners": 1,
# "social sector": 2,
# "private renters": 3,
# "does not apply": -9}
# data["tenure4"].replace(dictionary, inplace=True)
#
# # Some absolute nonsense calculation
# data['tenChange'] = ((data['Prevten'] < 5) & (data['Prevten'] > 1)) * 10.0 \
# + ((data['Prevten'] > 4) & (data['Prevten'] < 7)) * 20.0 + (data['Prevten'] > 6) * 30.0 \
# + data['tenure4']
#
# # Some absolute nonsense filtering of ageAtMove data
# formationData = data[['ageAtMove']][data['tenChange'] < 10]
# formationRentData = data[['ageAtMove']][data['tenChange'] == 3] # This selects private renters
# moverData = data[['ageAtMove']][(data['tenChange'] > 10) & (data['tenChange'] < 20)]
# rentownData = data[['ageAtMove']][data['tenChange'] == 31]
# fluxes = data[['tenChange']]
# Selecting income values for private renters
incomeFormRent = data[["HYEARGRx"]][data["tenure4"] == "private renters"]
# incomeFormRent = data[['HYEARGRx']][data['tenure4'] == 3] # Old version
# Selecting income values for socially housed households
incomeFormSoc = data[["HYEARGRx"]][data["tenure4"] == "social sector"]
# incomeFormSoc = data[['HYEARGRx']][data['tenure4'] == 2] # Old version
# Selecting income values for owners
incomeFormOwn = data[["HYEARGRx"]][data["tenure4"] == "owners"]
# incomeFormOwn = data[['HYEARGRx']][data['tenure4'] == 1] # Old version
# Plot histogram of data
plt.figure(figsize=(9, 6))
plt.hist([incomeFormSoc.values, incomeFormRent.values, incomeFormOwn.values], bins=20, stacked=False, density=True,
label=["Social housing", "Private renters", "Owners"], rwidth=0.9)
plt.xlabel("Annual net household income")
plt.ylabel("Density")
plt.legend()
plt.tight_layout()
plt.show()
|
mit
|
gwpy/seismon
|
seismon/bits.py
|
2
|
22194
|
#!/usr/bin/python
import os, glob, optparse, shutil, warnings, pickle, math, copy, matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal, scipy.stats
import seismon.NLNM, seismon.html
import seismon.eqmon, seismon.utils
try:
import gwpy.time, gwpy.timeseries
import gwpy.frequencyseries, gwpy.spectrogram
import gwpy.plotter
except:
print("gwpy import fails... no plotting possible.")
__author__ = "Michael Coughlin <[email protected]>"
__date__ = "2012/8/26"
__version__ = "0.1"
# =============================================================================
#
# DEFINITIONS
#
# =============================================================================
def save_data(params,channel,gpsStart,gpsEnd,data,attributeDics):
"""@saves spectral data in text files.
@param params
seismon params dictionary
@param channel
seismon channel structure
@param gpsStart
start gps
    @param gpsEnd
        end gps
    @param data
        spectral data structure
    @param attributeDics
        list of earthquake attribute dictionaries
"""
ifo = seismon.utils.getIfo(params)
psdDirectory = params["dirPath"] + "/Text_Files/PSD/" + channel.station_underscore + "/" + str(params["fftDuration"])
seismon.utils.mkdir(psdDirectory)
fftDirectory = params["dirPath"] + "/Text_Files/FFT/" + channel.station_underscore + "/" + str(params["fftDuration"])
seismon.utils.mkdir(fftDirectory)
timeseriesDirectory = params["dirPath"] + "/Text_Files/Timeseries/" + channel.station_underscore + "/" + str(params["fftDuration"])
seismon.utils.mkdir(timeseriesDirectory)
earthquakesDirectory = params["dirPath"] + "/Text_Files/Earthquakes/" + channel.station_underscore + "/" + str(params["fftDuration"])
seismon.utils.mkdir(earthquakesDirectory)
freq = np.array(data["dataASD"].frequencies)
psdFile = os.path.join(psdDirectory,"%d-%d.txt"%(gpsStart,gpsEnd))
f = open(psdFile,"wb")
for i in range(len(freq)):
f.write("%e %e\n"%(freq[i],data["dataASD"][i]))
f.close()
freq = np.array(data["dataFFT"].frequencies)
fftFile = os.path.join(fftDirectory,"%d-%d.txt"%(gpsStart,gpsEnd))
f = open(fftFile,"wb")
for i in range(len(freq)):
f.write("%e %e %e\n"%(freq[i],data["dataFFT"].data[i].real,data["dataFFT"].data[i].imag))
f.close()
tt = np.array(data["dataLowpass"].times)
timeseriesFile = os.path.join(timeseriesDirectory,"%d-%d.txt"%(gpsStart,gpsEnd))
f = open(timeseriesFile,"wb")
f.write("%.10f %e\n"%(tt[np.argmin(data["dataLowpass"].data)],np.min(data["dataLowpass"].data)))
f.write("%.10f %e\n"%(tt[np.argmax(data["dataLowpass"].data)],np.max(data["dataLowpass"].data)))
f.close()
for attributeDic in attributeDics:
if params["ifo"] == "IRIS":
attributeDic = seismon.eqmon.ifotraveltimes(attributeDic, "IRIS", channel.latitude, channel.longitude)
traveltimes = attributeDic["traveltimes"]["IRIS"]
else:
traveltimes = attributeDic["traveltimes"][ifo]
Ptime = max(traveltimes["Ptimes"])
Stime = max(traveltimes["Stimes"])
Rtwotime = max(traveltimes["Rtwotimes"])
RthreePointFivetime = max(traveltimes["RthreePointFivetimes"])
Rfivetime = max(traveltimes["Rfivetimes"])
distance = max(traveltimes["Distances"])
indexes = np.intersect1d(np.where(tt >= Rfivetime)[0],np.where(tt <= Rtwotime)[0])
if len(indexes) == 0:
continue
indexMin = np.min(indexes)
indexMax = np.max(indexes)
ttCut = tt[indexes]
dataCut = data["dataLowpass"][indexMin:indexMax]
ampMax = np.max(dataCut.data)
ttMax = ttCut[np.argmax(dataCut.data)]
ttDiff = ttMax - attributeDic["GPS"]
velocity = distance / ttDiff
velocity = velocity / 1000.0
earthquakesFile = os.path.join(earthquakesDirectory,"%s.txt"%(attributeDic["eventName"]))
f = open(earthquakesFile,"wb")
f.write("%.10f %e %e %e %e\n"%(ttMax,ttDiff,distance,velocity,ampMax))
f.close()
def bits(params, channel, segment):
"""@calculates spectral data for given channel and segment.
@param params
seismon params dictionary
@param channel
seismon channel structure
@param segment
[start,end] gps
"""
ifo = seismon.utils.getIfo(params)
gpsStart = segment[0]
gpsEnd = segment[1]
# set the times
duration = np.ceil(gpsEnd-gpsStart)
# make timeseries
state = seismon.utils.retrieve_bits(params, channel, segment)
flags = state.to_dqflags(round=True)
if params["doPlots"]:
plotDirectory = params["path"] + "/" + channel.station_underscore
seismon.utils.mkdir(plotDirectory)
pngFile = os.path.join(plotDirectory,"bits.png")
#plot = gwpy.plotter.TimeSeriesPlot(figsize=[14,8])
valid={'facecolor': 'red'}
plot = state.plot(valid=valid)
#plot.ylabel = r"Velocity [$\mu$m/s]"
#plot.title = channel.station.replace("_","\_")
#plot.xlim = xlim
#plot.ylim = ylim
#plot.add_legend(loc=1,prop={'size':10})
plot.save(pngFile)
plot.close()
def freq_analysis(params,channel,tt,freq,spectra):
"""@frequency analysis of spectral data.
@param params
seismon params dictionary
@param channel
seismon channel structure
@param tt
array of start times
@param freq
frequency vector
@param spectra
spectrogram structure
"""
if params["doPlots"]:
plotDirectory = params["path"] + "/" + channel.station_underscore + "/freq"
seismon.utils.mkdir(plotDirectory)
indexes = np.logspace(0,np.log10(len(freq)-1),num=100)
indexes = list(np.unique(np.ceil(indexes)))
indexes = range(len(freq))
#indexes = range(16)
indexes = np.where(10.0 >= freq)[0]
deltaT = tt[1] - tt[0]
n_dist = []
for j in range(1000):
n_dist.append(scipy.stats.chi2.rvs(2))
p_chi2_vals = []
p_ks_vals = []
ttCoh_vals = []
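    # For each selected frequency bin, the loop below rescales the PSD samples
    # as 2 * PSD / mean, which for stationary Gaussian noise should follow a
    # chi-squared distribution with 2 degrees of freedom.  The observed
    # histogram is compared against that expectation with chi-square and K-S
    # tests, and a coherence time is estimated from where the autocovariance
    # drops to ~0.66 (interpretation inferred from the code itself).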
for i in indexes:
vals = spectra[:,i]
meanPSD = np.mean(vals)
stdPSD = np.std(vals)
vals_norm = 2 * vals / meanPSD
bins = np.arange(0,10,1)
(n,bins) = np.histogram(vals_norm,bins=bins)
n_total = np.sum(n)
bins = (bins[1:] + bins[:len(bins)-1])/2
n_expected = []
for bin in bins:
expected_val = n_total * scipy.stats.chi2.pdf(bin, 2)
n_expected.append(expected_val)
n_expected = np.array(n_expected)
(stat_chi2,p_chi2) = scipy.stats.mstats.chisquare(n, f_exp=n_expected)
p_chi2_vals.append(p_chi2)
(stat_ks,p_ks) = scipy.stats.ks_2samp(vals_norm, n_dist)
p_ks_vals.append(p_ks)
acov = np.correlate(vals,vals,"full")
acov = acov / np.max(acov)
ttCov = (np.arange(len(acov)) - len(acov)/2) * float(deltaT)
#ttLimitMin = - 5/freq[i]
#ttLimitMax = 5 /freq[i]
ttLimitMin = - float('inf')
ttLimitMax = float('inf')
ttIndexes = np.intersect1d(np.where(ttCov >= ttLimitMin)[0],np.where(ttCov <= ttLimitMax)[0])
#ttCov = ttCov / (60)
acov_minus_05 = np.absolute(acov[ttIndexes] - 0.66)
index_min = acov_minus_05.argmin()
ttCoh = np.absolute(ttCov[ttIndexes[index_min]])
ttCoh_vals.append(ttCoh)
if len(ttIndexes) == 0:
continue
#if freq[i] > 0:
# continue
if params["doPlots"]:
ax = plt.subplot(111)
plt.plot(bins,n,label='true')
plt.plot(bins,n_expected,'k*',label='expected')
plt.xlabel("2 * data / mean")
plt.ylabel("Counts")
plot_title = "p-value: %f"%p_chi2
plt.title(plot_title)
plt.legend()
plt.show()
plt.savefig(os.path.join(plotDirectory,"%s_dist.png"%str(freq[i])),dpi=200)
plt.savefig(os.path.join(plotDirectory,"%s_dist.eps"%str(freq[i])),dpi=200)
plt.close('all')
ax = plt.subplot(111)
plt.semilogy(ttCov[ttIndexes],acov[ttIndexes])
plt.vlines(ttCoh,10**(-3),1,color='r')
plt.vlines(-ttCoh,10**(-3),1,color='r')
plt.xlabel("Time [Seconds]")
plt.ylabel("Correlation")
plt.show()
plt.savefig(os.path.join(plotDirectory,"%s_cov.png"%str(freq[i])),dpi=200)
plt.savefig(os.path.join(plotDirectory,"%s_cov.eps"%str(freq[i])),dpi=200)
plt.close('all')
if params["doPlots"]:
ax = plt.subplot(111)
plt.loglog(freq[indexes],p_chi2_vals,label='chi2')
plt.loglog(freq[indexes],p_ks_vals,label='k-s')
plt.xlabel("Frequency [Hz]")
plt.ylabel("p-value")
plt.legend(loc=3)
plt.show()
plt.savefig(os.path.join(plotDirectory,"freq_analysis.png"),dpi=200)
plt.savefig(os.path.join(plotDirectory,"freq_analysis.eps"),dpi=200)
plt.close('all')
ax = plt.subplot(111)
plt.semilogx(freq[indexes],ttCoh_vals)
plt.xlabel("Frequency [Hz]")
plt.ylabel("Coherence Time [s]")
plt.show()
plt.savefig(os.path.join(plotDirectory,"ttCohs.png"),dpi=200)
plt.savefig(os.path.join(plotDirectory,"ttCohs.eps"),dpi=200)
plt.close('all')
def analysis(params, channel):
"""@analysis of spectral data.
@param params
seismon params dictionary
@param channel
seismon channel structure
"""
psdDirectory = params["dirPath"] + "/Text_Files/PSD/" + channel.station_underscore + "/" + str(params["fftDuration"])
files = glob.glob(os.path.join(psdDirectory,"*.txt"))
files = sorted(files)
if not params["doFreqAnalysis"]:
if len(files) > 1000:
files = files[:1000]
#files = files[-10:]
tts = []
spectra = []
for file in files:
fileSplit = file.split("/")
txtFile = fileSplit[-1].replace(".txt","")
txtFileSplit = txtFile.split("-")
thisTTStart = int(txtFileSplit[0])
thisTTEnd = int(txtFileSplit[1])
tt = thisTTStart
if tt in tts:
continue
tts.append(tt)
spectra_out = gwpy.frequencyseries.Spectrum.read(file)
spectra_out.unit = 'counts/Hz^(1/2)'
spectra.append(spectra_out)
if tt == params["gpsStart"]:
spectraNow = spectra_out
if not 'spectraNow' in locals():
print("no data at requested time... continuing\n")
return
if np.mean(spectraNow.data) == 0.0:
print("data only zeroes... continuing\n")
return
dt = tts[1] - tts[0]
epoch = gwpy.time.Time(tts[0], format='gps')
specgram = gwpy.spectrogram.Spectrogram.from_spectra(*spectra, dt=dt,epoch=epoch)
freq = np.array(specgram.frequencies)
# Define bins for the spectral variation histogram
kwargs = {'log':True,'nbins':500,'norm':True}
#kwargs = {'log':True,'nbins':500}
specvar = gwpy.frequencyseries.hist.SpectralVariance.from_spectrogram(specgram,**kwargs)
bins = specvar.bins[:-1]
specvar = specvar * 100
if params["doFreqAnalysis"]:
        freq_analysis(params,channel,tts,freq,specgram)
# Calculate percentiles
spectral_variation_1per = specvar.percentile(1)
spectral_variation_10per = specvar.percentile(10)
spectral_variation_50per = specvar.percentile(50)
spectral_variation_90per = specvar.percentile(90)
spectral_variation_99per = specvar.percentile(99)
textDirectory = params["path"] + "/" + channel.station_underscore
seismon.utils.mkdir(textDirectory)
f = open(os.path.join(textDirectory,"spectra.txt"),"w")
for i in range(len(freq)):
f.write("%e %e %e %e %e %e %e\n"%(freq[i],spectral_variation_1per[i],spectral_variation_10per[i],spectral_variation_50per[i],spectral_variation_90per[i],spectral_variation_99per[i],spectraNow[i]))
f.close()
sigDict = {}
# Break up entire frequency band into 6 segments
ff_ave = [1/float(128), 1/float(64), 0.1, 1, 3, 5, 10]
f = open(os.path.join(textDirectory,"sig.txt"),"w")
for i in range(len(ff_ave)-1):
newSpectra = []
newSpectraNow = []
newFreq = []
for j in range(len(freq)):
if ff_ave[i] <= freq[j] and freq[j] <= ff_ave[i+1]:
newFreq.append(freq[j])
newSpectraNow.append(spectraNow.data[j])
if newSpectra == []:
newSpectra = specgram.data[:,j]
else:
newSpectra = np.vstack([newSpectra,specgram.data[:,j]])
newSpectra = np.array(newSpectra)
if len(newSpectra.shape) > 1:
newSpectra = np.mean(newSpectra, axis = 0)
sig, bgcolor = seismon.utils.html_bgcolor(np.mean(newSpectraNow),newSpectra)
f.write("%e %e %e %e %s\n"%(ff_ave[i],ff_ave[i+1],np.mean(newSpectraNow),sig,bgcolor))
key = "%s-%s"%(ff_ave[i],ff_ave[i+1])
dt = tts[-1] - tts[-2]
epoch = gwpy.time.Time(tts[0], format='gps')
timeseries = gwpy.timeseries.TimeSeries(newSpectra, epoch=epoch, sample_rate=1.0/dt)
sigDict[key] = {}
#timeseries.data = np.log10(timeseries.data)
sigDict[key]["data"] = timeseries
f.close()
if params["doPlots"]:
plotDirectory = params["path"] + "/" + channel.station_underscore
seismon.utils.mkdir(plotDirectory)
fl, low, fh, high = seismon.NLNM.NLNM(2)
pngFile = os.path.join(plotDirectory,"psd.png")
plot = spectraNow.plot()
kwargs = {"linestyle":"-","color":"k"}
plot.add_line(freq, spectral_variation_10per, **kwargs)
plot.add_line(freq, spectral_variation_50per, **kwargs)
plot.add_line(freq, spectral_variation_90per, **kwargs)
kwargs = {"linestyle":"-.","color":"k"}
plot.add_line(fl, low, **kwargs)
plot.add_line(fh, high, **kwargs)
plot.xlim = [params["fmin"],params["fmax"]]
plot.ylim = [np.min(bins), np.max(bins)]
plot.xlabel = "Frequency [Hz]"
plot.ylabel = "Amplitude Spectrum [(m/s)/rtHz]"
plot.save(pngFile,dpi=200)
plot.close()
pngFile = os.path.join(plotDirectory,"disp.png")
spectraNowDisplacement = spectraNow / freq
plot = spectraNowDisplacement.plot()
kwargs = {"linestyle":"-","color":"w"}
plot.add_line(freq, spectral_variation_10per/freq, **kwargs)
plot.add_line(freq, spectral_variation_50per/freq, **kwargs)
plot.add_line(freq, spectral_variation_90per/freq, **kwargs)
kwargs = {"linestyle":"-.","color":"k"}
plot.add_line(fl, low/fl, **kwargs)
plot.add_line(fh, high/fh, **kwargs)
plot.xlim = [params["fmin"],params["fmax"]]
plot.ylim = [np.min(bins)/np.max(freq), np.max(bins)/np.min(freq)]
plot.xlabel = "Frequency [Hz]"
plot.ylabel = "Displacement Spectrum [m/rtHz]"
plot.save(pngFile,dpi=200)
plot.close()
pngFile = os.path.join(plotDirectory,"tf.png")
specgramLog = specgram.to_logf(fmin=np.min(freq),fmax=np.max(freq))
plot = specgramLog.plot()
plot.ylim = [params["fmin"],params["fmax"]]
plot.ylabel = "Frequency [Hz]"
colorbar_label = "Amplitude Spectrum [(m/s)/rtHz]"
kwargs = {}
plot.add_colorbar(location='right', log=True, label=colorbar_label, clim=None, visible=True, **kwargs)
plot.save(pngFile,dpi=200)
plot.close()
pngFile = os.path.join(plotDirectory,"psd.png")
plot = spectraNow.plot()
kwargs = {"linestyle":"-","color":"k"}
plot.add_line(freq, spectral_variation_10per, **kwargs)
plot.add_line(freq, spectral_variation_50per, **kwargs)
plot.add_line(freq, spectral_variation_90per, **kwargs)
kwargs = {"linestyle":"-.","color":"k"}
plot.add_line(fl, low, **kwargs)
plot.add_line(fh, high, **kwargs)
plot.xlim = [params["fmin"],params["fmax"]]
plot.ylim = [np.min(bins),np.max(bins)]
plot.xlabel = "Frequency [Hz]"
plot.ylabel = "Amplitude Spectrum [(m/s)/rtHz]"
plot.save(pngFile,dpi=200)
plot.close()
pngFile = os.path.join(plotDirectory,"specvar.png")
kwargs = {"linestyle":"-","color":"w"}
#plot = specvar.plot(**kwargs)
plot = spectraNow.plot(**kwargs)
kwargs = {"linestyle":"-","color":"k"}
plot.add_line(freq, spectral_variation_10per, **kwargs)
plot.add_line(freq, spectral_variation_50per, **kwargs)
plot.add_line(freq, spectral_variation_90per, **kwargs)
extent = [np.min(freq), np.max(freq),
np.min(bins), np.max(bins)]
kwargs = {}
#plot.plot_variance(specvar, extent=extent, **kwargs)
plot.axes[0].set_xscale("log")
plot.axes[0].set_yscale("log")
kwargs = {"linestyle":"-.","color":"k"}
plot.add_line(fl, low, **kwargs)
plot.add_line(fh, high, **kwargs)
plot.xlim = [params["fmin"],params["fmax"]]
plot.ylim = [np.min(bins), np.max(bins)]
plot.xlabel = "Frequency [Hz]"
plot.ylabel = "Amplitude Spectrum [(m/s)/rtHz]"
plot.save(pngFile,dpi=200)
plot.close()
X,Y = np.meshgrid(freq, bins)
ax = plt.subplot(111)
#im = plt.pcolor(X,Y,np.transpose(spectral_variation_norm), cmap=plt.cm.jet)
im = plt.pcolor(X,Y,np.transpose(specvar.data), cmap=plt.cm.jet)
ax.set_xscale('log')
ax.set_yscale('log')
plt.semilogx(freq,spectraNow, 'k', label='Current')
plt.semilogx(freq,spectral_variation_10per,'w',label='10')
plt.semilogx(freq,spectral_variation_50per,'w',label='50')
plt.semilogx(freq,spectral_variation_90per,'w',label='90')
plt.loglog(fl,low,'k-.')
plt.loglog(fh,high,'k-.',label='LNM/HNM')
plt.xlim([params["fmin"],params["fmax"]])
plt.ylim([np.min(bins), np.max(bins)])
plt.xlabel("Frequency [Hz]")
plt.ylabel("Amplitude Spectrum [(m/s)/rtHz]")
plt.clim(0,5)
plt.grid()
plt.show()
plt.savefig(pngFile,dpi=200)
plt.close('all')
pngFile = os.path.join(plotDirectory,"bands.png")
plot = gwpy.plotter.TimeSeriesPlot()
for key in sigDict.iterkeys():
label = key
plot.add_timeseries(sigDict[key]["data"], label=label)
plot.axes[0].set_yscale("log")
plot.ylabel = "Average Amplitude Spectrum log10[(m/s)/rtHz]"
plot.add_legend(loc=1,prop={'size':10})
plot.save(pngFile,dpi=200)
plot.close()
htmlPage = seismon.html.seismon_page(channel,textDirectory)
if htmlPage is not None:
f = open(os.path.join(textDirectory,"psd.html"),"w")
f.write(htmlPage)
f.close()
def channel_summary(params, segment):
"""@summary of channels of spectral data.
@param params
seismon params dictionary
@param segment
[start,end] gps
"""
gpsStart = segment[0]
gpsEnd = segment[1]
data = {}
for channel in params["channels"]:
psdDirectory = params["dirPath"] + "/Text_Files/PSD/" + channel.station_underscore + "/" + str(params["fftDuration"])
file = os.path.join(psdDirectory,"%d-%d.txt"%(gpsStart,gpsEnd))
if not os.path.isfile(file):
continue
spectra_out = gwpy.frequencyseries.Spectrum.read(file)
spectra_out.unit = 'counts/Hz^(1/2)'
if np.sum(spectra_out.data) == 0.0:
continue
data[channel.station_underscore] = {}
data[channel.station_underscore]["data"] = spectra_out
if data == {}:
return
if params["doPlots"]:
plotDirectory = params["path"] + "/summary"
seismon.utils.mkdir(plotDirectory)
fl, low, fh, high = seismon.NLNM.NLNM(2)
pngFile = os.path.join(plotDirectory,"psd.png")
lowBin = np.inf
highBin = -np.inf
plot = gwpy.plotter.Plot(figsize=[14,8])
for key in data.iterkeys():
label = key.replace("_","\_")
plot.add_spectrum(data[key]["data"], label=label)
lowBin = np.min([lowBin,np.min(data[key]["data"])])
highBin = np.max([highBin,np.max(data[key]["data"])])
kwargs = {"linestyle":"-.","color":"k"}
plot.add_line(fl, low, **kwargs)
plot.add_line(fh, high, **kwargs)
plot.xlim = [params["fmin"],params["fmax"]]
plot.ylim = [lowBin, highBin]
plot.xlabel = "Frequency [Hz]"
plot.ylabel = "Amplitude Spectrum [(m/s)/rtHz]"
plot.add_legend(loc=1,prop={'size':10})
plot.axes[0].set_xscale("log")
plot.axes[0].set_yscale("log")
plot.save(pngFile,dpi=200)
plot.close()
pngFile = os.path.join(plotDirectory,"ratio.png")
lowBin = np.inf
highBin = -np.inf
ref = params["referenceChannel"].replace(":","_")
plot = gwpy.plotter.Plot(figsize=[14,8])
for key in data.iterkeys():
label = key.replace("_","\_")
plot.add_spectrum(data[key]["data"] / data[ref]["data"], label=label)
lowBin = np.min([lowBin,np.min(data[key]["data"])])
highBin = np.max([highBin,np.max(data[key]["data"])])
kwargs = {"linestyle":"-.","color":"k"}
#plot.add_line(fl, low, **kwargs)
#plot.add_line(fh, high, **kwargs)
plot.xlim = [params["fmin"],params["fmax"]]
#plot.ylim = [lowBin, highBin]
plot.xlabel = "Frequency [Hz]"
label_ref = params["referenceChannel"].replace("_","\_")
plot.ylabel = "Spectrum / Reference [%s]"%(label_ref)
plot.add_legend(loc=1,prop={'size':10})
plot.axes[0].set_xscale("log")
plot.axes[0].set_yscale("log")
plot.save(pngFile,dpi=200)
plot.close()
|
gpl-3.0
|